gauravvjhaa committed on
Commit
b6a80c5
·
1 Parent(s): 8a96280

Simplify app structure - remove FastAPI conflict

Browse files
Files changed (1) hide show
  1. app.py +88 -181
app.py CHANGED
@@ -6,8 +6,6 @@ import base64
6
  from io import BytesIO
7
  import json
8
  from huggingface_hub import hf_hub_download
9
- from fastapi import FastAPI, Request
10
- from fastapi.responses import JSONResponse
11
 
12
  print("🚀 Starting Affecto Inference Service...")
13
 
@@ -29,7 +27,7 @@ print(f"✅ Model downloaded to: {model_path}")
29
 
30
  # Load checkpoint
31
  checkpoint = torch.load(model_path, map_location=device)
32
- print(f"📦 Checkpoint keys: {checkpoint.keys()}")
33
 
34
  # ============================================
35
  # IMAGE PROCESSING UTILITIES
@@ -42,24 +40,22 @@ def preprocess_image(image):
42
  if not isinstance(image, Image.Image):
43
  image = Image.fromarray(image)
44
 
45
- # Ensure RGB
46
  if image.mode != 'RGB':
47
  image = image.convert('RGB')
48
 
49
- # Transform
50
  transform = transforms.Compose([
51
  transforms.Resize((256, 256)),
52
  transforms.ToTensor(),
53
  transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
54
  ])
55
 
56
- tensor = transform(image).unsqueeze(0) # Add batch dimension
57
  return tensor.to(device)
58
 
59
  def postprocess_tensor(tensor):
60
  """Convert tensor to PIL image"""
61
  tensor = tensor.squeeze(0).cpu()
62
- tensor = tensor * 0.5 + 0.5 # Denormalize
63
  tensor = torch.clamp(tensor, 0, 1)
64
 
65
  numpy_image = tensor.numpy().transpose(1, 2, 0)
@@ -79,59 +75,44 @@ def base64_to_pil(base64_str):
79
  return Image.open(BytesIO(image_bytes))
80
 
81
  # ============================================
82
- # MOCK INFERENCE (Replace with actual model)
83
  # ============================================
84
 
85
  def apply_emotion_transform(input_tensor, au_params):
86
- """
87
- Apply emotion transformation
88
-
89
- TODO: Replace this with actual MagicFace inference when you have the model architecture
90
- For now, this is a placeholder that applies simple image adjustments
91
- """
92
  print(f"🎭 Applying transformation with AU params: {au_params}")
93
 
94
- # PLACEHOLDER: Simple brightness/contrast adjustment based on AU params
95
- # In production, replace this with actual model inference
96
  output = input_tensor.clone()
97
 
98
- # Example: Adjust based on AU12 (smile)
99
  if "AU12" in au_params:
100
  intensity = au_params["AU12"]
101
- output = output * (1.0 + intensity * 0.2) # Brighten
102
 
103
- # Example: Adjust based on AU4 (frown)
104
  if "AU4" in au_params:
105
  intensity = au_params["AU4"]
106
- output = output * (1.0 - intensity * 0.15) # Darken
107
 
108
  output = torch.clamp(output, -1, 1)
109
-
110
  return output
111
 
112
  # ============================================
113
- # API ENDPOINT (For Flutter App)
114
  # ============================================
115
 
116
- def transform_api(image_base64, au_params):
117
  """API function for external calls"""
118
  try:
 
 
 
119
  print(f"📥 Received API request with AU params: {au_params}")
120
 
121
- # Decode image
122
  image = base64_to_pil(image_base64)
123
  print(f"📸 Image size: {image.size}")
124
 
125
- # Preprocess
126
  input_tensor = preprocess_image(image)
127
-
128
- # Transform
129
  output_tensor = apply_emotion_transform(input_tensor, au_params)
130
-
131
- # Postprocess
132
  result_image = postprocess_tensor(output_tensor)
133
-
134
- # Encode result
135
  result_base64 = pil_to_base64(result_image)
136
 
137
  print("✅ Transformation complete")
@@ -152,25 +133,38 @@ def transform_api(image_base64, au_params):
152
  "message": "Transformation failed"
153
  }
154
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
155
  # ============================================
156
- # GRADIO INTERFACE (For Testing)
157
  # ============================================
158
 
159
  def transform_gradio(image, au_params_str):
160
  """Gradio interface function"""
161
  try:
162
- # Parse AU params
163
  au_params = json.loads(au_params_str)
164
-
165
- # Preprocess
166
  input_tensor = preprocess_image(image)
167
-
168
- # Transform
169
  output_tensor = apply_emotion_transform(input_tensor, au_params)
170
-
171
- # Postprocess
172
  result_image = postprocess_tensor(output_tensor)
173
-
174
  return result_image
175
  except Exception as e:
176
  print(f"❌ Error: {str(e)}")
@@ -178,59 +172,7 @@ def transform_gradio(image, au_params_str):
178
  traceback.print_exc()
179
  return image
180
 
181
- # ============================================
182
- # FASTAPI APP (Must be created BEFORE Gradio)
183
- # ============================================
184
-
185
- app = FastAPI(title="Affecto Inference API")
186
-
187
- @app.get("/")
188
- async def root():
189
- """Root endpoint"""
190
- return {
191
- "message": "Affecto Inference API",
192
- "status": "running",
193
- "version": "1.0.0",
194
- "endpoints": {
195
- "health": "/health",
196
- "transform": "/transform",
197
- "gradio_ui": "/gradio"
198
- }
199
- }
200
-
201
- @app.get("/health")
202
- async def health_check():
203
- """Health check endpoint"""
204
- return {
205
- "status": "healthy",
206
- "model": "magicface",
207
- "device": str(device),
208
- "version": "1.0.0"
209
- }
210
-
211
- @app.post("/transform")
212
- async def api_transform_endpoint(request: Request):
213
- """Main transformation endpoint"""
214
- try:
215
- data = await request.json()
216
- result = transform_api(
217
- image_base64=data["image"],
218
- au_params=data["au_params"]
219
- )
220
- return JSONResponse(content=result)
221
- except Exception as e:
222
- return JSONResponse(
223
- content={
224
- "success": False,
225
- "error": str(e)
226
- },
227
- status_code=500
228
- )
229
-
230
- # ============================================
231
- # GRADIO UI
232
- # ============================================
233
-
234
  with gr.Blocks(theme=gr.themes.Soft(), title="Affecto Inference API") as demo:
235
  gr.Markdown("# 🎭 Affecto - Emotion Transformation API")
236
  gr.Markdown("Transform facial emotions using MagicFace Action Units")
@@ -250,17 +192,15 @@ with gr.Blocks(theme=gr.themes.Soft(), title="Affecto Inference API") as demo:
250
  output_image = gr.Image(type="pil", label="Transformed Result")
251
 
252
  gr.Markdown("### 🎨 Emotion Presets:")
253
- with gr.Row():
254
- gr.Examples(
255
- examples=[
256
- ['{"AU6": 1.0, "AU12": 1.0}', "😊 Happy"],
257
- ['{"AU1": 1.0, "AU4": 1.0, "AU15": 1.0}', "😢 Sad"],
258
- ['{"AU4": 1.0, "AU5": 1.0, "AU7": 1.0, "AU23": 1.0}', "😠 Angry"],
259
- ['{"AU1": 1.0, "AU2": 1.0, "AU5": 1.0, "AU26": 1.0}', "😮 Surprised"],
260
- ],
261
- inputs=[au_params_input],
262
- label="Click to use preset"
263
- )
264
 
265
  transform_btn.click(
266
  fn=transform_gradio,
@@ -272,91 +212,58 @@ with gr.Blocks(theme=gr.themes.Soft(), title="Affecto Inference API") as demo:
272
  gr.Markdown("""
273
  ## API Endpoints
274
 
275
- ### 1. Health Check
276
- **GET** `/health`
277
-
278
- ```bash
279
- curl https://gauravvjhaa-affecto-inference.hf.space/health
280
- ```
281
-
282
- ### 2. Transform Image
283
- **POST** `/transform`
284
 
285
- **Request Format:**
286
  ```json
287
  {
288
- "image": "base64_encoded_image_string",
289
- "au_params": {
290
- "AU6": 1.0,
291
- "AU12": 1.0
292
- }
293
- }
294
- ```
295
-
296
- **Response Format:**
297
- ```json
298
- {
299
- "success": true,
300
- "transformed_image": "base64_encoded_result",
301
- "au_params": {...},
302
- "message": "Transformation successful"
303
  }
304
  ```
305
 
306
- ### Available Action Units (AU):
307
- - **AU1**: Inner Brow Raiser
308
- - **AU2**: Outer Brow Raiser
309
- - **AU4**: Brow Lowerer
310
- - **AU5**: Upper Lid Raiser
311
- - **AU6**: Cheek Raiser
312
- - **AU7**: Lid Tightener
313
- - **AU9**: Nose Wrinkler
314
- - **AU12**: Lip Corner Puller (Smile)
315
- - **AU15**: Lip Corner Depressor
316
- - **AU17**: Chin Raiser
317
- - **AU20**: Lip Stretcher
318
- - **AU23**: Lip Tightener
319
- - **AU25**: Lips Part
320
- - **AU26**: Jaw Drop
321
-
322
- ### Example Usage (Python):
323
- ```python
324
- import requests
325
- import base64
326
-
327
- # Read image
328
- with open("image.jpg", "rb") as f:
329
- image_base64 = base64.b64encode(f.read()).decode()
330
 
331
- # Make request
332
- response = requests.post(
333
- "https://gauravvjhaa-affecto-inference.hf.space/transform",
334
- json={
335
- "image": image_base64,
336
- "au_params": {"AU6": 1.0, "AU12": 1.0}
337
- }
338
- )
339
-
340
- result = response.json()
341
- print(result["success"])
342
- ```
343
-
344
- ### Example Usage (cURL):
345
- ```bash
346
- curl -X POST https://gauravvjhaa-affecto-inference.hf.space/transform \\
347
- -H "Content-Type: application/json" \\
348
- -d '{
349
- "image": "YOUR_BASE64_IMAGE",
350
- "au_params": {"AU6": 1.0, "AU12": 1.0}
351
- }'
352
- ```
353
  """)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
354
 
355
- # Mount Gradio to FastAPI at /gradio path
356
- app = gr.mount_gradio_app(app, demo, path="/gradio")
357
 
358
  print("✅ Affecto Inference API Ready!")
359
- print(f"🌐 Root: https://gauravvjhaa-affecto-inference.hf.space/")
360
- print(f"🎨 Gradio UI: https://gauravvjhaa-affecto-inference.hf.space/gradio")
361
- print(f"🔌 API Health: https://gauravvjhaa-affecto-inference.hf.space/health")
362
- print(f"🔌 API Transform: https://gauravvjhaa-affecto-inference.hf.space/transform")
 
 
6
  from io import BytesIO
7
  import json
8
  from huggingface_hub import hf_hub_download
 
 
9
 
10
  print("🚀 Starting Affecto Inference Service...")
11
 
 
27
 
28
  # Load checkpoint
29
  checkpoint = torch.load(model_path, map_location=device)
30
+ print(f"📦 Checkpoint loaded successfully")
31
 
32
  # ============================================
33
  # IMAGE PROCESSING UTILITIES
 
40
  if not isinstance(image, Image.Image):
41
  image = Image.fromarray(image)
42
 
 
43
  if image.mode != 'RGB':
44
  image = image.convert('RGB')
45
 
 
46
  transform = transforms.Compose([
47
  transforms.Resize((256, 256)),
48
  transforms.ToTensor(),
49
  transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
50
  ])
51
 
52
+ tensor = transform(image).unsqueeze(0)
53
  return tensor.to(device)
54
 
55
  def postprocess_tensor(tensor):
56
  """Convert tensor to PIL image"""
57
  tensor = tensor.squeeze(0).cpu()
58
+ tensor = tensor * 0.5 + 0.5
59
  tensor = torch.clamp(tensor, 0, 1)
60
 
61
  numpy_image = tensor.numpy().transpose(1, 2, 0)
 
75
  return Image.open(BytesIO(image_bytes))
76
 
77
  # ============================================
78
+ # TRANSFORMATION
79
  # ============================================
80
 
81
  def apply_emotion_transform(input_tensor, au_params):
82
+ """Apply emotion transformation (placeholder)"""
 
 
 
 
 
83
  print(f"🎭 Applying transformation with AU params: {au_params}")
84
 
 
 
85
  output = input_tensor.clone()
86
 
 
87
  if "AU12" in au_params:
88
  intensity = au_params["AU12"]
89
+ output = output * (1.0 + intensity * 0.2)
90
 
 
91
  if "AU4" in au_params:
92
  intensity = au_params["AU4"]
93
+ output = output * (1.0 - intensity * 0.15)
94
 
95
  output = torch.clamp(output, -1, 1)
 
96
  return output
97
 
98
  # ============================================
99
+ # API FUNCTIONS
100
  # ============================================
101
 
102
+ def transform_api(data):
103
  """API function for external calls"""
104
  try:
105
+ image_base64 = data["image"]
106
+ au_params = data["au_params"]
107
+
108
  print(f"📥 Received API request with AU params: {au_params}")
109
 
 
110
  image = base64_to_pil(image_base64)
111
  print(f"📸 Image size: {image.size}")
112
 
 
113
  input_tensor = preprocess_image(image)
 
 
114
  output_tensor = apply_emotion_transform(input_tensor, au_params)
 
 
115
  result_image = postprocess_tensor(output_tensor)
 
 
116
  result_base64 = pil_to_base64(result_image)
117
 
118
  print("✅ Transformation complete")
 
133
  "message": "Transformation failed"
134
  }
135
 
136
+ def health_check():
137
+ """Health check function"""
138
+ return {
139
+ "status": "healthy",
140
+ "model": "magicface",
141
+ "device": str(device),
142
+ "version": "1.0.0"
143
+ }
144
+
145
+ def root_info():
146
+ """Root info function"""
147
+ return {
148
+ "message": "Affecto Inference API",
149
+ "status": "running",
150
+ "version": "1.0.0",
151
+ "endpoints": {
152
+ "health": "/health",
153
+ "transform": "/transform"
154
+ }
155
+ }
156
+
157
  # ============================================
158
+ # GRADIO INTERFACE
159
  # ============================================
160
 
161
  def transform_gradio(image, au_params_str):
162
  """Gradio interface function"""
163
  try:
 
164
  au_params = json.loads(au_params_str)
 
 
165
  input_tensor = preprocess_image(image)
 
 
166
  output_tensor = apply_emotion_transform(input_tensor, au_params)
 
 
167
  result_image = postprocess_tensor(output_tensor)
 
168
  return result_image
169
  except Exception as e:
170
  print(f"❌ Error: {str(e)}")
 
172
  traceback.print_exc()
173
  return image
174
 
175
+ # Build Gradio interface
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
176
  with gr.Blocks(theme=gr.themes.Soft(), title="Affecto Inference API") as demo:
177
  gr.Markdown("# 🎭 Affecto - Emotion Transformation API")
178
  gr.Markdown("Transform facial emotions using MagicFace Action Units")
 
192
  output_image = gr.Image(type="pil", label="Transformed Result")
193
 
194
  gr.Markdown("### 🎨 Emotion Presets:")
195
+ gr.Examples(
196
+ examples=[
197
+ ['{"AU6": 1.0, "AU12": 1.0}'],
198
+ ['{"AU1": 1.0, "AU4": 1.0, "AU15": 1.0}'],
199
+ ['{"AU4": 1.0, "AU5": 1.0, "AU7": 1.0, "AU23": 1.0}'],
200
+ ['{"AU1": 1.0, "AU2": 1.0, "AU5": 1.0, "AU26": 1.0}'],
201
+ ],
202
+ inputs=[au_params_input],
203
+ )
 
 
204
 
205
  transform_btn.click(
206
  fn=transform_gradio,
 
212
  gr.Markdown("""
213
  ## API Endpoints
214
 
215
+ ### Transform Image
216
+ **POST** `/api/transform`
 
 
 
 
 
 
 
217
 
 
218
  ```json
219
  {
220
+ "image": "base64_encoded_image",
221
+ "au_params": {"AU6": 1.0, "AU12": 1.0}
 
 
 
 
 
 
 
 
 
 
 
 
 
222
  }
223
  ```
224
 
225
+ ### Health Check
226
+ **GET** `/api/health`
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
227
 
228
+ Returns service status and model information.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
229
  """)
230
+
231
+ # API endpoints as Gradio functions
232
+ with gr.Tab("🔌 API"):
233
+ with gr.Row():
234
+ with gr.Column():
235
+ gr.Markdown("### POST /api/transform")
236
+ api_input = gr.Textbox(
237
+ label="Request JSON",
238
+ value='{"image": "BASE64_STRING", "au_params": {"AU6": 1.0}}',
239
+ lines=5
240
+ )
241
+ api_btn = gr.Button("Test API")
242
+ api_output = gr.JSON(label="Response")
243
+
244
+ api_btn.click(
245
+ fn=lambda x: transform_api(json.loads(x)),
246
+ inputs=[api_input],
247
+ outputs=[api_output]
248
+ )
249
+
250
+ with gr.Column():
251
+ gr.Markdown("### GET /api/health")
252
+ health_btn = gr.Button("Check Health")
253
+ health_output = gr.JSON(label="Health Status")
254
+
255
+ health_btn.click(
256
+ fn=health_check,
257
+ inputs=[],
258
+ outputs=[health_output]
259
+ )
260
 
261
+ # Add API routes using Gradio's API
262
+ demo.api_names = ["transform", "health", "root"]
263
 
264
  print("✅ Affecto Inference API Ready!")
265
+ print(f"🌐 Gradio UI: https://gauravvjhaa-affecto-inference.hf.space/")
266
+
267
+ # Launch
268
+ if __name__ == "__main__":
269
+ demo.launch(server_name="0.0.0.0", server_port=7860)