Hugging Face Space — status: Runtime error.
Commit "Update app.py" — diff of app.py (changed) shown below.
|
@@ -16,34 +16,33 @@ processor = DPTImageProcessor.from_pretrained("Intel/dpt-swinv2-tiny-256")
|
|
| 16 |
color_map = cv2.applyColorMap(np.arange(256, dtype=np.uint8), cv2.COLORMAP_INFERNO)
|
| 17 |
|
| 18 |
input_tensor = torch.zeros((1, 3, 128, 128), dtype=torch.float32, device=device)
|
| 19 |
-
depth_map = np.zeros((128, 128), dtype=np.float32)
|
| 20 |
-
depth_map_colored = np.zeros((128, 128, 3), dtype=np.uint8)
|
| 21 |
|
| 22 |
def preprocess_image(image):
|
| 23 |
return cv2.resize(image, (128, 128), interpolation=cv2.INTER_AREA).transpose(2, 0, 1).astype(np.float32) / 255.0
|
| 24 |
|
| 25 |
@torch.inference_mode()
|
| 26 |
def process_frame(image):
|
|
|
|
|
|
|
| 27 |
preprocessed = preprocess_image(image)
|
| 28 |
input_tensor[0] = torch.from_numpy(preprocessed).to(device)
|
| 29 |
-
|
| 30 |
-
if torch.cuda.is_available():
|
| 31 |
-
torch.cuda.synchronize()
|
| 32 |
-
|
| 33 |
predicted_depth = model(input_tensor).predicted_depth
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
cv2.applyColorMap(depth_map, color_map, dst=depth_map_colored)
|
| 39 |
|
| 40 |
-
return depth_map_colored
|
| 41 |
|
| 42 |
interface = gr.Interface(
|
| 43 |
fn=process_frame,
|
| 44 |
inputs=gr.Image(sources="webcam", streaming=True),
|
| 45 |
outputs="image",
|
| 46 |
-
live=True
|
|
|
|
|
|
|
|
|
|
| 47 |
)
|
| 48 |
|
| 49 |
interface.launch()
|
|
|
|
| 16 |
# Precomputed 256-entry INFERNO lookup table (uint8 ramp -> BGR LUT),
# built once at import time and reused for every frame.
_ramp = np.arange(256, dtype=np.uint8)
color_map = cv2.applyColorMap(_ramp, cv2.COLORMAP_INFERNO)

# Reusable (batch=1, C=3, H=128, W=128) float32 input buffer, allocated once
# on the target device so per-frame inference does not reallocate.
input_tensor = torch.zeros((1, 3, 128, 128), dtype=torch.float32, device=device)
|
|
|
|
|
|
|
| 19 |
|
| 20 |
def preprocess_image(image):
    """Resize an HWC image to 128x128 and return a CHW float32 array in [0, 1].

    The input is assumed to be an HWC uint8 frame (as delivered by the
    webcam component); pixel values are scaled from [0, 255] to [0.0, 1.0].
    """
    resized = cv2.resize(image, (128, 128), interpolation=cv2.INTER_AREA)
    chw = resized.transpose(2, 0, 1)
    return chw.astype(np.float32) / 255.0
|
| 22 |
|
| 23 |
@torch.inference_mode()
def process_frame(image):
    """Estimate depth for one webcam frame and return it color-mapped as RGB.

    Parameters
    ----------
    image : numpy.ndarray | None
        HWC uint8 frame from Gradio's webcam stream, or None when no frame
        is available yet.

    Returns
    -------
    numpy.ndarray | None
        128x128x3 uint8 RGB visualization of the predicted depth, or None
        when `image` is None.
    """
    if image is None:
        return None

    # Fill the preallocated module-level buffer instead of allocating a new
    # tensor per frame.
    preprocessed = preprocess_image(image)
    input_tensor[0] = torch.from_numpy(preprocessed).to(device)

    predicted_depth = model(input_tensor).predicted_depth
    depth_map = predicted_depth.squeeze().cpu().numpy()

    # Normalize to [0, 1]. Guard against a constant depth map (max == min):
    # the previous code divided by zero there, producing NaNs that were then
    # cast to uint8 garbage.
    lo, hi = depth_map.min(), depth_map.max()
    if hi > lo:
        depth_map = (depth_map - lo) / (hi - lo)
    else:
        depth_map = np.zeros_like(depth_map)
    depth_map = (depth_map * 255).astype(np.uint8)

    # `color_map` is the precomputed 256-entry LUT (userColor overload of
    # applyColorMap). OpenCV output is BGR; convert so Gradio displays RGB.
    depth_map_colored = cv2.applyColorMap(depth_map, color_map)
    return cv2.cvtColor(depth_map_colored, cv2.COLOR_BGR2RGB)
|
| 37 |
|
| 38 |
# NOTE(review): `update_interval` is not a parameter of gr.Interface and
# raises TypeError at startup — the likely cause of this Space's
# "Runtime error". It is removed here. With `streaming=True` on the input
# plus `live=True`, Gradio re-runs `process_frame` on each new webcam
# frame, so no polling interval is needed.
interface = gr.Interface(
    fn=process_frame,
    inputs=gr.Image(sources="webcam", streaming=True),
    outputs="image",
    live=True,
    batch=False,        # one frame per call
    max_batch_size=1,
)

interface.launch()
|