Update app.py
app.py CHANGED
@@ -4,6 +4,7 @@ import numpy as np
 from transformers import DPTForDepthEstimation, DPTImageProcessor
 import gradio as gr
 import torch.nn.utils.prune as prune
+from DepthVisualizer import DepthVisualizer
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
@@ -31,6 +32,11 @@ model = model.to(device)
 
 processor = DPTImageProcessor.from_pretrained("Intel/dpt-swinv2-tiny-256")
 
+color_map = cv2.applyColorMap(np.arange(256, dtype=np.uint8), cv2.COLORMAP_INFERNO)
+color_map = torch.from_numpy(color_map).to(device)
+
+visualizer = DepthVisualizer()
+
 def preprocess_image(image):
     image = cv2.resize(image, (128, 128))
     image = torch.from_numpy(image).permute(2, 0, 1).unsqueeze(0).float().to(device)
@@ -47,14 +53,13 @@ def process_frame(image):
     # Normalize depth map
     depth_map = (depth_map - depth_map.min()) / (depth_map.max() - depth_map.min())
 
-    #
-
+    # Convert depth map to point cloud
+    point_cloud = visualizer.depth_map_to_point_cloud(depth_map)
 
-    #
-
-    blended = cv2.addWeighted(original_resized, 0.6, depth_color, 0.4, 0)
+    # Render point cloud
+    rendered_image = visualizer.render_frame(point_cloud)
 
-    return
+    return rendered_image
 
 interface = gr.Interface(
     fn=process_frame,
@@ -63,4 +68,4 @@ interface = gr.Interface(
     live=True
 )
 
-interface.launch()
+interface.launch()