ecopus committed on
Commit
e2028f3
·
verified ·
1 Parent(s): 3b734dd

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +127 -0
app.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# app.py
# Gradio Space: binary stop-sign detection using an AutoGluon
# MultiModalPredictor downloaded from the Hugging Face Hub.
import os
import io
import zipfile
import shutil
import tempfile
import pathlib

import pandas as pd
from PIL import Image
import gradio as gr
from huggingface_hub import hf_hub_download
import autogluon.multimodal as agmm

# Hub repo hosting the zipped, natively-saved predictor directory.
MODEL_REPO_ID = "its-zion-18/sign-image-autogluon-predictor"
ZIP_FILENAME = "autogluon_image_predictor_dir.zip"
CACHE_DIR = pathlib.Path("hf_assets")          # local download cache
EXTRACT_DIR = CACHE_DIR / "predictor_native"   # where the zip is unpacked
PREVIEW_SIZE = (224, 224)                      # preview thumbnail size (w, h)
MAX_UPLOAD_BYTES = 20 * 1024 * 1024 # Allow up to 20 MB now

# Example images shown in the UI; opened eagerly at import time.
# NOTE(review): assumes these files sit next to app.py — confirm in the
# Space repo; Image.open is lazy and keeps the file handles open.
ex1_path = 'IMG_0059.png'
ex2_path = 'IMG_0064.png'
ex3_path = 'IMG_8689.jpg'
ex1 = Image.open(ex1_path)
ex2 = Image.open(ex2_path)
ex3 = Image.open(ex3_path)
EXAMPLE_IMAGES = [ex1, ex2, ex3]

# Class index -> human-readable label for the binary classifier.
CLASS_LABELS = {0: "Does not have stop sign", 1: "Has stop sign"}
32
# Download & load predictor
def _download_and_extract_predictor() -> str:
    """Download the zipped predictor from the Hub and unpack it locally.

    Returns:
        str: directory to hand to ``MultiModalPredictor.load`` — the
        archive's single top-level folder when there is exactly one,
        otherwise the extraction root itself.
    """
    CACHE_DIR.mkdir(parents=True, exist_ok=True)
    # NOTE: `local_dir_use_symlinks` is deprecated and ignored by recent
    # huggingface_hub releases; `local_dir` alone yields a real file copy.
    local_zip = hf_hub_download(
        repo_id=MODEL_REPO_ID,
        filename=ZIP_FILENAME,
        repo_type="model",
        local_dir=str(CACHE_DIR),
    )
    # Re-extract from scratch so stale files never mix with a newer model.
    if EXTRACT_DIR.exists():
        shutil.rmtree(EXTRACT_DIR)
    EXTRACT_DIR.mkdir(parents=True, exist_ok=True)
    with zipfile.ZipFile(local_zip, "r") as zf:
        zf.extractall(str(EXTRACT_DIR))
    # Zips often wrap everything in one folder; unwrap it when they do.
    contents = list(EXTRACT_DIR.iterdir())
    predictor_root = contents[0] if (len(contents) == 1 and contents[0].is_dir()) else EXTRACT_DIR
    return str(predictor_root)
50
+
51
def load_predictor() -> agmm.MultiModalPredictor:
    """Fetch the model artifacts and return the restored predictor."""
    return agmm.MultiModalPredictor.load(_download_and_extract_predictor())

# Loaded once at import time so every request reuses the same predictor.
PREDICTOR = load_predictor()
56
+
57
# Helpers
def pil_preprocess_preview(pil_img: Image.Image, target_size=PREVIEW_SIZE) -> Image.Image:
    """Return an RGB copy of *pil_img* resized (bilinear) to *target_size*."""
    rgb = pil_img.convert("RGB")
    return rgb.resize(target_size, Image.BILINEAR)
60
+
61
def run_predict_binary(predictor, pil_img: Image.Image):
    """Classify a single PIL image with *predictor*.

    The image is written to a temporary file because
    MultiModalPredictor consumes a DataFrame of image *paths*.

    Returns:
        tuple: (pred_label, prob_dict) — the winning human-readable
        label and a {label: probability} dict covering both classes.
    """
    # TemporaryDirectory guarantees cleanup even when predict_proba
    # raises; the original try/except-pass only ran on the happy path,
    # leaking one temp dir per failed request.
    with tempfile.TemporaryDirectory() as tmpd:
        tmp_path = pathlib.Path(tmpd) / "input.png"
        pil_img.save(tmp_path)
        input_df = pd.DataFrame({"image": [str(tmp_path)]})

        probs_df = predictor.predict_proba(input_df)
        row = probs_df.iloc[0]

    # Positional access (.iloc) instead of row[0]/row[1]: robust to the
    # dtype of the probability column labels. Assumes column order is
    # [negative, positive] — TODO confirm against the trained predictor.
    prob_dict = {
        CLASS_LABELS[0]: float(row.iloc[0]),
        CLASS_LABELS[1]: float(row.iloc[1]),
    }
    # Pick the higher-probability class (ties resolve to class 0,
    # matching idxmax's first-wins behavior).
    pred_label = CLASS_LABELS[int(row.iloc[1] > row.iloc[0])]

    return pred_label, prob_dict
84
+
85
# Gradio callback
def infer_and_display(image: Image.Image):
    """Run the classifier on *image* and build the four UI outputs."""
    if image is None:
        return None, None, "No image provided.", {}

    # Re-encode to PNG to measure the upload size; shrink big images
    # in place (thumbnail preserves aspect ratio).
    buffer = io.BytesIO()
    image.save(buffer, format="PNG")
    if len(buffer.getvalue()) > MAX_UPLOAD_BYTES:
        max_side = 1024
        image.thumbnail((max_side, max_side))

    preview = pil_preprocess_preview(image, PREVIEW_SIZE)
    pred_label, probs = run_predict_binary(PREDICTOR, image)
    return image, preview, f"Prediction: {pred_label}", probs
100
+
101
# Build Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Stop Sign Detection — AutoGluon Predictor")
    gr.Markdown(
        "Upload an image or pick one of the examples. "
        "The app shows the original and preprocessed images, and predicts whether the image **has a stop sign**."
    )

    with gr.Row():
        # Left column: upload widget, trigger button, example gallery.
        with gr.Column(scale=1):
            # NOTE(review): gr.Image's `sources` is documented as a list
            # (e.g. ["upload"]); confirm a bare string is accepted by the
            # Gradio version pinned for this Space.
            image_in = gr.Image(type="pil", label="Upload an image", sources="upload")
            run_btn = gr.Button("Run inference")
            gr.Examples(EXAMPLE_IMAGES, inputs=[image_in], label="Example images", cache_examples=False)
        # Right column: echo of the original plus the preprocessed preview.
        with gr.Column(scale=1):
            gr.Markdown("**Original image**")
            orig_out = gr.Image(type="pil")
            gr.Markdown("**Preprocessed image (preview)**")
            pre_out = gr.Image(type="pil")

    out_text = gr.Textbox(label="Prediction", interactive=False)
    proba_label = gr.Label(label="Class probabilities")

    # Wire the button to the callback: one image in, four outputs back.
    run_btn.click(fn=infer_and_display, inputs=[image_in], outputs=[orig_out, pre_out, out_text, proba_label])

if __name__ == "__main__":
    demo.launch()