RyeCatcher committed · verified
Commit 8164ab2 · 1 Parent: 42f9ec9

Upload dj_reactor/main.py with huggingface_hub

Files changed (1):
  dj_reactor/main.py  +583  -0
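
For reference, uploads like this one are normally pushed with the huggingface_hub Python client. Below is a minimal sketch of the call; the repo_id "RyeCatcher/dj-reactor" and repo_type "space" are assumptions for illustration, not taken from this commit.

    from huggingface_hub import HfApi

    api = HfApi()  # uses the token from `huggingface-cli login` or the HF_TOKEN env var
    api.upload_file(
        path_or_fileobj="dj_reactor/main.py",   # local file to upload
        path_in_repo="dj_reactor/main.py",      # destination path inside the repo
        repo_id="RyeCatcher/dj-reactor",        # assumed repo id
        repo_type="space",                      # assumed; omit or use "model" for a model repo
        commit_message="Upload dj_reactor/main.py with huggingface_hub",
    )
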
dj_reactor/main.py ADDED
@@ -0,0 +1,583 @@
"""
DJ Reactor - Music Visualizer for Reachy Mini

A physical music companion that analyzes audio and moves expressively to the beat.
Compatible with ReachyMiniApp format for HuggingFace/dashboard distribution.
"""

import math
import time
import threading
import logging
from dataclasses import dataclass
from typing import Optional
from collections import deque

import numpy as np
import gradio as gr

try:
    import sounddevice as sd
except ImportError:
    sd = None

from reachy_mini import ReachyMini, ReachyMiniApp
from reachy_mini.utils import create_head_pose

logger = logging.getLogger(__name__)

# =============================================================================
# Configuration
# =============================================================================

BASS_RANGE = (20, 250)
MID_RANGE = (250, 2000)
TREBLE_RANGE = (2000, 12000)


@dataclass
class GenrePreset:
    """Movement characteristics for a music genre."""
    name: str
    display_name: str
    head_bob_amplitude: float = 8.0
    head_bob_speed: float = 1.0
    body_sway_amplitude: float = 20.0
    body_sway_speed: float = 1.0
    antenna_amplitude: float = 0.5
    emphasis_style: str = "nod"  # headbang, nod, tilt
    movement_smoothing: float = 0.3


GENRE_PRESETS = {
    "rock": GenrePreset(
        name="rock", display_name="Rock",
        head_bob_amplitude=18.0, head_bob_speed=1.0,
        body_sway_amplitude=70.0, antenna_amplitude=1.0,
        emphasis_style="headbang", movement_smoothing=0.2,
    ),
    "electronic": GenrePreset(
        name="electronic", display_name="Electronic/EDM",
        head_bob_amplitude=15.0, head_bob_speed=1.0,
        body_sway_amplitude=75.0, body_sway_speed=1.0,
        antenna_amplitude=0.9, emphasis_style="nod",
        movement_smoothing=0.25,
    ),
    "jazz": GenrePreset(
        name="jazz", display_name="Jazz",
        head_bob_amplitude=12.0, head_bob_speed=1.2,
        body_sway_amplitude=80.0, body_sway_speed=1.2,
        antenna_amplitude=0.7, emphasis_style="tilt",
        movement_smoothing=0.35,
    ),
    "pop": GenrePreset(
        name="pop", display_name="Pop",
        head_bob_amplitude=14.0, head_bob_speed=1.0,
        body_sway_amplitude=70.0, antenna_amplitude=0.8,
        emphasis_style="nod", movement_smoothing=0.25,
    ),
    "classical": GenrePreset(
        name="classical", display_name="Classical",
        head_bob_amplitude=10.0, head_bob_speed=1.5,
        body_sway_amplitude=85.0, body_sway_speed=1.5,
        antenna_amplitude=0.6, emphasis_style="tilt",
        movement_smoothing=0.4,
    ),
    "hiphop": GenrePreset(
        name="hiphop", display_name="Hip-Hop",
        head_bob_amplitude=16.0, head_bob_speed=0.9,
        body_sway_amplitude=65.0, antenna_amplitude=0.8,
        emphasis_style="nod", movement_smoothing=0.2,
    ),
    "chill": GenrePreset(
        name="chill", display_name="Chill/Ambient",
        head_bob_amplitude=10.0, head_bob_speed=1.5,
        body_sway_amplitude=75.0, body_sway_speed=1.5,
        antenna_amplitude=0.6, emphasis_style="tilt",
        movement_smoothing=0.45,
    ),
}

# =============================================================================
# Audio Analysis
# =============================================================================

@dataclass
class AudioFeatures:
    """Extracted audio features for a single frame."""
    bass: float = 0.0
    mid: float = 0.0
    treble: float = 0.0
    rms: float = 0.0
    beat_detected: bool = False
    onset_strength: float = 0.0  # How strong the beat onset is
    bpm: float = 120.0
    beat_phase: float = 0.0  # 0-1, position within current beat cycle
    is_silent: bool = True


class AudioAnalyzer:
    """Real-time audio analysis for beat detection and frequency bands."""

    def __init__(self, sample_rate: int = 44100, chunk_size: int = 2048,
                 device_index: Optional[int] = None, sensitivity: float = 0.6):
        self.sample_rate = sample_rate
        self.chunk_size = chunk_size
        self.device_index = device_index
        self.sensitivity = sensitivity

        # FFT setup
        freqs = np.fft.rfftfreq(chunk_size, 1.0 / sample_rate)
        self.bass_bins = np.where((freqs >= BASS_RANGE[0]) & (freqs <= BASS_RANGE[1]))[0]
        self.mid_bins = np.where((freqs >= MID_RANGE[0]) & (freqs <= MID_RANGE[1]))[0]
        self.treble_bins = np.where((freqs >= TREBLE_RANGE[0]) & (freqs <= TREBLE_RANGE[1]))[0]

        # Beat tracking
        self.energy_history = deque(maxlen=10)
        self.beat_times = deque(maxlen=50)
        self.last_beat_time = 0.0
        self.estimated_bpm = 120.0
        self.beat_interval = 0.5  # seconds between beats (60/120 BPM)

        # State
        self.is_running = False
        self.stream = None
        self.latest_features = AudioFeatures()
        self.start_time = 0.0

    def _audio_callback(self, indata, frames, time_info, status):
        """Process incoming audio data."""
        if len(indata.shape) > 1:
            audio = np.mean(indata, axis=1)
        else:
            audio = indata.flatten()

        current_time = time.time() - self.start_time

        # RMS energy
        rms = np.sqrt(np.mean(audio ** 2))
        is_silent = rms < 0.001

        # FFT analysis
        windowed = audio * np.hanning(len(audio))
        if len(windowed) < self.chunk_size:
            windowed = np.pad(windowed, (0, self.chunk_size - len(windowed)))
        spectrum = np.abs(np.fft.rfft(windowed[:self.chunk_size]))

        # Extract band energies (normalized for loopback audio)
        bass = min(np.mean(spectrum[self.bass_bins]) / 3.0, 1.0) if len(self.bass_bins) > 0 else 0
        mid = min(np.mean(spectrum[self.mid_bins]) / 2.0, 1.0) if len(self.mid_bins) > 0 else 0
        treble = min(np.mean(spectrum[self.treble_bins]) / 1.0, 1.0) if len(self.treble_bins) > 0 else 0

        # Beat detection with sensitivity
        self.energy_history.append(rms)
        beat_detected = False
        onset_strength = 0.0
        onset_threshold = 1.1 + (1.0 - self.sensitivity) * 0.5  # Higher sensitivity = lower threshold
        min_interval = 0.2 + (1.0 - self.sensitivity) * 0.2

        if len(self.energy_history) >= 3:
            avg_energy = np.mean(list(self.energy_history)[:-1])
            onset = rms / (avg_energy + 1e-10)
            onset_strength = min(onset / onset_threshold, 2.0)  # Normalized onset strength
            if onset > onset_threshold and (current_time - self.last_beat_time) > min_interval and rms > 0.002:
                beat_detected = True
                self.beat_times.append(current_time)
                self.last_beat_time = current_time
                self._update_bpm()

        # Calculate beat phase (0-1 position within beat cycle)
        time_since_beat = current_time - self.last_beat_time
        beat_phase = (time_since_beat / self.beat_interval) % 1.0 if self.beat_interval > 0 else 0.0

        self.latest_features = AudioFeatures(
            bass=bass, mid=mid, treble=treble,
            rms=min(rms * 10, 1.0),
            beat_detected=beat_detected,
            onset_strength=onset_strength,
            bpm=self.estimated_bpm,
            beat_phase=beat_phase,
            is_silent=is_silent
        )

    def _update_bpm(self):
        """Estimate BPM from beat times."""
        if len(self.beat_times) < 4:
            return
        times = list(self.beat_times)
        intervals = [times[i + 1] - times[i] for i in range(len(times) - 1)]
        median = np.median(intervals)
        valid = [i for i in intervals if 0.5 * median < i < 2 * median]
        if valid:
            avg_interval = np.mean(valid)
            self.beat_interval = avg_interval
            self.estimated_bpm = max(60, min(200, 60.0 / avg_interval))

    def start(self):
        """Start audio capture."""
        if self.is_running or sd is None:
            return
        self.start_time = time.time()
        self.is_running = True
        self.stream = sd.InputStream(
            device=self.device_index,
            channels=1,
            samplerate=self.sample_rate,
            blocksize=self.chunk_size,
            callback=self._audio_callback,
        )
        self.stream.start()
        logger.info("Audio capture started")

    def stop(self):
        """Stop audio capture."""
        self.is_running = False
        if self.stream:
            self.stream.stop()
            self.stream.close()
            self.stream = None

    def get_latest(self) -> AudioFeatures:
        """Get most recent audio features."""
        return self.latest_features

    def update_sensitivity(self, sensitivity: float):
        """Update beat detection sensitivity."""
        self.sensitivity = max(0.2, min(1.0, sensitivity))


def list_audio_devices():
    """List available audio input devices."""
    if sd is None:
        return []
    devices = []
    for i, dev in enumerate(sd.query_devices()):
        if dev['max_input_channels'] > 0:
            devices.append({'index': i, 'name': dev['name']})
    return devices

# =============================================================================
# Movement System
# =============================================================================

class DanceController:
    """Maps audio to robot movements with genre-specific styles."""

    def __init__(self, preset: GenrePreset, intensity: float = 0.7):
        self.preset = preset
        self.intensity = intensity
        self.dance_time = 0.0

        # Smoothing state
        self.smooth_head_z = 0.0
        self.smooth_head_roll = 0.0
        self.smooth_body_yaw = 0.0
        self.smooth_antenna_l = 0.0
        self.smooth_antenna_r = 0.0

    def update_preset(self, preset: GenrePreset):
        """Change the active genre preset."""
        self.preset = preset

    def update_intensity(self, intensity: float):
        """Update movement intensity."""
        self.intensity = max(0.1, min(1.0, intensity))

    def get_movement(self, features: AudioFeatures):
        """Calculate movement based on audio features and genre preset."""
        self.dance_time += 0.1  # For tilt alternation; matches the loop rate
        preset = self.preset

        # Use beat_phase (0-1) synced to actual detected beats, converted to radians
        phase = features.beat_phase * 2 * math.pi

        # Base energy - always move, audio makes the motion bigger
        base_energy = 0.8 + 0.2 * features.rms  # Always at least 80% movement
        energy = base_energy * self.intensity

        # BODY SWAY - large sweeping motion
        bass_boost = 0.8 + 0.5 * features.bass
        body_target = preset.body_sway_amplitude * energy * bass_boost * math.sin(phase * preset.body_sway_speed)

        # HEAD MOVEMENT - dramatic bobbing and rolling
        mid_boost = 0.7 + 0.6 * features.mid
        head_z_target = preset.head_bob_amplitude * energy * 1.2 * math.sin(phase * preset.head_bob_speed)
        head_roll_target = preset.head_bob_amplitude * 2.0 * energy * mid_boost * math.sin(phase * 0.5)

        # Beat-triggered emphasis - punch the beats
        head_pitch = 0
        if features.beat_detected:
            strength = max(features.onset_strength, 1.2) * self.intensity
            if preset.emphasis_style == "headbang":
                head_pitch = -35 * strength
            elif preset.emphasis_style == "nod":
                head_pitch = -25 * strength
            elif preset.emphasis_style == "tilt":
                head_roll_target += 30 * strength * (1 if self.dance_time % 2 > 1 else -1)

        # ANTENNAS - bouncy and expressive
        treble_boost = 0.6 + 0.6 * features.treble
        ant_amp = preset.antenna_amplitude * energy * treble_boost * 2.0
        antenna_l_target = ant_amp * math.sin(phase * 2)
        antenna_r_target = ant_amp * math.sin(phase * 2 + math.pi)

        # Smoothing - applied to all movements for fluid motion
        smooth = preset.movement_smoothing
        self.smooth_head_z = smooth * self.smooth_head_z + (1 - smooth) * head_z_target
        self.smooth_head_roll = smooth * self.smooth_head_roll + (1 - smooth) * head_roll_target
        self.smooth_body_yaw = smooth * self.smooth_body_yaw + (1 - smooth) * body_target
        self.smooth_antenna_l = smooth * self.smooth_antenna_l + (1 - smooth) * antenna_l_target
        self.smooth_antenna_r = smooth * self.smooth_antenna_r + (1 - smooth) * antenna_r_target

        return {
            'head_z': max(-20, min(20, self.smooth_head_z)),
            'head_roll': max(-45, min(45, self.smooth_head_roll)),
            'head_pitch': max(-45, min(45, head_pitch)),
            'body_yaw': max(-55, min(55, self.smooth_body_yaw)),
            'antenna_left': max(-1.0, min(1.0, self.smooth_antenna_l)),
            'antenna_right': max(-1.0, min(1.0, self.smooth_antenna_r)),
        }

# =============================================================================
# ReachyMiniApp
# =============================================================================

class ReachyMiniDjReactor(ReachyMiniApp):
    """DJ Reactor - Music visualizer for Reachy Mini."""

    custom_app_url: str | None = "http://localhost:7861"
    dont_start_webserver: bool = True  # We handle Gradio ourselves
    request_media_backend: str | None = "no_media"  # DJ Reactor doesn't need camera

    def __init__(self):
        super().__init__()
        self.analyzer: Optional[AudioAnalyzer] = None
        self.controller: Optional[DanceController] = None
        self.is_vibing = False
        self.current_genre = "electronic"
        self.intensity = 0.7
        self.sensitivity = 0.6
        self.latest_features = AudioFeatures()

    def run(self, reachy_mini: ReachyMini, stop_event: threading.Event):
        """Main loop - called by dashboard."""

        # Start Gradio UI in background
        ui_thread = threading.Thread(
            target=self._run_ui,
            args=(reachy_mini, stop_event),
            daemon=True
        )
        ui_thread.start()

        # Main dance loop
        while not stop_event.is_set():
            if self.is_vibing and self.analyzer and self.controller:
                features = self.analyzer.get_latest()
                self.latest_features = features

                if not features.is_silent:
                    movement = self.controller.get_movement(features)
                    try:
                        head_pose = create_head_pose(
                            z=movement['head_z'],
                            roll=movement['head_roll'],
                            mm=True,
                            degrees=True
                        )
                        reachy_mini.goto_target(
                            head=head_pose,
                            antennas=[movement['antenna_left'], movement['antenna_right']],
                            body_yaw=np.deg2rad(movement['body_yaw']),
                            duration=0.12,
                            method="minjerk"
                        )
                    except Exception as e:
                        logger.debug(f"Movement error: {e}")

            time.sleep(0.1)  # 10 fps - let movements complete before the next command

        # Cleanup
        if self.analyzer:
            self.analyzer.stop()

    def _run_ui(self, reachy_mini: ReachyMini, stop_event: threading.Event):
        """Run Gradio UI with genre selection and visualizers."""
        devices = list_audio_devices()
        device_names = [d['name'] for d in devices]
        genre_choices = [(p.display_name, name) for name, p in GENRE_PRESETS.items()]

        def start_vibing(device_name, genre, intensity, sensitivity):
            if self.is_vibing:
                return get_status()

            # Find device index
            device_idx = None
            for d in devices:
                if d['name'] == device_name:
                    device_idx = d['index']
                    break

            self.current_genre = genre
            self.intensity = intensity
            self.sensitivity = sensitivity

            preset = GENRE_PRESETS.get(genre, GENRE_PRESETS["electronic"])
            self.controller = DanceController(preset, intensity)
            self.analyzer = AudioAnalyzer(device_index=device_idx, sensitivity=sensitivity)
            self.analyzer.start()
            self.is_vibing = True
            return get_status()

        def stop_vibing():
            self.is_vibing = False
            if self.analyzer:
                self.analyzer.stop()
                self.analyzer = None
            return get_status()

        def change_genre(genre):
            self.current_genre = genre
            if self.controller:
                preset = GENRE_PRESETS.get(genre, GENRE_PRESETS["electronic"])
                self.controller.update_preset(preset)
            return get_status()

        def update_intensity(intensity):
            self.intensity = intensity
            if self.controller:
                self.controller.update_intensity(intensity)

        def update_sensitivity(sensitivity):
            self.sensitivity = sensitivity
            if self.analyzer:
                self.analyzer.update_sensitivity(sensitivity)

        def bar(value, color):
            """Generate a colored bar HTML."""
            width = int(value * 100)
            return f'''<div style="background: #e0e0e0; border-radius: 4px; overflow: hidden;">
                <div style="width: {width}%; height: 12px; background: {color}; transition: width 0.1s;"></div>
            </div>'''

        def get_status():
            f = self.latest_features
            status = "Vibing!" if self.is_vibing else "Ready"
            status_color = "#4CAF50" if self.is_vibing else "#666"
            beat_indicator = " *" if f.beat_detected and self.is_vibing else ""
            genre_name = GENRE_PRESETS.get(self.current_genre, GENRE_PRESETS["electronic"]).display_name

            return f"""
            <div style="padding: 15px;">
                <div style="text-align: center; margin-bottom: 15px;">
                    <div style="font-size: 18px; font-weight: bold; color: {status_color};">
                        {status}{beat_indicator}
                    </div>
                    <div style="font-size: 12px; color: #888; margin-top: 4px;">{genre_name}</div>
                </div>

                <div style="text-align: center; margin-bottom: 20px;">
                    <div style="font-size: 48px; font-family: monospace; font-weight: bold;">
                        {f.bpm:.0f}
                    </div>
                    <div style="font-size: 14px; color: #666;">BPM</div>
                </div>

                <div style="background: #f5f5f5; padding: 15px; border-radius: 10px;">
                    <div style="margin-bottom: 12px;">
                        <div style="font-size: 12px; color: #666; margin-bottom: 4px;">Bass</div>
                        {bar(f.bass, '#e91e63')}
                    </div>
                    <div style="margin-bottom: 12px;">
                        <div style="font-size: 12px; color: #666; margin-bottom: 4px;">Mid</div>
                        {bar(f.mid, '#9c27b0')}
                    </div>
                    <div style="margin-bottom: 12px;">
                        <div style="font-size: 12px; color: #666; margin-bottom: 4px;">Treble</div>
                        {bar(f.treble, '#3f51b5')}
                    </div>
                    <div>
                        <div style="font-size: 12px; color: #666; margin-bottom: 4px;">Energy</div>
                        {bar(f.rms, '#4CAF50')}
                    </div>
                </div>
            </div>
            """

        with gr.Blocks(title="DJ Reactor", theme=gr.themes.Soft()) as demo:
            gr.HTML("""
                <div style="background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
                            color: white; padding: 25px; border-radius: 15px; text-align: center;
                            margin-bottom: 15px;">
                    <h1 style="margin: 0; font-size: 32px;">DJ Reactor</h1>
                    <p style="margin: 8px 0 0 0; opacity: 0.9;">Reachy Mini dances to your music</p>
                </div>
            """)

            with gr.Row():
                with gr.Column(scale=1):
                    device_dropdown = gr.Dropdown(
                        choices=device_names,
                        value=device_names[0] if device_names else None,
                        label="Audio Input Device",
                        info="Select microphone or loopback"
                    )

                    genre_radio = gr.Radio(
                        choices=genre_choices,
                        value="electronic",
                        label="Music Genre",
                        info="Movement style"
                    )

                    intensity_slider = gr.Slider(
                        0.1, 1.0, value=0.7, step=0.1,
                        label="Movement Intensity",
                        info="How dramatic the movements are"
                    )

                    sensitivity_slider = gr.Slider(
                        0.2, 1.0, value=0.6, step=0.1,
                        label="Beat Sensitivity",
                        info="How easily beats are detected"
                    )

                    with gr.Row():
                        start_btn = gr.Button("Start Vibing", variant="primary", size="lg")
                        stop_btn = gr.Button("Stop", size="lg")

                with gr.Column(scale=1):
                    status_html = gr.HTML(value=get_status())

            # Auto-refresh status
            timer = gr.Timer(value=0.3)
            timer.tick(fn=get_status, outputs=[status_html])

            # Event handlers
            start_btn.click(
                fn=start_vibing,
                inputs=[device_dropdown, genre_radio, intensity_slider, sensitivity_slider],
                outputs=[status_html]
            )
            stop_btn.click(fn=stop_vibing, outputs=[status_html])
            genre_radio.change(fn=change_genre, inputs=[genre_radio], outputs=[status_html])
            intensity_slider.change(fn=update_intensity, inputs=[intensity_slider])
            sensitivity_slider.change(fn=update_sensitivity, inputs=[sensitivity_slider])

        demo.launch(server_port=7861, quiet=True, prevent_thread_lock=True)

        # Wait for stop
        while not stop_event.is_set():
            time.sleep(1)


# For standalone testing
if __name__ == "__main__":
    app = ReachyMiniDjReactor()
    try:
        app.wrapped_run()
    except KeyboardInterrupt:
        app.stop()
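
The beat detector above is just an energy-ratio onset rule: a rolling RMS history, a threshold scaled by the sensitivity slider, and a minimum gap between accepted beats. To try that rule without a robot or a sound card, here is a self-contained sketch that re-implements it on a synthetic click track; it does not import the module above, and the helper name detect_beats is illustrative only.

    import numpy as np
    from collections import deque

    def detect_beats(chunks, sample_rate=44100, chunk_size=2048, sensitivity=0.6):
        """Return indices of chunks where the energy-ratio onset rule fires."""
        history = deque(maxlen=10)
        threshold = 1.1 + (1.0 - sensitivity) * 0.5   # same formula as AudioAnalyzer
        min_gap = 0.2 + (1.0 - sensitivity) * 0.2     # seconds between accepted beats
        chunk_dt = chunk_size / sample_rate
        last_beat, beats = -1e9, []
        for i, chunk in enumerate(chunks):
            rms = np.sqrt(np.mean(chunk ** 2))
            history.append(rms)
            t = i * chunk_dt
            if len(history) >= 3:
                avg = np.mean(list(history)[:-1])     # average energy of earlier chunks
                if rms / (avg + 1e-10) > threshold and (t - last_beat) > min_gap and rms > 0.002:
                    beats.append(i)
                    last_beat = t
        return beats

    # Synthetic 120 BPM click track: a 200 Hz burst every 0.5 s over 5 s of near-silence.
    sr, chunk = 44100, 2048
    signal = np.random.randn(5 * sr) * 1e-4
    for start in range(0, 5 * sr, sr // 2):
        signal[start:start + 2000] += 0.5 * np.sin(2 * np.pi * 200 * np.arange(2000) / sr)
    chunks = [signal[i:i + chunk] for i in range(0, len(signal) - chunk, chunk)]
    print(detect_beats(chunks))  # expect roughly one index per half second of audio
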