phxdev Claude committed
Commit 5b9c385 · 1 Parent(s): 34c499b

Fix compatibility issues with stable diffusers version


- Use stable diffusers 0.30.3 instead of git dev version
- Use transformers 4.44.2 for compatibility
- Remove PEFT dependency causing import errors
- Switch back to single LoRA selection instead of simultaneous loading
- Download all LoRAs at startup but load one at a time

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <[email protected]>

Files changed (2):
  1. app.py +22 -26
  2. requirements.txt +2 -3
app.py CHANGED

@@ -40,12 +40,11 @@ def download_lora_from_url(url, filename):
     print(f"Downloaded {filename}")
     return filename
 
-def preload_and_load_all_loras():
-    """Download and load all LoRAs simultaneously at startup"""
+def preload_loras():
+    """Download all LoRAs at startup for later use"""
     global loaded_loras
 
-    print("Downloading and loading all LoRAs...")
-    adapters_to_load = []
+    print("Downloading all LoRAs...")
 
     for lora_name, lora_path in LORAS.items():
         if lora_name == "None" or lora_path is None:
@@ -57,28 +56,12 @@ def preload_and_load_all_loras():
             lora_path = download_lora_from_url(lora_path, filename)
 
         loaded_loras[lora_name] = lora_path
-        adapters_to_load.append(lora_path)
         print(f"Downloaded {lora_name}")
 
-    # Load all LoRAs with different adapter names
-    for i, lora_path in enumerate(adapters_to_load):
-        try:
-            adapter_name = f"adapter_{i}"
-            pipe.load_lora_weights(lora_path, adapter_name=adapter_name)
-            print(f"Loaded adapter {adapter_name}")
-        except Exception as e:
-            print(f"Failed to load {lora_path}: {e}")
-
-    # Set all adapters as active
-    try:
-        adapter_names = [f"adapter_{i}" for i in range(len(adapters_to_load))]
-        pipe.set_adapters(adapter_names)
-        print(f"All {len(adapters_to_load)} LoRAs active!")
-    except Exception as e:
-        print(f"Failed to activate adapters: {e}")
+    print(f"All {len(loaded_loras)} LoRAs downloaded and ready!")
 
-# Load all LoRAs at startup
-preload_and_load_all_loras()
+# Download all LoRAs at startup
+preload_loras()
 
 torch.cuda.empty_cache()
 
@@ -88,11 +71,19 @@ MAX_IMAGE_SIZE = 2048
 pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
 
 @spaces.GPU(duration=75)
-def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
+def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=28, lora_selection="None", progress=gr.Progress(track_tqdm=True)):
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
     generator = torch.Generator().manual_seed(seed)
 
+    # Load selected LoRA
+    if lora_selection != "None" and lora_selection in loaded_loras:
+        try:
+            pipe.load_lora_weights(loaded_loras[lora_selection])
+            pipe.fuse_lora(lora_scale=1.0)
+        except Exception as e:
+            print(f"Failed to load LoRA {lora_selection}: {e}")
+
     for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
         prompt=prompt,
         guidance_scale=guidance_scale,
@@ -142,7 +133,12 @@ with gr.Blocks(css=css) as demo:
 
     with gr.Accordion("Advanced Settings", open=False):
 
-        gr.Markdown("**LoRAs Active:** All LoRAs are loaded and active simultaneously")
+        lora_selection = gr.Dropdown(
+            label="LoRA",
+            choices=list(LORAS.keys()),
+            value="None",
+            info="Select a LoRA to enhance image generation"
+        )
 
         seed = gr.Slider(
             label="Seed",
@@ -201,7 +197,7 @@ with gr.Blocks(css=css) as demo:
     gr.on(
         triggers=[run_button.click, prompt.submit],
         fn = infer,
-        inputs = [prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
+        inputs = [prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, lora_selection],
         outputs = [result, seed]
     )

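One caveat with the new single-LoRA path: `fuse_lora()` folds the adapter into the base weights, so a second `infer` call with a different selection can stack adapters on top of each other. A minimal reset sketch, not part of this commit, assuming the diffusers 0.30.x pipeline LoRA API (`unfuse_lora` / `unload_lora_weights`) and the `pipe` / `loaded_loras` globals from app.py:

```python
# Hypothetical helper (not in this commit): undo any previously fused LoRA
# before loading the next selection, so weights don't accumulate across calls.
def switch_lora(pipe, loaded_loras, lora_selection):
    try:
        pipe.unfuse_lora()          # reverse an earlier fuse_lora(), if any
        pipe.unload_lora_weights()  # drop the loaded adapter entirely
    except Exception as e:
        print(f"Nothing to unload: {e}")

    if lora_selection != "None" and lora_selection in loaded_loras:
        pipe.load_lora_weights(loaded_loras[lora_selection])
        pipe.fuse_lora(lora_scale=1.0)
```

Calling a helper like this at the top of `infer`, in place of the inline try/except, would keep each request on exactly one adapter.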
requirements.txt CHANGED

@@ -1,8 +1,7 @@
 accelerate
-git+https://github.com/huggingface/diffusers.git
+diffusers==0.30.3
 torch
-transformers>=4.44.0
+transformers==4.44.2
 xformers
 sentencepiece
-peft
 requests
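Since the pins exist specifically to avoid API drift, a startup sanity check can fail fast when the installed environment doesn't match. A minimal sketch; the expected strings simply mirror the pins above:

```python
# Optional startup check: abort early if installed versions drift from the pins.
import diffusers
import transformers

assert diffusers.__version__ == "0.30.3", f"got diffusers {diffusers.__version__}"
assert transformers.__version__ == "4.44.2", f"got transformers {transformers.__version__}"
```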