Update app.py
app.py CHANGED
@@ -56,16 +56,16 @@ Rules:

 Output only the final instruction in plain text and nothing else."""

-# Model repository IDs for
-REPO_ID_REGULAR = "diffusers-internal-dev/dummy-1015-
-REPO_ID_DISTILLED = "diffusers-internal-dev/dummy-1015-
+# Model repository IDs for 9B
+REPO_ID_REGULAR = "diffusers-internal-dev/dummy-1015-9b"
+REPO_ID_DISTILLED = "diffusers-internal-dev/dummy-1015-9b-distilled"

-# Load both
-print("Loading
+# Load both 9B models
+print("Loading 9B Regular model...")
 pipe_regular = Flux2KleinPipeline.from_pretrained(REPO_ID_REGULAR, torch_dtype=dtype)
 pipe_regular.to("cuda")

-print("Loading
+print("Loading 9B Distilled model...")
 pipe_distilled = Flux2KleinPipeline.from_pretrained(REPO_ID_DISTILLED, torch_dtype=dtype)
 pipe_distilled.to("cuda")

@@ -189,7 +189,7 @@ def infer(prompt, input_images=None, mode_choice="Distilled (4 steps)", seed=42,
     print(f"Upsampled Prompt: {final_prompt}")

     # 2. Image Generation
-    progress(0.2, desc=f"Generating image with
+    progress(0.2, desc=f"Generating image with 9B {mode_choice}...")

     generator = torch.Generator(device=device).manual_seed(seed)

@@ -235,8 +235,8 @@ css = """
 with gr.Blocks(css=css) as demo:

     with gr.Column(elem_id="col-container"):
-        gr.Markdown(f"""# FLUX.2 [Klein] -
-        FLUX.2 [Klein] is a
+        gr.Markdown(f"""# FLUX.2 [Klein] - 9B
+        FLUX.2 [Klein] is a distilled model capable of generating, editing and combining images based on text instructions [[model](https://huggingface.co/black-forest-labs/FLUX.2-dev)], [[blog](https://bfl.ai/blog/flux-2)]
         """)
         with gr.Row():
             with gr.Column():