Which V5 to use?

#6
by Pr0f3ssi0n4ln00b - opened

In an HF Space with Phr00t’s Rapid AIO v19 extracted transformer: should I use the original 2511 or one of the merges? Rank 16 or 32?

I’m getting some errors about missing alpha with that one when running it through diffusers on ZeroGPU. The pure 2511 version works.
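
For reference, here’s a quick way to see which modules are missing alpha entries (just a sketch; it assumes kohya-style `.lora_up`/`.lora_down` keys, and the filename is a placeholder):

```python
import safetensors.torch

sd = safetensors.torch.load_file("lora.safetensors")  # hypothetical path
# group keys by module prefix, then flag modules with up/down but no alpha
prefixes = {k.rsplit(".lora_", 1)[0] for k in sd if ".lora_" in k}
missing = sorted(p for p in prefixes if p + ".alpha" not in sd)
print(len(missing), "modules missing alpha, e.g.", missing[:3])
```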

Could you try adjusting my code? It was working, but I haven't had time to update it since.

https://huggingface.co/spaces/Alissonerdx/BFS-Best-Face-Swap/tree/main

I can’t contribute/push commits with dev mode activated.

Here’s a change to the converter you could try, but I don’t know if it’s going to work.

```python
# convert_lora.py
import torch
import safetensors.torch

# comfy/kohya "lora_up" corresponds to diffusers/PEFT "lora_B",
# and "lora_down" to "lora_A"
UP_SUFFIXES = (".lora_up.weight", ".lora_B.weight")
DOWN_SUFFIXES = (".lora_down.weight", ".lora_A.weight")
ALPHA_SUFFIX = ".alpha"

def _map_key(k: str) -> str:
    # map common comfy/kohya -> diffusers-ish naming
    nk = k
    nk = nk.replace("model.diffusion_model.", "transformer.")
    nk = nk.replace("diffusion_model.", "transformer.")
    nk = nk.replace("lora_unet.", "transformer.")
    nk = nk.replace("lora_unet_", "transformer.")
    return nk

def _collect_prefixes(keys):
    prefixes = set()
    for k in keys:
        for suf in UP_SUFFIXES + DOWN_SUFFIXES:
            if k.endswith(suf):
                prefixes.add(k[:-len(suf)])
    return prefixes

def _infer_rank(state, prefix: str) -> int:
    # down matrices are [rank, in_features], so rank is dim 0;
    # up matrices are [out_features, rank], so rank is the last dim
    for suf in DOWN_SUFFIXES:
        t = state.get(prefix + suf)
        if t is not None and hasattr(t, "shape") and len(t.shape) > 0:
            return int(t.shape[0])
    for suf in UP_SUFFIXES:
        t = state.get(prefix + suf)
        if t is not None and hasattr(t, "shape") and len(t.shape) > 0:
            return int(t.shape[-1])
    return 1

def convert_comfyui_lora_to_diffusers(input_path: str, output_path: str):
    original = safetensors.torch.load_file(input_path)

    # 1) map keys (but don't drop alpha yet)
    mapped = {}
    for k, v in original.items():
        mapped[_map_key(k)] = v

    prefixes = _collect_prefixes(mapped.keys())

    # 2) remove orphan alphas (alpha without a corresponding up/down pair)
    for k in list(mapped.keys()):
        if k.endswith(ALPHA_SUFFIX):
            prefix = k[:-len(ALPHA_SUFFIX)]
            if prefix not in prefixes:
                mapped.pop(k)

    # 3) add missing alphas (alpha missing but up/down exist);
    #    kohya convention: a missing alpha means alpha == rank, i.e. scale 1.0
    for p in prefixes:
        akey = p + ALPHA_SUFFIX
        if akey not in mapped:
            rank = _infer_rank(mapped, p)
            mapped[akey] = torch.tensor(float(rank), dtype=torch.float32)

    safetensors.torch.save_file(mapped, output_path, metadata={"format": "diffusers"})
    return output_path
```
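
Usage would just be something like this (paths are placeholders; `pipe` is whatever diffusers pipeline you already have loaded):

```python
converted = convert_comfyui_lora_to_diffusers(
    "rapid-aio-v19.safetensors",            # hypothetical input
    "rapid-aio-v19-diffusers.safetensors",  # hypothetical output
)
pipe.load_lora_weights(converted)
```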

That got formatted horribly, sorry.

Edit: fixed formatting

Update: I got the v5 rank32 FP32 merge working in Spaces now by filtering out the delta keys and remapping the up/down keys for the transformer. I have no idea if it's the best fix, but it works. Check out my space if you want.
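
For anyone who hits the same thing, the fix looks roughly like this (a minimal sketch, not my exact code; the `.diff`/`.diff_b` suffixes are an assumption based on how extracted LoRAs usually name their delta keys, so check yours):

```python
def filter_and_remap(state: dict) -> dict:
    out = {}
    for k, v in state.items():
        # drop full-weight deltas that diffusers' LoRA loader can't consume
        if k.endswith((".diff", ".diff_b")):
            continue
        # remap comfy/kohya naming to the PEFT-style keys diffusers expects
        k = k.replace("diffusion_model.", "transformer.")
        k = k.replace(".lora_up.weight", ".lora_B.weight")
        k = k.replace(".lora_down.weight", ".lora_A.weight")
        out[k] = v
    return out
```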
