[bug]: Unknown LORA Type

Open freelancer2000 opened this issue 4 months ago • 7 comments

Is there an existing issue for this problem?

  • [x] I have searched the existing issues

Install method

Invoke's Launcher

Operating system

Windows

GPU vendor

Nvidia (CUDA)

GPU model

RTX 3080

GPU VRAM

10 GB

Version number

6.3.0

Browser

Chrome

System Information

{ "version": "6.3.0", "dependencies": { "absl-py" : "2.3.1", "accelerate" : "1.10.0", "annotated-types" : "0.7.0", "anyio" : "4.10.0", "attrs" : "25.3.0", "bidict" : "0.23.1", "bitsandbytes" : "0.46.1", "blake3" : "1.0.5", "certifi" : "2022.12.7", "cffi" : "1.17.1", "charset-normalizer" : "2.1.1", "click" : "8.2.1", "colorama" : "0.4.6", "coloredlogs" : "15.0.1", "compel" : "2.1.1", "contourpy" : "1.3.3", "CUDA" : "12.8", "cycler" : "0.12.1", "Deprecated" : "1.2.18", "diffusers" : "0.33.0", "dnspython" : "2.7.0", "dynamicprompts" : "0.31.0", "einops" : "0.8.1", "fastapi" : "0.116.1", "fastapi-events" : "0.12.2", "filelock" : "3.13.1", "flatbuffers" : "25.2.10", "fonttools" : "4.59.0", "fsspec" : "2024.6.1", "gguf" : "0.17.1", "h11" : "0.16.0", "httptools" : "0.6.4", "huggingface-hub" : "0.34.3", "humanfriendly" : "10.0", "idna" : "3.4", "importlib_metadata" : "7.1.0", "invisible-watermark" : "0.2.0", "InvokeAI" : "6.3.0", "jax" : "0.7.0", "jaxlib" : "0.7.0", "Jinja2" : "3.1.4", "kiwisolver" : "1.4.8", "MarkupSafe" : "2.1.5", "matplotlib" : "3.10.5", "mediapipe" : "0.10.14", "ml_dtypes" : "0.5.3", "mpmath" : "1.3.0", "networkx" : "3.3", "numpy" : "1.26.3", "onnx" : "1.16.1", "onnxruntime" : "1.19.2", "opencv-contrib-python": "4.11.0.86", "opencv-python" : "4.11.0.86", "opt_einsum" : "3.4.0", "packaging" : "24.1", "picklescan" : "0.0.26", "pillow" : "11.0.0", "prompt_toolkit" : "3.0.51", "protobuf" : "4.25.8", "psutil" : "7.0.0", "pycparser" : "2.22", "pydantic" : "2.11.7", "pydantic-settings" : "2.10.1", "pydantic_core" : "2.33.2", "pyparsing" : "3.2.3", "PyPatchMatch" : "1.0.2", "pyreadline3" : "3.5.4", "python-dateutil" : "2.9.0.post0", "python-dotenv" : "1.1.1", "python-engineio" : "4.12.2", "python-multipart" : "0.0.20", "python-socketio" : "5.13.0", "PyWavelets" : "1.9.0", "PyYAML" : "6.0.2", "regex" : "2025.7.34", "requests" : "2.28.1", "safetensors" : "0.6.1", "scipy" : "1.16.1", "semver" : "3.0.4", "sentencepiece" : "0.2.0", "setuptools" : "70.2.0", "simple-websocket" : "1.1.0", "six" : "1.17.0", "sniffio" : "1.3.1", "sounddevice" : "0.5.2", "spandrel" : "0.4.1", "starlette" : "0.47.2", "sympy" : "1.13.3", "tokenizers" : "0.21.4", "torch" : "2.7.1+cu128", "torchsde" : "0.2.6", "torchvision" : "0.22.1+cu128", "tqdm" : "4.66.5", "trampoline" : "0.1.2", "transformers" : "4.55.0", "typing-inspection" : "0.4.1", "typing_extensions" : "4.12.2", "urllib3" : "1.26.13", "uvicorn" : "0.35.0", "watchfiles" : "1.1.0", "wcwidth" : "0.2.13", "websockets" : "15.0.1", "wrapt" : "1.17.2", "wsproto" : "1.2.0", "zipp" : "3.19.2" }, "config": { "schema_version": "4.0.2", "legacy_models_yaml_path": null, "host": "0.0.0.0", "port": 9090, "allow_origins": [], "allow_credentials": true, "allow_methods": [""], "allow_headers": [""], "ssl_certfile": null, "ssl_keyfile": null, "log_tokenization": false, "patchmatch": true, "models_dir": "models", "convert_cache_dir": "models\.convert_cache", "download_cache_dir": "models\.download_cache", "legacy_conf_dir": "configs", "db_dir": "databases", "outputs_dir": "outputs", "custom_nodes_dir": "nodes", "style_presets_dir": "style_presets", "workflow_thumbnails_dir": "workflow_thumbnails", "log_handlers": ["console"], "log_format": "color", "log_level": "info", "log_sql": false, "log_level_network": "warning", "use_memory_db": false, "dev_reload": false, "profile_graphs": false, "profile_prefix": null, "profiles_dir": "profiles", "max_cache_ram_gb": 50, "max_cache_vram_gb": null, "log_memory_usage": false, "device_working_mem_gb": 3, "enable_partial_loading": true, 
"keep_ram_copy_of_weights": true, "ram": null, "vram": null, "lazy_offload": true, "pytorch_cuda_alloc_conf": "backend:cudaMallocAsync", "device": "auto", "precision": "auto", "sequential_guidance": false, "attention_type": "auto", "attention_slice_size": "auto", "force_tiled_decode": false, "pil_compress_level": 1, "max_queue_size": 10000, "clear_queue_on_startup": false, "allow_nodes": null, "deny_nodes": null, "node_cache_size": 512, "hashing_algorithm": "blake3_single", "remote_api_tokens": null, "scan_models_on_startup": false, "unsafe_disable_picklescan": false }, "set_config_fields": [ "host", "pytorch_cuda_alloc_conf", "legacy_models_yaml_path", "max_cache_ram_gb", "enable_partial_loading" ] }

What happened

When trying to install the Kontext LoRA from this link: https://civitai.com/models/1871505/kontext-reality-transform-by-aldniki?modelVersionId=2118275, the following error is displayed:

Model install error Unknown LoRA type

What you expected to happen

The LoRA should install successfully and be usable with Flux Kontext.

How to reproduce the problem

Start the Invoke app, download the LoRA from CivitAI, and point Invoke at the safetensors file to install it. The error appears.

Additional context

In Ruined Fooocus it seems to work fine though.

Discord username

lancermaster

freelancer2000 avatar Aug 18 '25 19:08 freelancer2000

Updated to 6.4.0; the LoRA type is still unknown / not supported.

freelancer2000 avatar Aug 19 '25 12:08 freelancer2000

Hi, I'm getting the same error with this model with both local and CivitAI installation. It works in SwarmUI https://civitai.com/models/1128288/dramatic-lighting-slider-illustrious

HugePurpleVeiny avatar Aug 25 '25 22:08 HugePurpleVeiny

same for Lora ID 1594293

furu00 avatar Sep 11 '25 01:09 furu00

Hi, I'm getting the same error with this model with both local and CivitAI installation. It works in SwarmUI https://civitai.com/models/1128288/dramatic-lighting-slider-illustrious

I think this one is an SDXL LoRA, not Flux-compatible. I assembled some tools to 'fix' LoRAs for InvokeAI, and this is what it reported for that model:

 % python test_invokeai_compatibility.py /Users/andre/Documents/machine_learning/lora/Dramatic\ Lighting\ Slider.safetensors

======================================================================
InvokeAI LoRA Compatibility Test
======================================================================
Testing 1 file(s)...


✗ Dramatic Lighting Slider.safetensors
   Format: Stable Diffusion (not Flux)
   Size: 7.74 MB
   Keys: 840 total
   Rank: Unknown
   Key breakdown:
      - lora_unet: 840
      - transformer: 840
   Issues:
      - This is a Stable Diffusion (SD1.5/SDXL) LoRA, not a Flux LoRA
      - SD LoRAs use down_blocks/up_blocks architecture, incompatible with Flux
   Sample keys:
      - lora_unet_down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k.alpha
      - lora_unet_down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k.lora_down.weight
      - lora_unet_down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k.lora_up.weight
      - lora_unet_down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0.alpha
      - lora_unet_down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0.lora_down.weight
      - lora_unet_down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0.lora_up.weight
      - lora_unet_down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q.alpha
      - lora_unet_down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q.lora_down.weight
      - lora_unet_down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q.lora_up.weight
      - lora_unet_down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v.alpha

======================================================================
Summary:
  Total files: 1
  ✓ Compatible: 0
  ✗ Incompatible: 1
======================================================================
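
For the curious, the check above boils down to reading the tensor key names out of the .safetensors file: SD1.5/SDXL LoRAs carry lora_unet_down_blocks/up_blocks keys, while Flux LoRAs use double_blocks/single_blocks (Kohya) or transformer.* (Diffusers) prefixes. Here is a minimal sketch of that idea (not the actual test_invokeai_compatibility.py, and the prefix heuristics are illustrative only):

# Minimal sketch, not the real inspector tool: guess a LoRA's family from key names.
# Assumes the `safetensors` package is installed; heuristics are illustrative only.
import sys
from safetensors import safe_open

def guess_lora_family(path: str) -> str:
    with safe_open(path, framework="pt", device="cpu") as f:
        keys = list(f.keys())
    if any("double_blocks" in k or "single_blocks" in k for k in keys):
        return "Flux (Kohya-style keys)"
    if any(k.startswith("transformer.") for k in keys):
        return "Flux (Diffusers-style keys)"
    if any("down_blocks" in k or "up_blocks" in k or "mid_block" in k for k in keys):
        return "SD1.5/SDXL UNet LoRA (not Flux)"
    return "Unknown"

if __name__ == "__main__":
    for p in sys.argv[1:]:
        print(p, "->", guess_lora_family(p))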

dreness avatar Oct 13 '25 09:10 dreness

What happened

When trying to install the Kontext LoRA from this link: https://civitai.com/models/1871505/kontext-reality-transform-by-aldniki?modelVersionId=2118275, the following error is displayed:

Model install error Unknown LoRA type

I have a tool that can fix this.

 % python repair_lora.py /Users/andre/Documents/machine_learning/lora/aldniki_reality_transform_v01.safetensors 
======================================================================
Flux LoRA Repair Tool
======================================================================

Input:  /Users/andre/Documents/machine_learning/lora/aldniki_reality_transform_v01.safetensors
Output: /Users/andre/Documents/machine_learning/lora/aldniki_reality_transform_v01_fixed.safetensors

Analyzing file...
Issue detected: Non-standard final_layer keys
Strategy: Remove non-standard final_layer keys

Applying fix: Removing non-standard final_layer keys


======================================================================
Pass 2: Re-analyzing file...
======================================================================

Re-analyzing file...

Validating repair...

File size:
  Original: 292.58 MB
  Repaired: 292.39 MB

Fixes applied (1):
  - final_layer_keys: Removed 2 non-standard final_layer key(s)

======================================================================
✓ Repair successful!
======================================================================

The repaired file is now compatible with InvokeAI.
Validation result: All keys match Flux Kohya format (InvokeAI strict check)

Test it with: python test_invokeai_compatibility.py "/Users/andre/Documents/machine_learning/lora/aldniki_reality_transform_v01_fixed.safetensors"
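
The repair itself is essentially "load the tensors, drop the unexpected keys, re-save". A minimal sketch of that step (not the actual repair_lora.py; this version is coarser, simply removing every key that contains a given substring, and it does not preserve any metadata embedded in the original file):

# Minimal sketch of the drop-and-resave step, not the full repair_lora.py.
# Coarser than the real tool: removes every key containing `needle`, and any
# metadata stored in the original safetensors header is not carried over.
import sys
from safetensors.torch import load_file, save_file

def strip_keys(src: str, dst: str, needle: str = "final_layer") -> int:
    tensors = load_file(src)                     # load all tensors to CPU memory
    keep = {k: v for k, v in tensors.items() if needle not in k}
    save_file(keep, dst)                         # write the filtered checkpoint
    return len(tensors) - len(keep)

if __name__ == "__main__":
    src = sys.argv[1]
    dst = src.replace(".safetensors", "_fixed.safetensors")
    print(f"Removed {strip_keys(src, dst)} key(s); wrote {dst}")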

dreness avatar Oct 13 '25 09:10 dreness

same for Lora ID 1594293

This one loads for me without any changes. My inspector tool says:

% python test_invokeai_compatibility.py /Users/andre/Documents/machine_learning/lora/Tennis_love_story_two_boys_OCs-000017.safetensors

======================================================================
InvokeAI LoRA Compatibility Test
======================================================================
Testing 1 file(s)...


✓ Tennis_love_story_two_boys_OCs-000017.safetensors
   Format: Flux Diffusers
   Size: 217.87 MB
   Keys: 2958 total
   Rank: 32

======================================================================
Summary:
  Total files: 1
  ✓ Compatible: 1
  ✗ Incompatible: 0

  Format breakdown:
    - Flux Diffusers: 1
======================================================================

dreness avatar Oct 13 '25 10:10 dreness

same for Lora ID 1594293

This one loads for me without any changes.

Thanks for the help. I just noticed that Civitai uses different ID numbers in their download links than in their website URLs. So the LoRA I was talking about is https://civitai.com/api/download/models/1594293?type=Model&format=SafeTensor. I just tried it again and Invoke can't import it.
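
For reference, the number in the download endpoint appears to be the model version ID (the modelVersionId shown as a query parameter on the model page), not the model ID from the page URL, which is why the two don't match. A minimal sketch of fetching that exact file outside the browser (assumes the requests package; some Civitai downloads also require an API token, which is omitted here):

# Hedged sketch: download via the Civitai download URL, which uses the version ID.
# Assumes `requests` is installed; an API token may be required for some models.
import requests

url = "https://civitai.com/api/download/models/1594293?type=Model&format=SafeTensor"
resp = requests.get(url, allow_redirects=True, timeout=300)
resp.raise_for_status()
with open("lora_1594293.safetensors", "wb") as f:
    f.write(resp.content)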

furu00 avatar Oct 13 '25 17:10 furu00