LTXVPromptEnhancer node fails in lowvram mode: Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0! (when checking argument for argument mat2 in method wrapper_CUDA_bmm)
When I launch with python main.py --gpu-only it works fine, but that option makes things much harder on graphics cards with low VRAM.
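What the error seems to mean: in lowvram mode the enhancer's LLM weights stay on the CPU offload device while the tokenized prompt is sent to cuda:0, so the first matmul inside the rotary embedding gets one CPU tensor and one CUDA tensor. A minimal sketch of the same mismatch outside ComfyUI, using plain transformers (the model name is the loader's default; the rest is illustrative, not the node's actual code):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained(
    "unsloth/Llama-3.2-3B-Instruct", torch_dtype=torch.bfloat16
)  # left on the CPU, the way a lowvram offload would leave it
tokenizer = AutoTokenizer.from_pretrained("unsloth/Llama-3.2-3B-Instruct")

inputs = tokenizer("dog", return_tensors="pt").to("cuda")  # inputs land on cuda:0
# model.generate(**inputs)  # -> RuntimeError: ... cpu and cuda:0 (mat2 in wrapper_CUDA_bmm)

# The generic fix: keep the inputs on whatever device the model actually sits on.
inputs = tokenizer("dog", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=8)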
Error log:
ComfyUI Error Report
Error Details
- Node ID: 1
- Node Type: LTXVPromptEnhancer
- Exception Type: RuntimeError
- Exception Message: Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0! (when checking argument for argument mat2 in method wrapper_CUDA_bmm)
Stack Trace
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\execution.py", line 327, in execute
output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\execution.py", line 202, in get_output_data
return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\execution.py", line 174, in _map_node_over_list
process_inputs(input_dict, i)
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\execution.py", line 163, in process_inputs
results.append(getattr(obj, func)(**inputs))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-LTXVideo\prompt_enhancer_nodes.py", line 202, in enhance
enhanced_prompt = model(prompt, image_conditioning, max_resulting_tokens)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1739, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1750, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-LTXVideo\prompt_enhancer_nodes.py", line 42, in forward
enhanced_prompt = generate_cinematic_prompt(
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-LTXVideo\prompt_enhancer_utils.py", line 87, in generate_cinematic_prompt
prompts = _generate_t2v_prompt(
^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-LTXVideo\prompt_enhancer_utils.py", line 160, in _generate_t2v_prompt
return _generate_and_decode_prompts(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-LTXVideo\prompt_enhancer_utils.py", line 243, in _generate_and_decode_prompts
outputs = prompt_enhancer_model.generate(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\utils\_contextlib.py", line 116, in decorate_context
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\generation\utils.py", line 2223, in generate
result = self._sample(
^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\generation\utils.py", line 3211, in _sample
outputs = self(**model_inputs, return_dict=True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1739, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1750, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\utils\deprecation.py", line 172, in wrapped_func
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\models\llama\modeling_llama.py", line 842, in forward
outputs = self.model(
^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1739, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1750, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\models\llama\modeling_llama.py", line 571, in forward
position_embeddings = self.rotary_emb(hidden_states, position_ids)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1739, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1750, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\utils\_contextlib.py", line 116, in decorate_context
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\models\llama\modeling_llama.py", line 132, in forward
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
System Information
- ComfyUI Version: 0.3.26
- Arguments: ComfyUI\main.py --windows-standalone-build
- OS: nt
- Python Version: 3.11.9 (tags/v3.11.9:de54cf5, Apr 2 2024, 10:12:12) [MSC v.1938 64 bit (AMD64)]
- Embedded Python: true
- PyTorch Version: 2.6.0+cu126
Devices
- Name: cuda:0 NVIDIA GeForce RTX 4060 Ti : cudaMallocAsync
- Type: cuda
- VRAM Total: 8585216000
- VRAM Free: 2693038628
- Torch VRAM Total: 4697620480
- Torch VRAM Free: 13926948
Logs
2025-03-11T17:56:33.242935 -
2025-03-11T17:56:33.263943 - FalVideo.available
2025-03-11T17:56:33.263943 - --------------
2025-03-11T17:56:33.268522 - ### Loading: noEmbryo nodes v1.0.52025-03-11T17:56:33.268522 -
2025-03-11T17:56:33.532149 - Please 'pip install xformers'2025-03-11T17:56:33.532149 -
2025-03-11T17:56:33.532149 - Nvidia APEX normalization not installed, using PyTorch LayerNorm2025-03-11T17:56:33.532149 -
2025-03-11T17:56:33.681369 - Failed to auto update `Quality of Life Suit` 2025-03-11T17:56:33.681369 -
2025-03-11T17:56:33.681369 - QualityOfLifeSuit_Omar92_DIR: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-QualityOfLifeSuit_Omar922025-03-11T17:56:33.681369 -
2025-03-11T17:56:33.697002 - [ReActor] - STATUS - Running v0.5.2-b1 in ComfyUI2025-03-11T17:56:33.697002 -
2025-03-11T17:56:33.831789 - Torch version: 2.6.0+cu1262025-03-11T17:56:33.847457 -
2025-03-11T17:56:34.830864 - no module 'xformers'. Processing without...2025-03-11T17:56:34.830864 -
2025-03-11T17:56:34.846488 - no module 'xformers'. Processing without...2025-03-11T17:56:34.846488 -
2025-03-11T17:56:35.170361 - (pysssss:WD14Tagger) [DEBUG] Available ORT providers: TensorrtExecutionProvider, CUDAExecutionProvider, CPUExecutionProvider2025-03-11T17:56:35.170361 -
2025-03-11T17:56:35.170361 - (pysssss:WD14Tagger) [DEBUG] Using ORT providers: CUDAExecutionProvider, CPUExecutionProvider2025-03-11T17:56:35.170361 -
2025-03-11T17:56:35.235211 - ------------------------------------------2025-03-11T17:56:35.235211 -
2025-03-11T17:56:35.235211 - Comfyroll Studio v1.76 : 175 Nodes Loaded2025-03-11T17:56:35.235211 -
2025-03-11T17:56:35.235211 - ------------------------------------------2025-03-11T17:56:35.235211 -
2025-03-11T17:56:35.235211 - ** For changes, please see patch notes at https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/blob/main/Patch_Notes.md2025-03-11T17:56:35.235211 -
2025-03-11T17:56:35.235211 - ** For help, please see the wiki at https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki2025-03-11T17:56:35.235211 -
2025-03-11T17:56:35.235211 - ------------------------------------------2025-03-11T17:56:35.235211 -
2025-03-11T17:56:37.823095 - FETCH ComfyRegistry Data: 5/572025-03-11T17:56:37.823095 -
2025-03-11T17:56:39.352661 -
### [START] ComfyUI AlekPet Nodes v1.0.53 ###2025-03-11T17:56:39.352661 -
2025-03-11T17:56:39.352661 - Node -> ArgosTranslateNode: ArgosTranslateCLIPTextEncodeNode, ArgosTranslateTextNode [Loading]2025-03-11T17:56:39.352661 -
2025-03-11T17:56:39.353660 - Node -> ChatGLMNode: ChatGLM4TranslateCLIPTextEncodeNode, ChatGLM4TranslateTextNode, ChatGLM4InstructNode, ChatGLM4InstructMediaNode [Loading]2025-03-11T17:56:39.353660 -
2025-03-11T17:56:39.353660 - Node -> DeepTranslatorNode: DeepTranslatorCLIPTextEncodeNode, DeepTranslatorTextNode [Loading]2025-03-11T17:56:39.353660 -
2025-03-11T17:56:39.353660 - Node -> ExtrasNode: PreviewTextNode, HexToHueNode, ColorsCorrectNode [Loading]2025-03-11T17:56:39.353660 -
2025-03-11T17:56:39.353660 - Node -> GoogleTranslateNode: GoogleTranslateCLIPTextEncodeNode, GoogleTranslateTextNode [Loading]2025-03-11T17:56:39.353660 -
2025-03-11T17:56:39.354661 - Node -> IDENode: IDENode [Loading]2025-03-11T17:56:39.354661 -
2025-03-11T17:56:39.354661 - Node -> PainterNode: PainterNode [Loading]2025-03-11T17:56:39.354661 -
2025-03-11T17:56:39.354661 - Node -> PoseNode: PoseNode [Loading]2025-03-11T17:56:39.354661 -
2025-03-11T17:56:39.355660 - ### [END] ComfyUI AlekPet Nodes ###2025-03-11T17:56:39.355660 -
2025-03-11T17:56:39.504952 - # 😺dzNodes: LayerStyle -> Cannot import name 'guidedFilter' from 'cv2.ximgproc'
A few nodes cannot works properly, while most nodes are not affected. Please REINSTALL package 'opencv-contrib-python'.
For detail refer to https://github.com/chflame163/ComfyUI_LayerStyle/issues/5 2025-03-11T17:56:39.506950 -
2025-03-11T17:56:39.748707 - # 😺dzNodes: LayerStyle -> Cannot import name 'guidedFilter' from 'cv2.ximgproc'
A few nodes cannot works properly, while most nodes are not affected. Please REINSTALL package 'opencv-contrib-python'.
For detail refer to https://github.com/chflame163/ComfyUI_LayerStyle/issues/5 2025-03-11T17:56:39.748707 -
2025-03-11T17:56:39.842252 - Please 'pip install xformers'2025-03-11T17:56:39.843253 -
2025-03-11T17:56:39.849218 - Nvidia APEX normalization not installed, using PyTorch LayerNorm2025-03-11T17:56:39.849218 -
2025-03-11T17:56:40.179954 - All packages from requirements.txt are installed and up to date.2025-03-11T17:56:40.179954 -
2025-03-11T17:56:40.186564 - llama-cpp installed2025-03-11T17:56:40.186564 -
2025-03-11T17:56:40.195563 - All packages from requirements.txt are installed and up to date.2025-03-11T17:56:40.195563 -
2025-03-11T17:56:41.401752 - WAS Node Suite: OpenCV Python FFMPEG support is enabled2025-03-11T17:56:41.401752 -
2025-03-11T17:56:41.401752 - WAS Node Suite Warning: `ffmpeg_bin_path` is not set in `E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\pr-was-node-suite-comfyui-47064894\was_suite_config.json` config file. Will attempt to use system ffmpeg binaries if available.2025-03-11T17:56:41.401752 -
2025-03-11T17:56:42.157061 - WAS Node Suite: Finished. Loaded 218 nodes successfully.2025-03-11T17:56:42.157061 -
2025-03-11T17:56:42.157061 -
"Do one thing every day that scares you." - Eleanor Roosevelt
2025-03-11T17:56:42.157061 -
2025-03-11T17:56:42.183149 - Please 'pip install xformers'2025-03-11T17:56:42.183149 -
2025-03-11T17:56:42.190156 - Nvidia APEX normalization not installed, using PyTorch LayerNorm2025-03-11T17:56:42.190156 -
2025-03-11T17:56:42.385176 -
2025-03-11T17:56:42.385176 - [rgthree-comfy] Loaded 42 exciting nodes. 🎉2025-03-11T17:56:42.385176 -
2025-03-11T17:56:42.385176 -
2025-03-11T17:56:42.389367 -
Import times for custom nodes:
2025-03-11T17:56:42.390372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\websocket_image_save.py
2025-03-11T17:56:42.390372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-portrait-master-zh-cn
2025-03-11T17:56:42.390372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\sdxl_prompt_styler
2025-03-11T17:56:42.390372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\AIGODLIKE-COMFYUI-TRANSLATION
2025-03-11T17:56:42.390372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\efficiency-nodes-comfyui
2025-03-11T17:56:42.390372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\image-resize-comfyui
2025-03-11T17:56:42.390372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\stability-ComfyUI-nodes
2025-03-11T17:56:42.390372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-textoverlay
2025-03-11T17:56:42.390372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\cg-use-everywhere
2025-03-11T17:56:42.390372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-noembryo
2025-03-11T17:56:42.390372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-HunyuanVideoMultiLora
2025-03-11T17:56:42.390372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI_AdvancedRefluxControl
2025-03-11T17:56:42.390372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-YOLO
2025-03-11T17:56:42.390372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-VideoHelperSuite
2025-03-11T17:56:42.390372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-Copilot
2025-03-11T17:56:42.390372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-lama-remover
2025-03-11T17:56:42.390372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\janus-pro
2025-03-11T17:56:42.390372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui_controlnet_aux
2025-03-11T17:56:42.391372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-Impact-Pack
2025-03-11T17:56:42.391372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyLiterals
2025-03-11T17:56:42.391372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui_zenid
2025-03-11T17:56:42.391372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-image-saver
2025-03-11T17:56:42.391372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-GGUF
2025-03-11T17:56:42.391372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui_instantid
2025-03-11T17:56:42.391372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui_ipadapter_plus
2025-03-11T17:56:42.391372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\qq-nodes-comfyui
2025-03-11T17:56:42.391372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-QualityOfLifeSuit_Omar92
2025-03-11T17:56:42.391372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyMath
2025-03-11T17:56:42.391372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-wd14-tagger
2025-03-11T17:56:42.391372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\Comfy_KepListStuff
2025-03-11T17:56:42.391372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-LTXVideo
2025-03-11T17:56:42.391372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui_patches_ll
2025-03-11T17:56:42.391372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui_soze
2025-03-11T17:56:42.391372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-various
2025-03-11T17:56:42.391372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-AudioScheduler
2025-03-11T17:56:42.391372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-Custom-Scripts
2025-03-11T17:56:42.391372 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-frame-interpolation
2025-03-11T17:56:42.392371 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\derfuu_comfyui_moddednodes
2025-03-11T17:56:42.392371 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui_essentials
2025-03-11T17:56:42.392371 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui_ultimatesdupscale
2025-03-11T17:56:42.392371 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-Florence2
2025-03-11T17:56:42.392371 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\rgthree-comfy
2025-03-11T17:56:42.392371 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-jakeupgrade
2025-03-11T17:56:42.392371 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-WanVideoWrapper
2025-03-11T17:56:42.392371 - 0.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\OneButtonPrompt
2025-03-11T17:56:42.392371 - 0.1 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-Inspire-Pack
2025-03-11T17:56:42.392371 - 0.1 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI_LayerStyle_Advance
2025-03-11T17:56:42.392371 - 0.1 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI_Comfyroll_CustomNodes
2025-03-11T17:56:42.392371 - 0.1 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-KJNodes
2025-03-11T17:56:42.392371 - 0.1 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-segment-anything-2
2025-03-11T17:56:42.392371 - 0.1 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-Text_Image-Composite
2025-03-11T17:56:42.392371 - 0.1 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-Crystools
2025-03-11T17:56:42.392371 - 0.1 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI_PuLID_Flux_ll
2025-03-11T17:56:42.392371 - 0.1 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-tensorops
2025-03-11T17:56:42.392371 - 0.2 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui_segment_anything
2025-03-11T17:56:42.392371 - 0.2 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-PuLID-Flux-Enhanced
2025-03-11T17:56:42.392371 - 0.2 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-sound-lab
2025-03-11T17:56:42.393371 - 0.2 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\pulid_comfyui
2025-03-11T17:56:42.393371 - 0.2 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-HunyuanVideoWrapper
2025-03-11T17:56:42.393371 - 0.2 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\CharacterFaceSwap
2025-03-11T17:56:42.393371 - 0.2 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-reactor-node
2025-03-11T17:56:42.393371 - 0.2 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-impact-subpack
2025-03-11T17:56:42.393371 - 0.3 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-ollama
2025-03-11T17:56:42.393371 - 0.3 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui_vlm_nodes
2025-03-11T17:56:42.393371 - 0.3 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui_layerstyle
2025-03-11T17:56:42.393371 - 0.7 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-supir
2025-03-11T17:56:42.393371 - 1.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-Gemini
2025-03-11T17:56:42.393371 - 1.4 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-inspyrenet-rembg
2025-03-11T17:56:42.393371 - 1.6 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\pr-was-node-suite-comfyui-47064894
2025-03-11T17:56:42.393371 - 1.7 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-art-venture
2025-03-11T17:56:42.393371 - 1.9 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-Allor
2025-03-11T17:56:42.393371 - 2.0 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-Manager
2025-03-11T17:56:42.393371 - 2.3 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-Easy-Use
2025-03-11T17:56:42.393371 - 3.2 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfy-mtb
2025-03-11T17:56:42.393371 - 3.4 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-mixlab-nodes
2025-03-11T17:56:42.394370 - 3.9 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-if_ai_tools
2025-03-11T17:56:42.394370 - 4.1 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI_Custom_Nodes_AlekPet
2025-03-11T17:56:42.394370 - 8.2 seconds: E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-AudioReactive
2025-03-11T17:56:42.394370 -
2025-03-11T17:56:42.415461 - Starting server
2025-03-11T17:56:42.415461 - To see the GUI go to: http://127.0.0.1:8188
2025-03-11T17:56:43.623086 - FETCH ComfyRegistry Data: 10/572025-03-11T17:56:43.623086 -
2025-03-11T17:56:45.183171 - E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-mixlab-nodes\webApp\lib/photoswipe-lightbox.esm.min.js2025-03-11T17:56:45.183171 -
2025-03-11T17:56:45.214442 - E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-mixlab-nodes\webApp\lib/photoswipe.min.css2025-03-11T17:56:45.214442 -
2025-03-11T17:56:45.269076 - E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-mixlab-nodes\webApp\lib/pickr.min.js2025-03-11T17:56:45.269076 -
2025-03-11T17:56:45.336578 - E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-mixlab-nodes\webApp\lib/classic.min.css2025-03-11T17:56:45.336578 -
2025-03-11T17:56:45.613303 - JSON parsing error with utf-8 encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:56:45.613303 -
2025-03-11T17:56:45.614179 - JSON parsing error with utf-8-sig encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:56:45.614179 -
2025-03-11T17:56:45.614179 - JSON parsing error with latin1 encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:56:45.615226 -
2025-03-11T17:56:45.615226 - JSON parsing error with cp1252 encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:56:45.615226 -
2025-03-11T17:56:45.616226 - JSON parsing error with gbk encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:56:45.616226 -
2025-03-11T17:56:45.616226 - Error: Failed to load E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-IF_AI_tools\IF_AI\presets\florence_prompts.json with any supported encoding2025-03-11T17:56:45.616226 -
2025-03-11T17:56:45.636986 - JSON parsing error with utf-8 encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:56:45.636986 -
2025-03-11T17:56:45.637986 - JSON parsing error with utf-8-sig encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:56:45.637986 -
2025-03-11T17:56:45.637986 - JSON parsing error with latin1 encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:56:45.637986 -
2025-03-11T17:56:45.637986 - JSON parsing error with cp1252 encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:56:45.637986 -
2025-03-11T17:56:45.637986 - JSON parsing error with gbk encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:56:45.637986 -
2025-03-11T17:56:45.637986 - Error: Failed to load E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-IF_AI_tools\IF_AI\presets\florence_prompts.json with any supported encoding2025-03-11T17:56:45.637986 -
2025-03-11T17:56:48.092375 - QualityOfLifeSuit_Omar92::NSP ready2025-03-11T17:56:48.092375 -
2025-03-11T17:56:49.812096 - FETCH ComfyRegistry Data: 15/572025-03-11T17:56:49.866101 -
2025-03-11T17:56:54.833257 - E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-mixlab-nodes\webApp\lib/model-viewer.min.js2025-03-11T17:56:54.833257 -
2025-03-11T17:56:54.899790 - E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-mixlab-nodes\webApp\lib/juxtapose.css2025-03-11T17:56:54.899790 -
2025-03-11T17:56:54.914795 - E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-mixlab-nodes\webApp\lib/juxtapose.min.js2025-03-11T17:56:54.914795 -
2025-03-11T17:56:55.170141 - JSON parsing error with utf-8 encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:56:55.170141 -
2025-03-11T17:56:55.170141 - JSON parsing error with utf-8-sig encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:56:55.170141 -
2025-03-11T17:56:55.170141 - JSON parsing error with latin1 encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:56:55.170141 -
2025-03-11T17:56:55.170141 - JSON parsing error with cp1252 encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:56:55.170141 -
2025-03-11T17:56:55.170141 - JSON parsing error with gbk encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:56:55.170141 -
2025-03-11T17:56:55.170141 - Error: Failed to load E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-IF_AI_tools\IF_AI\presets\florence_prompts.json with any supported encoding2025-03-11T17:56:55.170141 -
2025-03-11T17:56:55.185727 - JSON parsing error with utf-8 encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:56:55.185727 -
2025-03-11T17:56:55.185727 - JSON parsing error with utf-8-sig encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:56:55.185727 -
2025-03-11T17:56:55.185727 - JSON parsing error with latin1 encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:56:55.185727 -
2025-03-11T17:56:55.185727 - JSON parsing error with cp1252 encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:56:55.185727 -
2025-03-11T17:56:55.185727 - JSON parsing error with gbk encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:56:55.185727 -
2025-03-11T17:56:55.185727 - Error: Failed to load E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-IF_AI_tools\IF_AI\presets\florence_prompts.json with any supported encoding2025-03-11T17:56:55.185727 -
2025-03-11T17:56:55.752429 - FETCH ComfyRegistry Data: 20/572025-03-11T17:56:55.768060 -
2025-03-11T17:57:01.333164 - JSON parsing error with utf-8 encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:57:01.333164 -
2025-03-11T17:57:01.333164 - JSON parsing error with utf-8-sig encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:57:01.333164 -
2025-03-11T17:57:01.333164 - JSON parsing error with latin1 encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:57:01.333164 -
2025-03-11T17:57:01.333164 - JSON parsing error with cp1252 encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:57:01.333164 -
2025-03-11T17:57:01.333164 - JSON parsing error with gbk encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:57:01.333164 -
2025-03-11T17:57:01.333164 - Error: Failed to load E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-IF_AI_tools\IF_AI\presets\florence_prompts.json with any supported encoding2025-03-11T17:57:01.333164 -
2025-03-11T17:57:01.348871 - JSON parsing error with utf-8 encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:57:01.348871 -
2025-03-11T17:57:01.348871 - JSON parsing error with utf-8-sig encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:57:01.348871 -
2025-03-11T17:57:01.348871 - JSON parsing error with latin1 encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:57:01.348871 -
2025-03-11T17:57:01.348871 - JSON parsing error with cp1252 encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:57:01.348871 -
2025-03-11T17:57:01.348871 - JSON parsing error with gbk encoding: Expecting value: line 1 column 1 (char 0)2025-03-11T17:57:01.348871 -
2025-03-11T17:57:01.348871 - Error: Failed to load E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-IF_AI_tools\IF_AI\presets\florence_prompts.json with any supported encoding2025-03-11T17:57:01.348871 -
2025-03-11T17:57:01.799904 - FETCH ComfyRegistry Data: 25/572025-03-11T17:57:01.846804 -
2025-03-11T17:57:07.833030 - FETCH ComfyRegistry Data: 30/572025-03-11T17:57:07.847815 -
2025-03-11T17:57:13.685182 - FETCH ComfyRegistry Data: 35/572025-03-11T17:57:13.685182 -
2025-03-11T17:57:19.526079 - FETCH ComfyRegistry Data: 40/572025-03-11T17:57:19.526079 -
2025-03-11T17:57:25.391656 - FETCH ComfyRegistry Data: 45/572025-03-11T17:57:25.391656 -
2025-03-11T17:57:31.433422 - FETCH ComfyRegistry Data: 50/572025-03-11T17:57:31.433422 -
2025-03-11T17:57:37.366626 - FETCH ComfyRegistry Data: 55/572025-03-11T17:57:37.366626 -
2025-03-11T17:57:40.081351 - FETCH ComfyRegistry Data [DONE]2025-03-11T17:57:40.081351 -
2025-03-11T17:57:40.136972 - [ComfyUI-Manager] default cache updated: https://api.comfy.org/nodes
2025-03-11T17:57:40.203523 - nightly_channel: https://raw.githubusercontent.com/ltdrdata/ComfyUI-Manager/main/remote
2025-03-11T17:57:40.220908 - FETCH DATA from: https://raw.githubusercontent.com/ltdrdata/ComfyUI-Manager/main/custom-node-list.json2025-03-11T17:57:40.220908 - 2025-03-11T17:57:41.183179 - [DONE]2025-03-11T17:57:41.183179 -
2025-03-11T17:57:41.566613 - [ComfyUI-Manager] All startup tasks have been completed.
2025-03-11T17:57:51.683921 - got prompt
2025-03-11T17:57:56.234131 - Requested to load PromptEnhancer
2025-03-11T17:58:00.450966 - loaded completely 5645.8 5645.7998046875 False
2025-03-11T17:58:00.684755 - !!! Exception during processing !!! Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0! (when checking argument for argument mat2 in method wrapper_CUDA_bmm)
2025-03-11T17:58:00.751656 - Traceback (most recent call last):
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\execution.py", line 327, in execute
output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\execution.py", line 202, in get_output_data
return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\execution.py", line 174, in _map_node_over_list
process_inputs(input_dict, i)
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\execution.py", line 163, in process_inputs
results.append(getattr(obj, func)(**inputs))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-LTXVideo\prompt_enhancer_nodes.py", line 202, in enhance
enhanced_prompt = model(prompt, image_conditioning, max_resulting_tokens)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1739, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1750, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-LTXVideo\prompt_enhancer_nodes.py", line 42, in forward
enhanced_prompt = generate_cinematic_prompt(
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-LTXVideo\prompt_enhancer_utils.py", line 87, in generate_cinematic_prompt
prompts = _generate_t2v_prompt(
^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-LTXVideo\prompt_enhancer_utils.py", line 160, in _generate_t2v_prompt
return _generate_and_decode_prompts(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-LTXVideo\prompt_enhancer_utils.py", line 243, in _generate_and_decode_prompts
outputs = prompt_enhancer_model.generate(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\utils\_contextlib.py", line 116, in decorate_context
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\generation\utils.py", line 2223, in generate
result = self._sample(
^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\generation\utils.py", line 3211, in _sample
outputs = self(**model_inputs, return_dict=True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1739, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1750, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\utils\deprecation.py", line 172, in wrapped_func
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\models\llama\modeling_llama.py", line 842, in forward
outputs = self.model(
^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1739, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1750, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\models\llama\modeling_llama.py", line 571, in forward
position_embeddings = self.rotary_emb(hidden_states, position_ids)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1739, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1750, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\utils\_contextlib.py", line 116, in decorate_context
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\models\llama\modeling_llama.py", line 132, in forward
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0! (when checking argument for argument mat2 in method wrapper_CUDA_bmm)
2025-03-11T17:58:00.751656 - Prompt executed in 9.07 seconds
2025-03-11T17:58:19.734530 - got prompt
2025-03-11T17:58:21.999651 - Requested to load PromptEnhancer
2025-03-11T17:58:23.900827 - loaded completely 5615.8 5615.7998046875 False
2025-03-11T17:58:23.916795 - !!! Exception during processing !!! Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0! (when checking argument for argument mat2 in method wrapper_CUDA_bmm)
2025-03-11T17:58:23.916795 - Traceback (most recent call last):
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\execution.py", line 327, in execute
output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\execution.py", line 202, in get_output_data
return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\execution.py", line 174, in _map_node_over_list
process_inputs(input_dict, i)
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\execution.py", line 163, in process_inputs
results.append(getattr(obj, func)(**inputs))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-LTXVideo\prompt_enhancer_nodes.py", line 202, in enhance
enhanced_prompt = model(prompt, image_conditioning, max_resulting_tokens)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1739, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1750, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-LTXVideo\prompt_enhancer_nodes.py", line 42, in forward
enhanced_prompt = generate_cinematic_prompt(
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-LTXVideo\prompt_enhancer_utils.py", line 87, in generate_cinematic_prompt
prompts = _generate_t2v_prompt(
^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-LTXVideo\prompt_enhancer_utils.py", line 160, in _generate_t2v_prompt
return _generate_and_decode_prompts(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-LTXVideo\prompt_enhancer_utils.py", line 243, in _generate_and_decode_prompts
outputs = prompt_enhancer_model.generate(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\utils\_contextlib.py", line 116, in decorate_context
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\generation\utils.py", line 2223, in generate
result = self._sample(
^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\generation\utils.py", line 3211, in _sample
outputs = self(**model_inputs, return_dict=True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1739, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1750, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\utils\deprecation.py", line 172, in wrapped_func
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\models\llama\modeling_llama.py", line 842, in forward
outputs = self.model(
^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1739, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1750, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\models\llama\modeling_llama.py", line 571, in forward
position_embeddings = self.rotary_emb(hidden_states, position_ids)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1739, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1750, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\utils\_contextlib.py", line 116, in decorate_context
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "E:\APP_install\ComfyUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\models\llama\modeling_llama.py", line 132, in forward
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0! (when checking argument for argument mat2 in method wrapper_CUDA_bmm)
2025-03-11T17:58:23.916795 - Prompt executed in 4.18 seconds
Attached Workflow
Please make sure that workflow does not contain any sensitive information such as API keys or passwords.
{"last_node_id":3,"last_link_id":2,"nodes":[{"id":2,"type":"LTXVPromptEnhancerLoader","pos":[793.4646606445312,-19.623332977294922],"size":[428.4000244140625,82],"flags":{},"order":0,"mode":0,"inputs":[],"outputs":[{"name":"prompt_enhancer","localized_name":"prompt_enhancer","label":"prompt_enhancer","type":"LTXV_PROMPT_ENHANCER","links":[1],"slot_index":0}],"properties":{"cnr_id":"ltxv","ver":"eabd4b64169232e1ab3ca5963dcd400de58552d0","Node name for S&R":"LTXVPromptEnhancerLoader"},"widgets_values":["unsloth/Llama-3.2-3B-Instruct","MiaoshouAI/Florence-2-large-PromptGen-v2.0"]},{"id":3,"type":"ShowText|pysssss","pos":[1814.4144287109375,46.72268295288086],"size":[329.86944580078125,203.8621063232422],"flags":{},"order":2,"mode":0,"inputs":[{"name":"text","type":"STRING","widget":{"name":"text"},"link":2}],"outputs":[{"name":"STRING","localized_name":"STRING","label":"STRING","type":"STRING","shape":6,"links":null}],"properties":{"cnr_id":"comfyui-custom-scripts","ver":"2c09d59ab5ac27ac59022832bfde4eeeb9c55825","Node name for S&R":"ShowText|pysssss"},"widgets_values":[""]},{"id":1,"type":"LTXVPromptEnhancer","pos":[1274.6363525390625,143.1318817138672],"size":[335.70489501953125,124.35137939453125],"flags":{},"order":1,"mode":0,"inputs":[{"name":"prompt_enhancer","localized_name":"prompt_enhancer","label":"prompt_enhancer","type":"LTXV_PROMPT_ENHANCER","link":1},{"name":"image_prompt","localized_name":"image_prompt","label":"image_prompt","type":"IMAGE","shape":7,"link":null}],"outputs":[{"name":"str","localized_name":"str","label":"str","type":"STRING","links":[2],"slot_index":0}],"properties":{"cnr_id":"ltxv","ver":"eabd4b64169232e1ab3ca5963dcd400de58552d0","Node name for S&R":"LTXVPromptEnhancer"},"widgets_values":["dog",256]}],"links":[[1,2,0,1,0,"LTXV_PROMPT_ENHANCER"],[2,1,0,3,0,"STRING"]],"groups":[],"config":{},"extra":{"ds":{"scale":1.4122927695244514,"offset":[-573.8784791710717,280.4049554078063]},"ue_links":[]},"version":0.4}
Additional Context
(Please add any additional context or steps to reproduce the error here)
I'm hitting the same error.
I have the same error, even when passing --cuda-device 0 to main.py. It seems LTXVPromptEnhancer does not honor this setting?
I'm running into the same problem. How did you all solve it?
Maybe it's a problem with ComfyUI itself; it very likely only occurs on devices with low video memory!
Same here. Submitted a report. I thought the LTX model was supposed to be for low memory, though?
Same here. As a workaround I now do the extra prompt enhancement in Google Gemini, and it works great.
What was the solution?
Modifying one line of code temporarily solved the problem. Add a line to prompt_enhancer_nodes.py as follows (line 145): load_device = torch.device("cpu")
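For anyone applying that by hand: the line goes inside LTXVPromptEnhancerLoader.load(), right after the load/offload devices are queried (line 145 in that poster's copy; the exact number may differ between versions). A rough sketch of the surrounding context, not the exact upstream file:

def load(self, llm_name, image_captioner_name):
    load_device = comfy.model_management.get_torch_device()
    offload_device = comfy.model_management.vae_offload_device()
    # workaround: force the enhancer onto the CPU so every tensor ends up on one device
    load_device = torch.device("cpu")
    ...

Everything then runs on the CPU, which is slower but sidesteps the cpu/cuda:0 mismatch.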
I2V: you need to change both the prompt enhancer node file and its utils file.
Code for prompt_enhancer_nodes.py to replace:
import os
import shutil

import comfy.model_management
import comfy.model_patcher
import folder_paths
import torch
from transformers import AutoModelForCausalLM, AutoProcessor, AutoTokenizer

from .nodes_registry import comfy_node
from .prompt_enhancer_utils import generate_cinematic_prompt

LLM_NAME = ["unsloth/Llama-3.2-3B-Instruct"]
IMAGE_CAPTIONER = ["MiaoshouAI/Florence-2-large-PromptGen-v2.0"]
MODELS_PATH_KEY = "LLM"


class PromptEnhancer(torch.nn.Module):
    def __init__(
        self,
        image_caption_processor: AutoProcessor,
        image_caption_model: AutoModelForCausalLM,
        llm_model: AutoModelForCausalLM,
        llm_tokenizer: AutoTokenizer,
    ):
        super().__init__()
        self.image_caption_processor = image_caption_processor
        self.image_caption_model = image_caption_model
        self.llm_model = llm_model
        self.llm_tokenizer = llm_tokenizer
        self.device = image_caption_model.device
        # model parameters and buffer sizes plus some extra 1GB.
        self.model_size = (
            self.get_model_size(self.image_caption_model)
            + self.get_model_size(self.llm_model)
            + 1073741824
        )

    def forward(self, prompt, image_conditioning, max_resulting_tokens):
        enhanced_prompt = generate_cinematic_prompt(
            self.image_caption_model,
            self.image_caption_processor,
            self.llm_model,
            self.llm_tokenizer,
            prompt,
            image_conditioning,
            max_new_tokens=max_resulting_tokens,
        )
        return enhanced_prompt

    @staticmethod
    def get_model_size(model):
        total_size = sum(p.numel() * p.element_size() for p in model.parameters())
        total_size += sum(b.numel() * b.element_size() for b in model.buffers())
        return total_size

    def memory_required(self, input_shape):
        return self.model_size
@comfy_node(name="LTXVPromptEnhancerLoader") class LTXVPromptEnhancerLoader: @classmethod def INPUT_TYPES(s): return { "required": { "llm_name": ( "STRING", { "default": LLM_NAME, "tooltip": "The hugging face name of the llm model to load.", }, ), "image_captioner_name": ( "STRING", { "default": IMAGE_CAPTIONER, "tooltip": "The hugging face name of the image captioning model to load.", }, ), } }
RETURN_TYPES = ("LTXV_PROMPT_ENHANCER",)
RETURN_NAMES = ("prompt_enhancer",)
FUNCTION = "load"
CATEGORY = "lightricks/LTXV"
TITLE = "LTXV Prompt Enhancer (Down)Loader"
OUTPUT_NODE = False
def model_path_download_if_needed(self, model_name):
model_directory = os.path.join(folder_paths.models_dir, MODELS_PATH_KEY)
os.makedirs(model_directory, exist_ok=True)
model_name_ = model_name.rsplit("/", 1)[-1]
model_path = os.path.join(model_directory, model_name_)
if not os.path.exists(model_path):
from huggingface_hub import snapshot_download
try:
snapshot_download(
repo_id=model_name,
local_dir=model_path,
local_dir_use_symlinks=False,
)
except Exception:
shutil.rmtree(model_path, ignore_errors=True)
raise
return model_path
def down_load_llm_model(self, llm_name, load_device):
model_path = self.model_path_download_if_needed(llm_name)
llm_model = AutoModelForCausalLM.from_pretrained(
model_path,
torch_dtype=torch.bfloat16,
)
llm_model = llm_model.to(load_device) # ✅ FIX HIER
llm_tokenizer = AutoTokenizer.from_pretrained(
model_path,
)
return llm_model, llm_tokenizer
def down_load_image_captioner(self, image_captioner, load_device):
model_path = self.model_path_download_if_needed(image_captioner)
image_caption_model = AutoModelForCausalLM.from_pretrained(
model_path, trust_remote_code=True
)
image_caption_model = image_caption_model.to(load_device) # ✅ FIX HIER
image_caption_processor = AutoProcessor.from_pretrained(
model_path, trust_remote_code=True
)
return image_caption_model, image_caption_processor
def load(self, llm_name, image_captioner_name):
load_device = comfy.model_management.get_torch_device()
offload_device = comfy.model_management.vae_offload_device()
llm_model, llm_tokenizer = self.down_load_llm_model(llm_name, load_device)
image_caption_model, image_caption_processor = self.down_load_image_captioner(
image_captioner_name, load_device
)
enhancer = PromptEnhancer(
image_caption_processor, image_caption_model, llm_model, llm_tokenizer
)
patcher = comfy.model_patcher.ModelPatcher(
enhancer,
load_device,
offload_device,
)
return (patcher,)
@comfy_node(name="LTXVPromptEnhancer") class LTXVPromptEnhancer: @classmethod def INPUT_TYPES(s): return { "required": { "prompt": ("STRING",), "prompt_enhancer": ("LTXV_PROMPT_ENHANCER",), "max_resulting_tokens": ( "INT", {"default": 256, "min": 32, "max": 512}, ), }, "optional": { "image_prompt": ("IMAGE",), }, }
RETURN_TYPES = ("STRING",)
RETURN_NAMES = ("str",)
FUNCTION = "enhance"
CATEGORY = "lightricks/LTXV"
TITLE = "LTXV Prompt Enhancer"
OUTPUT_NODE = False
def enhance(
self,
prompt,
prompt_enhancer: comfy.model_patcher.ModelPatcher,
image_prompt: torch.Tensor = None,
max_resulting_tokens=256,
):
comfy.model_management.free_memory(
prompt_enhancer.memory_required([]),
comfy.model_management.get_torch_device(),
)
comfy.model_management.load_model_gpu(prompt_enhancer)
model = prompt_enhancer.model
image_conditioning = None
if image_prompt is not None:
permuted_image = image_prompt.permute(3, 0, 1, 2)[None, :]
image_conditioning = [(permuted_image, 0, 1.0)]
enhanced_prompt = model(prompt, image_conditioning, max_resulting_tokens)
return (enhanced_prompt[0],)
Code for prompt_enhancer_utils.py to replace:
import logging
import random
from typing import List, Optional, Tuple, Union

import torch
from PIL import Image

logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
T2V_CINEMATIC_PROMPT = """You are an expert cinematic director with many award winning movies, When writing prompts based on the user input, focus on detailed, chronological descriptions of actions and scenes. Include specific movements, appearances, camera angles, and environmental details - all in a single flowing paragraph. Start directly with the action, and keep descriptions literal and precise. Think like a cinematographer describing a shot list. Do not change the user input intent, just enhance it. Keep within 150 words. For best results, build your prompts using this structure: Start with main action in a single sentence Add specific details about movements and gestures Describe character/object appearances precisely Include background and environment details Specify camera angles and movements Describe lighting and colors Note any changes or sudden events Do not exceed the 150 word limit! Output the enhanced prompt only.
Examples: user prompt: A man drives a toyota car. enhanced prompt: A person is driving a car on a two-lane road, holding the steering wheel with both hands. The person's hands are light-skinned and they are wearing a black long-sleeved shirt. The steering wheel has a Toyota logo in the center and black leather around it. The car's dashboard is visible, showing a speedometer, tachometer, and navigation screen. The road ahead is straight and there are trees and fields visible on either side. The camera is positioned inside the car, providing a view from the driver's perspective. The lighting is natural and overcast, with a slightly cool tone.
user prompt: A young woman is sitting on a chair. enhanced prompt: A young woman with dark, curly hair and pale skin sits on a chair; she wears a dark, intricately patterned dress with a high collar and long, dark gloves that extend past her elbows; the scene is dimly lit, with light streaming in from a large window behind the characters.
user prompt: Aerial view of a city skyline. enhanced prompt: The camera pans across a cityscape of tall buildings with a circular building in the center. The camera moves from left to right, showing the tops of the buildings and the circular building in the center. The buildings are various shades of gray and white, and the circular building has a green roof. The camera angle is high, looking down at the city. The lighting is bright, with the sun shining from the upper left, casting shadows from the buildings. """
I2V_CINEMATIC_PROMPT = """You are an expert cinematic director with many award winning movies, When writing prompts based on the user input, focus on detailed, chronological descriptions of actions and scenes. Include specific movements, appearances, camera angles, and environmental details - all in a single flowing paragraph. Start directly with the action, and keep descriptions literal and precise. Think like a cinematographer describing a shot list. Keep within 150 words. For best results, build your prompts using this structure: Describe the image first and then add the user input. Image description should be in first priority! Align to the image caption if it contradicts the user text input. Start with main action in a single sentence Add specific details about movements and gestures Describe character/object appearances precisely Include background and environment details Specify camera angles and movements Describe lighting and colors Note any changes or sudden events Align to the image caption if it contradicts the user text input. Do not exceed the 150 word limit! Output the enhanced prompt only. """
def tensor_to_pil(tensor):
    # Ensure tensor is in range [-1, 1]
    assert tensor.min() >= -1 and tensor.max() <= 1

    # Convert from [-1, 1] to [0, 1]
    tensor = (tensor + 1) / 2

    # Rearrange from [C, H, W] to [H, W, C]
    tensor = tensor.permute(1, 2, 0)

    # Convert to numpy array and then to uint8 range [0, 255]
    numpy_image = (tensor.cpu().numpy() * 255).astype("uint8")

    # Convert to PIL Image
    return Image.fromarray(numpy_image)
def generate_cinematic_prompt(
    image_caption_model,
    image_caption_processor,
    prompt_enhancer_model,
    prompt_enhancer_tokenizer,
    prompt: Union[str, List[str]],
    conditioning_items: Optional[List[Tuple[torch.Tensor, int, float]]] = None,
    max_new_tokens: int = 256,
) -> List[str]:
    prompts = [prompt] if isinstance(prompt, str) else prompt

    if conditioning_items is None:
        prompts = _generate_t2v_prompt(
            prompt_enhancer_model,
            prompt_enhancer_tokenizer,
            prompts,
            max_new_tokens,
            T2V_CINEMATIC_PROMPT,
        )
    else:
        # if len(conditioning_items) > 1 or conditioning_items[0][1] != 0:
        #     logger.warning(
        #         "prompt enhancement does only support first frame of conditioning items, returning original prompts"
        #     )
        #     return prompts

        first_frame_conditioning_item = conditioning_items[0]
        first_frames = _get_first_frames_from_conditioning_item(
            first_frame_conditioning_item
        )

        assert len(first_frames) == len(
            prompts
        ), "Number of conditioning frames must match number of prompts"

        prompts = _generate_i2v_prompt(
            image_caption_model,
            image_caption_processor,
            prompt_enhancer_model,
            prompt_enhancer_tokenizer,
            prompts,
            first_frames,
            max_new_tokens,
            I2V_CINEMATIC_PROMPT,
        )

    return prompts
def _get_first_frames_from_conditioning_item(
    conditioning_item: Tuple[torch.Tensor, int, float]
) -> List[Image.Image]:
    frames_tensor = conditioning_item[0]
    # tensor shape: [batch_size, 3, num_frames, height, width], take first frame from each sample
    return [
        tensor_to_pil(frames_tensor[i, :, 0, :, :])
        for i in range(frames_tensor.shape[0])
    ]
def _generate_t2v_prompt(
    prompt_enhancer_model,
    prompt_enhancer_tokenizer,
    prompts: List[str],
    max_new_tokens: int,
    system_prompt: str,
) -> List[str]:
    messages = [
        [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": f"user_prompt: {p}"},
        ]
        for p in prompts
    ]

    texts = [
        prompt_enhancer_tokenizer.apply_chat_template(
            m, tokenize=False, add_generation_prompt=True
        )
        for m in messages
    ]
    model_inputs = prompt_enhancer_tokenizer(texts, return_tensors="pt").to(
        prompt_enhancer_model.device
    )

    return _generate_and_decode_prompts(
        prompt_enhancer_model, prompt_enhancer_tokenizer, model_inputs, max_new_tokens
    )
def _generate_i2v_prompt(
    image_caption_model,
    image_caption_processor,
    prompt_enhancer_model,
    prompt_enhancer_tokenizer,
    prompts: List[str],
    first_frames: List[Image.Image],
    max_new_tokens: int,
    system_prompt: str,
) -> List[str]:
    image_captions = _generate_image_captions(
        image_caption_model, image_caption_processor, first_frames
    )

    messages = [
        [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": f"user_prompt: {p}\nimage_caption: {c}"},
        ]
        for p, c in zip(prompts, image_captions)
    ]

    texts = [
        prompt_enhancer_tokenizer.apply_chat_template(
            m, tokenize=False, add_generation_prompt=True
        )
        for m in messages
    ]
    model_inputs = prompt_enhancer_tokenizer(texts, return_tensors="pt").to(
        prompt_enhancer_model.device
    )

    return _generate_and_decode_prompts(
        prompt_enhancer_model, prompt_enhancer_tokenizer, model_inputs, max_new_tokens
    )
def _generate_image_captions(
    image_caption_model,
    image_caption_processor,
    images: List[Image.Image],
    system_prompt: str = "<DETAILED_CAPTION>",
) -> List[str]:
    image_caption_prompts = [system_prompt] * len(images)
    inputs = image_caption_processor(
        image_caption_prompts, images, return_tensors="pt"
    )

    device = image_caption_model.device
    # Move every tensor in the input dictionary onto the same device as the model
    inputs = {k: v.to(device) for k, v in inputs.items() if isinstance(v, torch.Tensor)}

    with torch.inference_mode():
        generated_ids = image_caption_model.generate(
            input_ids=inputs["input_ids"],
            pixel_values=inputs["pixel_values"],
            max_new_tokens=1024,
            do_sample=False,
            num_beams=3,
        )

    return image_caption_processor.batch_decode(generated_ids, skip_special_tokens=True)
def _get_random_scene_type():
    """
    Randomly select a scene type to add to the prompt.
    """
    types = [
        "The scene is captured in real-life footage.",
        "The scene is computer-generated imagery.",
        "The scene appears to be from a movie.",
        "The scene appears to be from a TV show.",
        "The scene is captured in a studio.",
    ]
    return random.choice(types)
def _generate_and_decode_prompts(
    prompt_enhancer_model, prompt_enhancer_tokenizer, model_inputs, max_new_tokens: int
) -> List[str]:
    with torch.inference_mode():
        outputs = prompt_enhancer_model.generate(
            **model_inputs, max_new_tokens=max_new_tokens
        )
        generated_ids = [
            output_ids[len(input_ids) :]
            for input_ids, output_ids in zip(model_inputs.input_ids, outputs)
        ]
        decoded_prompts = prompt_enhancer_tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True
        )

    decoded_prompts = [p + f" {_get_random_scene_type()}." for p in decoded_prompts]
    print(decoded_prompts)
    return decoded_prompts
If only there was a way to print code changes more efficiently 🤔
I can't upload .py or zip files.
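Since uploads aren't possible, here's a compact recap of what the patch above actually changes, paraphrased from the code in this thread (a summary sketch, not the exact upstream diff):

# prompt_enhancer_nodes.py: move both models onto the device ComfyUI reports at load time
llm_model = llm_model.to(load_device)
image_caption_model = image_caption_model.to(load_device)

# prompt_enhancer_utils.py: always send the prepared inputs to the model's own device
model_inputs = prompt_enhancer_tokenizer(texts, return_tensors="pt").to(prompt_enhancer_model.device)
inputs = {k: v.to(device) for k, v in inputs.items() if isinstance(v, torch.Tensor)}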