Error reported during inference: "NotImplementedError: No operator found for `memory_efficient_attention_forward` with inputs:"
Translate: 一只浣熊站在黑板前,上面写着 * --> A raccoon stands in front of the blackboard with the words *
Traceback (most recent call last):
File "D:\python\test\ai\AnyText\venv\Lib\site-packages\gradio\queueing.py", line 407, in call_prediction
output = await route_utils.call_process_api(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\python\test\ai\AnyText\venv\Lib\site-packages\gradio\route_utils.py", line 226, in call_process_api
output = await app.get_blocks().process_api(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\python\test\ai\AnyText\venv\Lib\site-packages\gradio\blocks.py", line 1550, in process_api
result = await self.call_function(
^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\python\test\ai\AnyText\venv\Lib\site-packages\gradio\blocks.py", line 1185, in call_function
prediction = await anyio.to_thread.run_sync(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\python\test\ai\AnyText\venv\Lib\site-packages\anyio\to_thread.py", line 33, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\python\test\ai\AnyText\venv\Lib\site-packages\anyio_backends_asyncio.py", line 877, in run_sync_in_worker_thread
return await future
^^^^^^^^^^^^
File "D:\python\test\ai\AnyText\venv\Lib\site-packages\anyio_backends_asyncio.py", line 807, in run
result = context.run(func, *args)
^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\python\test\ai\AnyText\venv\Lib\site-packages\gradio\utils.py", line 661, in wrapper
response = f(*args, **kwargs)
^^^^^^^^^^^^^^^^^^
File "D:\python\test\ai\AnyText\demo.py", line 169, in process
results, rtn_code, rtn_warning, debug_info = inference(input_data, mode=mode, **params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\python\test\ai\AnyText\venv\Lib\site-packages\modelscope\pipelines\base.py", line 219, in call
output = self._process_single(input, *args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\python\test\ai\AnyText\venv\Lib\site-packages\modelscope\pipelines\base.py", line 254, in _process_single
out = self.forward(out, **forward_params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\chenyixuan.cache\modelscope\modelscope_modules\cv_anytext_text_generation_editing\ms_wrapper.py", line 339, in forward
return super().forward(inputs, **forward_params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\python\test\ai\AnyText\venv\Lib\site-packages\modelscope\pipelines\base.py", line 397, in forward
return self.model(inputs, **forward_params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\python\test\ai\AnyText\venv\Lib\site-packages\modelscope\models\base\base_torch_model.py", line 36, in call
return self.postprocess(self.forward(*args, **kwargs))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\chenyixuan.cache\modelscope\modelscope_modules\cv_anytext_text_generation_editing\ms_wrapper.py", line 176, in forward
encoder_posterior = self.model.encode_first_stage(masked_img[None, ...])
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\python\test\ai\AnyText\venv\Lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "D:\python\test\ai\AnyText\ldm\models\diffusion\ddpm.py", line 870, in encode_first_stage
return self.first_stage_model.encode(x)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\python\test\ai\AnyText\ldm\models\autoencoder.py", line 83, in encode
h = self.encoder(x)
^^^^^^^^^^^^^^^
File "D:\python\test\ai\AnyText\venv\Lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\python\test\ai\AnyText\venv\Lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\python\test\ai\AnyText\ldm\modules\diffusionmodules\model.py", line 536, in forward
h = self.mid.attn_1(h)
^^^^^^^^^^^^^^^^^^
File "D:\python\test\ai\AnyText\venv\Lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\python\test\ai\AnyText\venv\Lib\site-packages\torch\nn\modules\module.py", line 1527, in call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\python\test\ai\AnyText\ldm\modules\diffusionmodules\model.py", line 258, in forward
out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=self.attention_op)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\python\test\ai\AnyText\venv\Lib\site-packages\xformers\ops\fmha_init.py", line 192, in memory_efficient_attention
return memory_efficient_attention(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\python\test\ai\AnyText\venv\Lib\site-packages\xformers\ops\fmha_init.py", line 290, in _memory_efficient_attention
return memory_efficient_attention_forward(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\python\test\ai\AnyText\venv\Lib\site-packages\xformers\ops\fmha_init.py", line 306, in _memory_efficient_attention_forward
op = _dispatch_fw(inp)
^^^^^^^^^^^^^^^^^
File "D:\python\test\ai\AnyText\venv\Lib\site-packages\xformers\ops\fmha\dispatch.py", line 94, in _dispatch_fw
return _run_priority_list(
^^^^^^^^^^^^^^^^^^^
File "D:\python\test\ai\AnyText\venv\Lib\site-packages\xformers\ops\fmha\dispatch.py", line 69, in _run_priority_list
raise NotImplementedError(msg)
NotImplementedError: No operator found for `memory_efficient_attention_forward` with inputs:
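For context, the exception originates in the last application frame, ldm/modules/diffusionmodules/model.py line 258, where the VAE attention block calls xformers.ops.memory_efficient_attention and the fmha dispatcher cannot find a kernel that accepts the given device/dtype/shape. Below is a minimal, self-contained sketch of that call pattern; the tensor shapes and dtypes are illustrative assumptions, not values taken from this run, and it is only meant to check whether the installed xformers build has a usable forward kernel on the current device:

```python
# Minimal sketch (assumed shapes/dtypes; requires torch and xformers installed).
import torch
import xformers.ops as xops

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

# xformers expects (batch, seq_len, num_heads, head_dim) for 4-D inputs.
q = torch.randn(1, 4096, 1, 64, device=device, dtype=dtype)
k = torch.randn_like(q)
v = torch.randn_like(q)

try:
    out = xops.memory_efficient_attention(q, k, v, attn_bias=None)
    print("attention kernel available, output shape:", out.shape)
except NotImplementedError as err:
    # The dispatcher walks its priority list of forward kernels; if none of
    # them supports this device/dtype/shape combination it raises the same
    # NotImplementedError shown at the end of the traceback above.
    print(err)
```

When this fails, the full exception message typically goes on to list the query/key/value metadata and the reason each candidate kernel was rejected, which usually points to the unsupported device or dtype (for example, running on CPU or with an xformers wheel built for a different CUDA/PyTorch version).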