parler-tts
NotImplementedError: Output channels > 65536 not supported at the MPS device
Traceback (most recent call last):
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/gradio/queueing.py", line 625, in process_events
    response = await route_utils.call_process_api(
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/gradio/route_utils.py", line 322, in call_process_api
    output = await app.get_blocks().process_api(
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/gradio/blocks.py", line 2047, in process_api
    result = await self.call_function(
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/gradio/blocks.py", line 1594, in call_function
    prediction = await anyio.to_thread.run_sync( # type: ignore
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/anyio/to_thread.py", line 56, in run_sync
    return await get_async_backend().run_sync_in_worker_thread(
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 2505, in run_sync_in_worker_thread
    return await future
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 1005, in run
    result = context.run(func, *args)
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/gradio/utils.py", line 869, in wrapper
    response = f(*args, **kwargs)
  File "/Users/user/LLM/parler-tts/helpers/gradio_demo/app.py", line 45, in gen_tts
    generation = model.generate(
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 116, in decorate_context
    return func(*args, **kwargs)
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/parler_tts/modeling_parler_tts.py", line 3637, in generate
    sample = self.audio_encoder.decode(audio_codes=sample[None, ...], **single_audio_decode_kwargs).audio_values
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/parler_tts/dac_wrapper/modeling_dac.py", line 139, in decode
    audio_values = self.model.decode(audio_values)
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/dac/model/dac.py", line 266, in decode
    return self.decoder(z)
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1747, in _call_impl
    return forward_call(*args, **kwargs)
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/dac/model/dac.py", line 144, in forward
    return self.model(x)
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1747, in _call_impl
    return forward_call(*args, **kwargs)
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/torch/nn/modules/container.py", line 250, in forward
    input = module(input)
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1747, in _call_impl
    return forward_call(*args, **kwargs)
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/dac/model/dac.py", line 112, in forward
    return self.block(x)
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1747, in _call_impl
    return forward_call(*args, **kwargs)
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/torch/nn/modules/container.py", line 250, in forward
    input = module(input)
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1747, in _call_impl
    return forward_call(*args, **kwargs)
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/dac/model/dac.py", line 36, in forward
    y = self.block(x)
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1747, in _call_impl
    return forward_call(*args, **kwargs)
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/torch/nn/modules/container.py", line 250, in forward
    input = module(input)
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1747, in _call_impl
    return forward_call(*args, **kwargs)
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/torch/nn/modules/conv.py", line 375, in forward
    return self._conv_forward(input, self.weight, self.bias)
  File "/Users/user/miniconda3/envs/parler-tts/lib/python3.10/site-packages/torch/nn/modules/conv.py", line 370, in _conv_forward
    return F.conv1d(
NotImplementedError: Output channels > 65536 not supported at the MPS device. As a temporary fix, you can set the environment variable PYTORCH_ENABLE_MPS_FALLBACK=1 to use the CPU as a fallback for this op. WARNING: this will be slower than running natively on MPS.
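For anyone hitting this on Apple Silicon: the fallback suggested in the error message generally has to be enabled before PyTorch initializes the MPS backend, i.e. before the first import of torch in the process. A minimal sketch of doing that from Python (the import order and the app.py path are assumptions based on the traceback above; exporting the variable in the shell before launching the demo works just as well):

import os

# Must be set before the first `import torch` in the process,
# otherwise the MPS backend is initialized without the fallback.
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"

import torch  # noqa: E402

# Equivalent from the shell (path assumed from the traceback above):
#   PYTORCH_ENABLE_MPS_FALLBACK=1 python helpers/gradio_demo/app.py
# Ops with no MPS kernel (such as the wide conv1d in the DAC decoder here)
# then run on the CPU instead of raising NotImplementedError, at a speed cost.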
I encountered this issue as well. Following this, I installed the nightly version of PyTorch, which fixed it.
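In case it helps others: after installing the nightly build (at the time of writing the PyTorch install selector suggests something like pip install --pre torch --index-url https://download.pytorch.org/whl/nightly/cpu for macOS), a quick sanity check that the conda env actually picked it up and that MPS is usable looks like this (the version string in the comment is only illustrative):

import torch

print(torch.__version__)                  # nightly builds carry a ".dev" suffix
print(torch.backends.mps.is_available())  # True on Apple Silicon with a working MPS backend
print(torch.backends.mps.is_built())      # True if this build was compiled with MPS support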