coremltools
ValueError: No matching select or slice.
import torch
import torch.nn as nn
import numpy as np
import coremltools as ct
import librosa
from fairseq import checkpoint_utils

# Load the test audio at 16 kHz and shape it into a (1, num_samples) batch.
audio, _ = librosa.load("/Users/admin/Desktop/yao/VC/data/test_women/women.mp3", sr=16000)
feats = torch.from_numpy(audio)
feats = feats.float()
if feats.dim() == 2:  # stereo: average the channels
    feats = feats.mean(-1)
assert feats.dim() == 1, feats.dim()
in_feats = feats.view(1, -1)

class CustomModule(nn.Module):
    def __init__(self):
        super(CustomModule, self).__init__()

    def forward(self, in_feats):
        device = 'cpu'
        # Load the HuBERT checkpoint with fairseq and run feature extraction.
        models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
            ["/Users/admin/Desktop/yao/VC/weights/hubert_base.pt"], suffix="")
        hubert_model = models[0]
        hubert_model = hubert_model.to(device)
        hubert_model = hubert_model.float()
        hubert_model.eval()
        pm = np.zeros(in_feats.shape, dtype=bool)
        in_feats = in_feats.to(device)
        padding_mask = torch.BoolTensor(pm).to(device)
        output_layer = 12
        with torch.no_grad():
            feats = hubert_model.extract_features(
                source=in_feats, padding_mask=padding_mask, output_layer=output_layer)[0]
        return feats

def export_hubert():
    hubert = CustomModule()
    hubert.eval()
    # Trace the wrapper module and convert it to Core ML.
    traced_model = torch.jit.trace(hubert, in_feats)
    mlmodel = ct.convert(
        traced_model,
        source='pytorch',
        inputs=[ct.TensorType(name="audio", shape=in_feats.shape, dtype=np.float32)],
        compute_units=ct.ComputeUnit.CPU_AND_GPU,
        minimum_deployment_target=ct.target.macOS13)
    mlmodel.save("tools/hubert.mlmodel")

if __name__ == "__main__":
    export_hubert()
The error is:
Traceback (most recent call last):
File "/Users/admin/Desktop/yao/VC/tools/export_coreml.py", line 144, in <module>
export_hubert()
File "/Users/admin/Desktop/yao/VC/tools/export_coreml.py", line 132, in export_hubert
mlmodel = ct.convert(
File "/Users/admin/opt/anaconda3/lib/python3.9/site-packages/coremltools/converters/_converters_entry.py", line 574, in convert
mlmodel = mil_convert(
File "/Users/admin/opt/anaconda3/lib/python3.9/site-packages/coremltools/converters/mil/converter.py", line 188, in mil_convert
return _mil_convert(model, convert_from, convert_to, ConverterRegistry, MLModel, compute_units, **kwargs)
File "/Users/admin/opt/anaconda3/lib/python3.9/site-packages/coremltools/converters/mil/converter.py", line 212, in _mil_convert
proto, mil_program = mil_convert_to_proto(
File "/Users/admin/opt/anaconda3/lib/python3.9/site-packages/coremltools/converters/mil/converter.py", line 286, in mil_convert_to_proto
prog = frontend_converter(model, **kwargs)
File "/Users/admin/opt/anaconda3/lib/python3.9/site-packages/coremltools/converters/mil/converter.py", line 108, in __call__
return load(*args, **kwargs)
File "/Users/admin/opt/anaconda3/lib/python3.9/site-packages/coremltools/converters/mil/frontend/torch/load.py", line 71, in load
converter = TorchConverter(
File "/Users/admin/opt/anaconda3/lib/python3.9/site-packages/coremltools/converters/mil/frontend/torch/converter.py", line 363, in __init__
p(self.graph)
File "/Users/admin/opt/anaconda3/lib/python3.9/site-packages/coremltools/converters/mil/frontend/torch/torchir_passes.py", line 151, in generate_tensor_assignment_ops
raise ValueError("No matching select or slice.")
ValueError: No matching select or slice.
In order to help you, we need to be able to reproduce the issue. Can you give us a minimal example that reproduces it? One that doesn't require loading an mp3 or a .pt file.
Thank you for your help. You can find hubert_base.pt at https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main, and you can replace the mp3 loading with in_feats = torch.rand([1, 64000]).
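For reference, a minimal sketch of that substitution (the rest of the export script above is unchanged, and hubert_base.pt still needs to be downloaded from the link above):

import torch

# Replace the librosa/mp3 loading block with a random waveform of the same
# layout: batch size 1, 64000 samples (4 seconds at 16 kHz).
in_feats = torch.rand([1, 64000])

# CustomModule, torch.jit.trace, and ct.convert then proceed exactly as in the
# script above, with in_feats used both for tracing and for the input shape.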
Using in_feats = torch.rand([1, 64000]) sounds good. However, loading a .pt file, even one from Hugging Face, is insecure. Can you do some further investigation and give us PyTorch code that we can just copy and paste?