
torch.jit.Error: The following operation failed in the TorchScript interpreter.

Open · JeremyWangK opened this issue on Jul 29 '24 · 1 comment

Run the test like:

```python
import wenet

model = wenet.load_model('chinese')
result = model.transcribe(r'./data/audio.wav')
print(result['text'])
```

With error:

```
C:\Users\Administrator\.conda\envs\wenet\python.exe G:\Develop\Project\wenet-main\test.py
Traceback (most recent call last):
  File "G:\Develop\Project\wenet-main\test.py", line 5, in <module>
    result = model.transcribe(r'./data/audio.wav')
  File "G:\Develop\Project\wenet-main\wenet\cli\model.py", line 139, in transcribe
    return self._decode(audio_file, tokens_info)
  File "C:\Users\Administrator\.conda\envs\wenet\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
    return func(*args, **kwargs)
  File "G:\Develop\Project\wenet-main\wenet\cli\model.py", line 89, in _decode
    encoder_out, _, _ = self.model.forward_encoder_chunk(feats, 0, -1)
torch.jit.Error: The following operation failed in the TorchScript interpreter.
Traceback of TorchScript, serialized code (most recent call last):
  File "code/torch/wenet/transformer/asr_model.py", line 72, in forward_encoder_chunk
                cnn_cache: Tensor=CONSTANTS.c0) -> Tuple[Tensor, Tensor, Tensor]:
    encoder = self.encoder
    _9 = (encoder).forward_chunk(xs, offset, required_cache_size, att_cache, cnn_cache, CONSTANTS.c1, )
         ~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
    return _9
  def is_bidirectional_decoder(self: torch.wenet.transformer.asr_model.ASRModel) -> bool:
  File "code/torch/wenet/transformer/encoder.py", line 115, in forward_chunk
      xs17 = (global_cmvn).forward(xs, )
    embed = self.embed
    _44 = (embed).forward(xs17, tmp_masks0, offset, )
          ~~~~~~~~~~~~~~ <--- HERE
    xs18, pos_emb, _45, = _44
    elayers = torch.size(att_cache, 0)
  File "code/torch/wenet/transformer/subsampling.py", line 24, in forward
    x2 = (out).forward(_1, )
    pos_enc = self.pos_enc
    x3, pos_emb, = (pos_enc).forward(x2, offset, )
                   ~~~~~~~~~~~~~~~~ <--- HERE
    _2 = torch.slice(torch.slice(torch.slice(x_mask), 1), 2, 2, None, 2)
    _3 = torch.slice(torch.slice(torch.slice(_2), 1), 2, 2, None, 2)
  File "code/torch/wenet/transformer/embedding.py", line 18, in forward
    xscale = self.xscale
    x0 = torch.mul(x, xscale)
    pos_emb = (self).position_encoding(offset, torch.size(x0, 1), False, )
              ~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
    dropout = self.dropout
    _0 = (dropout).forward(x0, )
  File "code/torch/wenet/transformer/embedding.py", line 36, in position_encoding
      pass
    else:
      ops.prim.RaiseException("AssertionError: ")
      ~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
    pe = self.pe
    pos_emb0 = torch.slice(torch.slice(pe), 1, offset0, torch.add(offset0, size))

Traceback of TorchScript, original code (most recent call last):
  File "/home/xcsong/workspace/wenet/wenet/transformer/asr_model.py", line 367, in forward_encoder_chunk
        """
        return self.encoder.forward_chunk(xs, offset, required_cache_size,
               ~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
                                          att_cache, cnn_cache)
  File "/home/xcsong/workspace/wenet/wenet/transformer/encoder.py", line 231, in forward_chunk
            xs = self.global_cmvn(xs)
        # NOTE(xcsong): Before embed, shape(xs) is (b=1, time, mel-dim)
        xs, pos_emb, _ = self.embed(xs, tmp_masks, offset)
                         ~~~~~~~~~~ <--- HERE
        # NOTE(xcsong): After embed, shape(xs) is (b=1, chunk_size, hidden-dim)
        elayers, cache_t1 = att_cache.size(0), att_cache.size(2)
  File "/home/xcsong/workspace/wenet/wenet/transformer/subsampling.py", line 225, in forward
        b, c, t, f = x.size()
        x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))
        x, pos_emb = self.pos_enc(x, offset)
                     ~~~~~~~~~~~~ <--- HERE
        return x, pos_emb, x_mask[:, :, 2::2][:, :, 2::2]
  File "/home/xcsong/workspace/wenet/wenet/transformer/embedding.py", line 145, in forward
        self.pe = self.pe.to(x.device)
        x = x * self.xscale
        pos_emb = self.position_encoding(offset, x.size(1), False)
                  ~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
        return self.dropout(x), self.dropout(pos_emb)
  File "/home/xcsong/workspace/wenet/wenet/transformer/embedding.py", line 100, in position_encoding
        # https://github.com/pytorch/pytorch/issues/69434
        if isinstance(offset, int):
            assert offset + size <= self.max_len
            ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
            pos_emb = self.pe[:, offset:offset + size]
        elif isinstance(offset, torch.Tensor) and offset.dim() == 0:  # scalar
RuntimeError: AssertionError:
```

JeremyWangK · Jul 29 '24 02:07

Which Python version are you using?

Mddct · Aug 07 '24 05:08