ooe1123

Results: 118 comments by ooe1123

### demo.py ``` import torch import soundfile as sf from wavenet_model import * # from audio_data import WavenetDataset from wavenet_training import * from model_logging import * # initialize cuda option...

### export 〇 wavenet_model.py ``` class WaveNetModel(nn.Module): ... def queue_dilate(self, input, dilation, init_dilation, i): queue.enqueue(input.data[0]) ... def generate_fast(...): ... input = Variable(torch.FloatTensor(1, self.classes, 1).zero_()) input = input.scatter_(1, first_samples[0:1].view(1, -1, 1),...

## Qwen2-VL-2B_vis.onnx 〇 transformers/models/qwen2_vl/modeling_qwen2_vl.py ``` class VisionSdpaAttention(nn.Module): ... def forward( self, hidden_states: torch.Tensor, cu_seqlens: torch.Tensor, rotary_pos_emb: torch.Tensor = None ) -> torch.Tensor: ... for i in range(1, len(cu_seqlens)): attention_mask[..., cu_seqlens[i...

## Qwen2-VL-2B.onnx 〇 transformers/cache_utils.py ``` class DynamicCache(Cache): ... def __init__(self, num_hidden_layers: Optional[int] = None) -> None: super().__init__() self._seen_tokens = 0 # Used in `generate` to keep tally of how many...

## Qwen2-VL-2B_vis.onnx(none_torch.repeat_interleave) 〇 transformers/models/qwen2_vl/modeling_qwen2_vl.py ``` class Qwen2VisionTransformerPretrainedModel(Qwen2VLPreTrainedModel): ... def rot_pos_emb(self, grid_thw): pos_ids = [] for t, h, w in grid_thw: hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w) ... pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1)) pos_ids...

### t2s_encoder.onnx, t2s_fsdec.onnx, t2s_sdec.onnx ○ GPT_SoVITS/onnx_export.py ``` class T2SModel(nn.Module): ... def forward(self, ref_seq, text_seq, ref_bert, text_bert, ssl_content): early_stop_num = self.t2s_model.early_stop_num ... y, k, v, y_emb, x_example = self.first_stage_decoder(x, prompts) stop...

### vq_model.onnx ○ GPT_SoVITS/inference_webui.py ``` def get_tts_wav(...): ... for i_text,text in enumerate(texts): if model_version!="v3": ... else: fea_ref,ge = vq_model.decode_encp(prompt.unsqueeze(0), phoneme_ids0, refer) ``` ↓ ``` def get_tts_wav(...): ... for i_text,text in...

### vq_cfm.onnx ○ GPT_SoVITS/module/models.py ``` class CFM(torch.nn.Module): ... def inference(self, mu, x_lens, prompt, n_timesteps, temperature=1.0, inference_cfg_rate=0): ... for j in range(n_timesteps): t_tensor = torch.ones(x.shape[0], device=x.device,dtype=mu.dtype) * t d_tensor = torch.ones(x.shape[0],...

### bigvgan_model.onnx ○ GPT_SoVITS/inference_webui.py ``` def get_tts_wav(...): ... for i_text,text in enumerate(texts): if model_version!="v3": ... else: ... with torch.inference_mode(): wav_gen = bigvgan_model(cmf_res) ``` ↓ ``` def get_tts_wav(...): ... for i_text,text...

### vq_model.onnx (enable speed option) ○ GPT_SoVITS/module/models.py ``` class TextEncoder(nn.Module): ... def forward(self, y, y_lengths, text, text_lengths, ge, speed=1,test=None): ... if(speed!=1): y = F.interpolate(y, size=int(y.shape[-1] / speed)+1, mode="linear") y_mask =...