MuseTalk icon indicating copy to clipboard operation
MuseTalk copied to clipboard

怎么多卡执行

Open Liutengjie opened this issue 5 months ago • 5 comments

GPU能多卡推理嘛,如何设置

Liutengjie avatar Jul 11 '25 09:07 Liutengjie

同问。。

WangSiyao666 avatar Aug 19 '25 08:08 WangSiyao666

可以的 我是改的源码,在inference里启动n个gpu进程,然后每个进程都加载一遍模型,将数据切分交给gpu运算,最后再拼接。

xub69961415 avatar Aug 22 '25 07:08 xub69961415

可以的 我是改的源码,在inference里启动n个gpu进程,然后每个进程都加载一遍模型,将数据切分交给gpu运算,最后再拼接。

大佬方便贴一下源码么

WangSiyao666 avatar Aug 22 '25 09:08 WangSiyao666

import argparse
import asyncio  # used in main()'s shutdown path; previously only available via a star-import
import copy
import glob
import json
import multiprocessing as mp
import os
import pickle
import queue
import shutil
import subprocess
import sys
import threading
import time
from dataclasses import dataclass
from operator import truediv  # NOTE(review): unused in visible code — likely an accidental auto-import
from pathlib import Path
from queue import Empty, Queue
from threading import Lock
from typing import Any, Dict, Optional

import cv2
import numpy as np
import torch
from mpmath import monitor  # NOTE(review): unused in visible code — likely an accidental auto-import
from omegaconf import OmegaConf
from tqdm import tqdm
from transformers import WhisperModel

from musetalk.utils.audio_processor import AudioProcessor
from musetalk.utils.blending import get_image_prepare_material, get_image_blending
from musetalk.utils.face_parsing import FaceParsing
from musetalk.utils.preprocessing import get_landmark_and_bbox, read_imgs
from musetalk.utils.utils import datagen, load_all_model

from .multimodal_queues import *
from .watchdog_inference import FolderWatcher, mp3_queue
from .websocket_server import *

# Process-wide lock. NOTE(review): not acquired anywhere in the visible code —
# confirm before removing.
lock = threading.Lock()

# Redis-backed store used to buffer per-sentence frame data.
redis_manager = RedisManager()


@dataclass
class FrameData:
    """One generated video frame paired with its audio feature chunk."""
    video: np.ndarray   # blended output frame
    audio: torch.Tensor  # whisper feature chunk for this frame
    timestamp: float    # wall-clock time the frame was produced
    frame_id: int       # index of the frame within the sentence


# Module-level WebSocket streamer for pushing realtime output to clients.
streamer = WebSocketStreamer(host="0.0.0.0", port=7860)

def fast_check_ffmpeg():
    """Return True if an ffmpeg executable is reachable on PATH."""
    try:
        subprocess.run(["ffmpeg", "-version"], capture_output=True, check=True)
        return True
    except (OSError, subprocess.SubprocessError):
        # FileNotFoundError when ffmpeg is missing, CalledProcessError on a
        # non-zero exit. The original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit, which should propagate.
        return False

def video2imgs(vid_path, save_path, ext='.png', cut_frame=10000000):
    """Decode up to ``cut_frame + 1`` frames of a video into numbered images.

    Frames are written as {save_path}/00000000{ext}, 00000001{ext}, ...
    ``ext`` may be given with or without a leading dot (callers in this file
    pass 'png'); the original implementation ignored it and hard-coded '.png'.
    """
    if not ext.startswith('.'):
        ext = '.' + ext
    cap = cv2.VideoCapture(vid_path)
    try:
        count = 0
        while count <= cut_frame:
            ret, frame = cap.read()
            if not ret:
                break
            cv2.imwrite(f"{save_path}/{count:08d}{ext}", frame)
            count += 1
    finally:
        # The original leaked the capture handle (no release()).
        cap.release()

def osmakedirs(path_list):
    """Create every directory in ``path_list``, ignoring ones that already exist.

    ``exist_ok=True`` replaces the original exists()-then-makedirs pattern,
    which was racy (TOCTOU) if another process created the directory between
    the check and the call.
    """
    for path in path_list:
        os.makedirs(path, exist_ok=True)

@torch.no_grad()  # NOTE(review): decorating a *class* with no_grad wraps only
                  # the class creation, not its methods; kept for parity with
                  # upstream MuseTalk — confirm inference runs under no_grad.
class Avatar:
    """Pre-processed avatar assets (frames, masks, latents) and inference entry points.

    The paste dropped the name-mangled underscores: the constructor must be
    ``__init__`` — as plain ``init`` it collided with the ``init`` preparation
    method and the constructor never ran.
    """

    def __init__(self, avatar_id, video_path, bbox_shift, batch_size, preparation):
        self.avatar_id = avatar_id
        self.video_path = video_path
        self.bbox_shift = bbox_shift

        # Base path depends on the MuseTalk version; hard-coded to v15 here.
        if True:  # v15
            self.base_path = f"./results/v15/avatars/{avatar_id}"
        else:  # v1
            self.base_path = f"./results/avatars/{avatar_id}"

        self.avatar_path = self.base_path
        self.full_imgs_path = f"{self.avatar_path}/full_imgs"
        self.coords_path = f"{self.avatar_path}/coords.pkl"
        self.latents_out_path = f"{self.avatar_path}/latents.pt"
        self.video_out_path = f"{self.avatar_path}/vid_output/"
        self.mask_out_path = f"{self.avatar_path}/mask"
        # "avator" misspelling kept for on-disk compatibility with existing avatars.
        self.avatar_info_path = f"{self.avatar_path}/avator_info.json"
        self.avatar_info = {
            "avatar_id": avatar_id,
            "video_path": video_path,
            "bbox_shift": bbox_shift,
            "version": 'v15'
        }
        self.preparation = preparation
        self.batch_size = batch_size
        self.idx = 0  # shared frame cursor, advanced by process_frames()
        self.init()   # load cached material, or build it when preparation is set

def init(self):
    """Load cached avatar material, or (re)build it when preparation is requested.

    The original duplicated the cache-loading and re-creation sequences three
    times; they are factored into ``_load_cached_material`` and
    ``_recreate_material`` with identical behavior.
    """
    if self.preparation:
        if os.path.exists(self.avatar_path):
            response = input(f"{self.avatar_id} exists, Do you want to re-create it ? (y/n)")
            if response.lower() == "y":
                self._recreate_material()
            else:
                self._load_cached_material()
        else:
            self._recreate_material()
    else:
        if not os.path.exists(self.avatar_path):
            print(f"{self.avatar_id} does not exist, you should set preparation to True")
            sys.exit()

        with open(self.avatar_info_path, "r") as f:
            avatar_info = json.load(f)

        # A changed bbox_shift invalidates every cached coordinate/mask.
        if avatar_info['bbox_shift'] != self.avatar_info['bbox_shift']:
            response = input(f" 【bbox_shift】 is changed, you need to re-create it ! (c/continue)")
            if response.lower() == "c":
                self._recreate_material()
            else:
                sys.exit()
        else:
            self._load_cached_material()

def _recreate_material(self):
    """Wipe any existing avatar directory and rebuild all material from the source video."""
    if os.path.exists(self.avatar_path):
        shutil.rmtree(self.avatar_path)
    print("*********************************")
    print(f"  creating avator: {self.avatar_id}")
    print("*********************************")
    osmakedirs([self.avatar_path, self.full_imgs_path, self.video_out_path, self.mask_out_path])
    self.prepare_material()

def _load_cached_material(self):
    """Load latents, coords, frames, masks and mask coords produced by prepare_material()."""
    self.input_latent_list_cycle = torch.load(self.latents_out_path)
    with open(self.coords_path, 'rb') as f:
        self.coord_list_cycle = pickle.load(f)
    input_img_list = glob.glob(os.path.join(self.full_imgs_path, '*.[jpJP][pnPN]*[gG]'))
    input_img_list = sorted(input_img_list, key=lambda x: int(os.path.splitext(os.path.basename(x))[0]))
    self.frame_list_cycle = read_imgs(input_img_list)
    with open(self.mask_coords_path, 'rb') as f:
        self.mask_coords_list_cycle = pickle.load(f)
    input_mask_list = glob.glob(os.path.join(self.mask_out_path, '*.[jpJP][pnPN]*[gG]'))
    input_mask_list = sorted(input_mask_list, key=lambda x: int(os.path.splitext(os.path.basename(x))[0]))
    self.mask_list_cycle = read_imgs(input_mask_list)

def prepare_material(self):
    """Build all cached material for this avatar: full frames, face latents,
    blending masks and their coordinates, saved to the avatar directory.

    NOTE(review): relies on module-level ``vae`` and ``fp`` globals being
    initialized by the hosting process (see gpu_worker) — confirm they are
    set before this is called.
    """
    print("preparing data materials ... ...")
    with open(self.avatar_info_path, "w") as f:
        json.dump(self.avatar_info, f)

    if os.path.isfile(self.video_path):
        # Source is a video: explode it into numbered PNG frames.
        video2imgs(self.video_path, self.full_imgs_path, ext='png')
    else:
        # Source is a directory of PNG frames: copy them over in sorted order.
        print(f"copy files in {self.video_path}")
        files = os.listdir(self.video_path)
        files.sort()
        files = [file for file in files if file.split(".")[-1] == "png"]
        for filename in files:
            # The pasted code had the f-string placeholder corrupted to
            # "(unknown)"; restored from the loop variable.
            shutil.copyfile(f"{self.video_path}/{filename}", f"{self.full_imgs_path}/{filename}")
    input_img_list = sorted(glob.glob(os.path.join(self.full_imgs_path, '*.[jpJP][pnPN]*[gG]')))

    print("extracting landmarks...")
    coord_list, frame_list = get_landmark_and_bbox(input_img_list, self.bbox_shift)
    input_latent_list = []
    # Placeholder marks frames where no sufficient bbox was found; skip them.
    coord_placeholder = (0.0, 0.0, 0.0, 0.0)
    for idx, (bbox, frame) in enumerate(zip(coord_list, frame_list)):
        if bbox == coord_placeholder:
            continue
        x1, y1, x2, y2 = bbox
        if True:  # v15: add extra chin margin, clamped to the frame height
            y2 = y2 + 10
            y2 = min(y2, frame.shape[0])
            coord_list[idx] = [x1, y1, x2, y2]  # write the adjusted bbox back
        crop_frame = frame[y1:y2, x1:x2]
        resized_crop_frame = cv2.resize(crop_frame, (256, 256), interpolation=cv2.INTER_LANCZOS4)
        latents = vae.get_latents_for_unet(resized_crop_frame)
        input_latent_list.append(latents)

    # Mirror forward+backward so playback can loop seamlessly.
    self.frame_list_cycle = frame_list + frame_list[::-1]
    self.coord_list_cycle = coord_list + coord_list[::-1]
    self.input_latent_list_cycle = input_latent_list + input_latent_list[::-1]
    self.mask_coords_list_cycle = []
    self.mask_list_cycle = []

    for i, frame in enumerate(tqdm(self.frame_list_cycle)):
        cv2.imwrite(f"{self.full_imgs_path}/{str(i).zfill(8)}.png", frame)

        x1, y1, x2, y2 = self.coord_list_cycle[i]
        if True:  # v15 uses 'jaw' blending mode
            mode = 'jaw'
        else:
            mode = "raw"
        mask, crop_box = get_image_prepare_material(frame, [x1, y1, x2, y2], fp=fp, mode=mode)

        cv2.imwrite(f"{self.mask_out_path}/{str(i).zfill(8)}.png", mask)
        self.mask_coords_list_cycle += [crop_box]
        self.mask_list_cycle.append(mask)

    with open(self.mask_coords_path, 'wb') as f:
        pickle.dump(self.mask_coords_list_cycle, f)

    with open(self.coords_path, 'wb') as f:
        pickle.dump(self.coord_list_cycle, f)

    torch.save(self.input_latent_list_cycle, os.path.join(self.latents_out_path))

def process_frames(self, res_frame_queue, video_len, skip_save_images,sentence_id,whisper_chunks,gpu_id):
    """Consumer thread: blend generated face crops back into full frames.

    Pulls raw generated face frames from ``res_frame_queue``, resizes each to
    its face bbox, blends it into a copy of the cached original frame, and
    optionally writes the result as a PNG. ``self.idx`` is the shared frame
    cursor and is advanced only here; the producer (``inference``) only fills
    the queue.
    """

    print(video_len)
    while True:

        # NOTE(review): loop exits at video_len - 1, so the final queued frame
        # is never consumed — matches the exit condition as written; confirm
        # this off-by-one is intentional.
        if self.idx >= video_len - 1:
            break
        try:
            start = time.time()  # captured but unused — kept as-is
            res_frame = res_frame_queue.get(block=True, timeout=1)
        except queue.Empty:
            # Producer not keeping up yet; re-check the exit condition.
            continue

        # The *_cycle lists are mirrored forward+backward (see
        # prepare_material), hence the modulo indexing.
        bbox = self.coord_list_cycle[self.idx % (len(self.coord_list_cycle))]
        ori_frame = copy.deepcopy(self.frame_list_cycle[self.idx % (len(self.frame_list_cycle))])
        x1, y1, x2, y2 = bbox
        try:
            res_frame = cv2.resize(res_frame.astype(np.uint8), (x2 - x1, y2 - y1))
        except:
            # NOTE(review): bare except silently drops frames (e.g. degenerate
            # bbox with zero width/height) — consider catching cv2.error.
            continue
        mask = self.mask_list_cycle[self.idx % (len(self.mask_list_cycle))]
        mask_crop_box = self.mask_coords_list_cycle[self.idx % (len(self.mask_coords_list_cycle))]
        combine_frame = get_image_blending(ori_frame, res_frame, bbox, mask, mask_crop_box)
        # print(type(combine_frame),type(whisper_chunks[self.idx]))
        # data = {
        #     'sentence_id': sentence_id,
        #     'video': combine_frame,
        #     'audio': whisper_chunks[self.idx],
        #     'timing': time.time(),
        #     'idx': self.idx,
        #     'is_last':(self.idx == video_len-1)
        #                 }
        # NOTE(review): `data` is built but all of its consumers (websocket
        # streamer, redis) are commented out below — it is currently unused.
        data = {
            'video': combine_frame,
            'audio': whisper_chunks[self.idx],
            'timing': time.time(),
            'idx': self.idx
                        }
        # if hasattr(self, 'streamer') and self.streamer:
        #     self.streamer.run_async(streamer.send_frame_data(data))

        # redis_manager.add_data(str(sentence_id), data)

        if skip_save_images is False:
            cv2.imwrite(f"{self.avatar_path}/tmp/{sentence_id}/{str(self.idx).zfill(8)}.png", combine_frame)
        self.idx = self.idx + 1

def inference(self, audio_path, sentence_id, fps, skip_save_images,audio_processor,weight_dtype,whisper,device,pe,timesteps,fp,unet,vae,gpu_id):
    """Generate a lip-synced clip for one audio file.

    Producer side of the pipeline: extracts Whisper features for the audio,
    runs UNet + VAE decode batch-by-batch, and feeds decoded frames to the
    ``process_frames`` consumer thread for blending. When frames are saved,
    they are muxed with the audio via ffmpeg into an mp4. Models and
    processors are passed in per call so each GPU worker process uses its
    own copies.
    """
    if not skip_save_images:
        os.makedirs(self.avatar_path + f'/tmp/{sentence_id}', exist_ok=True)
    print("start inference")
    ############################################## extract audio feature ##############################################
    start_time = time.time()
    # Extract audio features
    whisper_input_features, librosa_length = audio_processor.get_audio_feature(audio_path,
                                                                               weight_dtype=weight_dtype)
    # NOTE(review): padding lengths are hard-coded to 2 even though
    # args.audio_padding_length_left/right exist — confirm this is intended.
    whisper_chunks = audio_processor.get_whisper_chunk(
        whisper_input_features,
        device,
        weight_dtype,
        whisper,
        librosa_length,
        fps=fps,
        audio_padding_length_left=2,
        audio_padding_length_right=2,
    )
    print(f"processing audio:{audio_path} costs {(time.time() - start_time) * 1000}ms")
    ############################################## inference batch by batch ##############################################
    video_num = len(whisper_chunks)
    res_frame_queue = queue.Queue()
    self.idx = 0  # reset the shared frame cursor for this sentence
    # Create a sub-thread and start it: it consumes frames as we produce them.
    process_thread = threading.Thread(target=self.process_frames,
                                      args=(res_frame_queue, video_num, skip_save_images,sentence_id,whisper_chunks,gpu_id))
    process_thread.start()

    # Pair each whisper chunk with a cached face latent, in batches.
    gen = datagen(whisper_chunks,
                  self.input_latent_list_cycle,
                  self.batch_size)
    start_time = time.time()

    for i, (whisper_batch, latent_batch) in enumerate(gen):
    # for i, (whisper_batch, latent_batch) in enumerate(
    #         tqdm(gen, total=int(np.ceil(float(video_num) / self.batch_size)))):
        # Positional encoding of the audio features conditions the UNet.
        audio_feature_batch = pe(whisper_batch.to(device))
        latent_batch = latent_batch.to(device=device, dtype=unet.model.dtype)

        pred_latents = unet.model(latent_batch,
                                  timesteps,
                                  encoder_hidden_states=audio_feature_batch).sample
        pred_latents = pred_latents.to(device=device, dtype=vae.vae.dtype)
        recon = vae.decode_latents(pred_latents)
        for res_frame in recon:
            # print(len(recon))
            res_frame_queue.put(res_frame)
    # Close the queue and sub-thread after all tasks are completed
    process_thread.join()

    if skip_save_images is True:
        print('Total process time of {} frames without saving images = {}s'.format(
            video_num,
            time.time() - start_time))
    else:
        print('Total process time of {} frames including saving images = {}s'.format(
            video_num,
            time.time() - start_time))
    t4 = time.time()
    if sentence_id is not None and skip_save_images is False:
        # optional
        # NOTE(review): os.system with interpolated paths is shell-injection
        # prone if audio_path/avatar_path ever come from untrusted input —
        # prefer subprocess.run([...], shell=False).
        cmd_img2video = f"ffmpeg -y -v warning -r {fps} -f image2 -i {self.avatar_path}/tmp/{str(sentence_id)}/%08d.png -vcodec libx264 -vf format=yuv420p -crf 18 {self.avatar_path}/temp_{str(sentence_id)}.mp4"
        print(cmd_img2video)
        os.system(cmd_img2video)

        filename = Path(audio_path).stem  # file name without extension
        # print('filename',filename,sentence_id)

        # Output directory is hard-coded for this deployment.
        output_vid = os.path.join('/mnt/data/xb/thg_output_mp4/', str(filename) + ".mp4")  # on
        cmd_combine_audio = f"ffmpeg -y -v warning -i {audio_path} -i {self.avatar_path}/temp_{str(sentence_id)}.mp4 {output_vid}"
        print(cmd_combine_audio)
        os.system(cmd_combine_audio)

        # Clean up the intermediate mp4 and the per-sentence PNG directory.
        os.remove(f"{self.avatar_path}/temp_{sentence_id}.mp4")
        shutil.rmtree(f"{self.avatar_path}/tmp/{sentence_id}")
        print(f"result is save to {output_vid}")
    print('保存耗时',time.time()-t4)
    print("\n")

def gpu_worker(rank, gpu_id, task_queue, args):
    """GPU worker process: load all models once, then serve inference tasks
    from ``task_queue`` until told to stop.

    Each worker binds to one CUDA device, keeps its own copies of VAE/UNet/
    Whisper/face-parser, and loads the shared avatar. Tasks are
    ``(audio_path, sentence_id)`` tuples; ``(None, None)`` is the shutdown
    sentinel sent by main() — the original never checked for it, so workers
    would crash inside inference() and keep looping.
    """
    torch.cuda.set_device(gpu_id)
    device = torch.device(f"cuda:{gpu_id}")

    # 1. Load models — each GPU process keeps an independent copy.
    print(f"GPU {gpu_id} loading models...")
    vae, unet, pe = load_all_model(
        unet_model_path=args.unet_model_path,
        vae_type=args.vae_type,
        unet_config=args.unet_config,
        device=device
    )
    timesteps = torch.tensor([0], device=device)
    pe = pe.half().to(device)
    vae.vae = vae.vae.half().to(device)
    unet.model = unet.model.half().to(device)

    # Audio feature extractor plus a frozen Whisper encoder.
    audio_processor = AudioProcessor(feature_extractor_path=args.whisper_dir)
    weight_dtype = unet.model.dtype
    whisper = WhisperModel.from_pretrained(args.whisper_dir)
    whisper = whisper.to(device=device, dtype=weight_dtype).eval()
    whisper.requires_grad_(False)

    # Face parser; v15 exposes cheek-width tuning.
    if args.version == "v15":
        fp = FaceParsing(
            left_cheek_width=args.left_cheek_width,
            right_cheek_width=args.right_cheek_width
        )
    else:  # v1
        fp = FaceParsing()

    data_preparation = False  # workers always load pre-built material
    all_avatar = {
        'avator_xu': 'data/video/ava1.mp4',
        'avator_xu_2': 'data/video/ava2.mp4',
        'avator480': 'data/video/ava480.mp4',
    }
    avatar_id = args.avatarid
    video_path = all_avatar.get(avatar_id)
    print('开始加载avatar')
    avatar = Avatar(
        avatar_id=avatar_id,
        video_path=video_path,
        bbox_shift=args.bbox_shift,
        batch_size=args.batch_size,
        preparation=data_preparation,
    )

    while True:
        try:
            # Blocks for up to 5s waiting for work. (The original comment
            # claimed this was non-blocking — it is not.)
            audio_path, sentence_id = task_queue.get(timeout=5)
        except Empty:
            if not args.keep_alive:
                print(f"GPU {gpu_id} stopping due to empty queue")
                break
            continue

        if audio_path is None:
            # Shutdown sentinel from main().
            print(f"GPU {gpu_id} received shutdown sentinel")
            break

        try:
            print(f"GPU {gpu_id} processing: {audio_path}")
            t0 = time.time()
            avatar.inference(audio_path, sentence_id, args.fps, args.skip_save_images,
                             audio_processor, weight_dtype, whisper, device, pe,
                             timesteps, fp, unet, vae, gpu_id)
            t1 = time.time()
            print(f"GPU {gpu_id} finished in {t1 - t0:.2f}s")
        except Exception as e:
            # Keep the worker alive across per-task failures; log and continue.
            print(f"GPU {gpu_id} error: {str(e)}")

def main():
    """Multi-GPU realtime driver.

    This script simulates online chatting: face detection and face parsing
    are applied in advance, so during online chatting only the UNet and the
    VAE decoder are involved, which makes MuseTalk real-time. It spawns one
    worker process per GPU, starts a WebSocket server and a folder watcher,
    and dispatches each discovered mp3 to the shared task queue.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", type=str, default="v15", choices=["v1", "v15"], help="Version of MuseTalk: v1 or v15")
    parser.add_argument("--ffmpeg_path", type=str, default="./ffmpeg-4.4-amd64-static/", help="Path to ffmpeg executable")
    parser.add_argument("--gpu_ids", type=str, default="4,5,6,7", help="Comma-separated GPU IDs to use")
    parser.add_argument("--vae_type", type=str, default="sd-vae", help="Type of VAE model")
    parser.add_argument("--unet_config", type=str, default="./models/musetalk/musetalk.json", help="Path to UNet configuration file")
    parser.add_argument("--unet_model_path", type=str, default="./models/musetalk/pytorch_model.bin", help="Path to UNet model weights")
    parser.add_argument("--whisper_dir", type=str, default="./models/whisper", help="Directory containing Whisper model")
    parser.add_argument("--inference_config", type=str, default="configs/inference/realtime.yaml")
    parser.add_argument("--bbox_shift", type=int, default=-5, help="Bounding box shift value")
    parser.add_argument("--result_dir", default='./results', help="Directory for output results")
    parser.add_argument("--extra_margin", type=int, default=10, help="Extra margin for face cropping")
    parser.add_argument("--fps", type=int, default=25, help="Video frames per second")
    parser.add_argument("--audio_padding_length_left", type=int, default=2, help="Left padding length for audio")
    parser.add_argument("--audio_padding_length_right", type=int, default=2, help="Right padding length for audio")
    parser.add_argument("--batch_size", type=int, default=4, help="Batch size for inference")
    parser.add_argument("--output_vid_name", type=str, default=None, help="Name of output video file")
    parser.add_argument("--use_saved_coord", default=True, help='Use saved coordinates to save time')
    parser.add_argument("--saved_coord", action="store_true", help='Save coordinates for future use')
    parser.add_argument("--parsing_mode", default='jaw', help="Face blending parsing mode")
    parser.add_argument("--left_cheek_width", type=int, default=90, help="Width of left cheek region")
    parser.add_argument("--right_cheek_width", type=int, default=90, help="Width of right cheek region")
    # NOTE(review): these default-True flags are not store_true actions, so a
    # CLI value like "--skip_save_images False" arrives as the truthy string
    # "False" — consider argparse.BooleanOptionalAction.
    parser.add_argument("--skip_save_images", default=True, help="Whether skip saving images for better generation speed calculation")
    parser.add_argument("--keep_alive", default=True)
    parser.add_argument("--avatarid", type=str, default='avator480')
    args = parser.parse_args()

    # Make ffmpeg reachable if it is not already on PATH.
    if not fast_check_ffmpeg():
        print("Adding ffmpeg to PATH")
        # Choose path separator based on operating system
        path_separator = ';' if sys.platform == 'win32' else ':'
        os.environ["PATH"] = f"{args.ffmpeg_path}{path_separator}{os.environ['PATH']}"
        if not fast_check_ffmpeg():
            print("Warning: Unable to find ffmpeg, please ensure ffmpeg is properly installed")

    # Start the WebSocket server in a daemon thread for realtime streaming.
    streamer = WebSocketStreamer(host="0.0.0.0", port=7860)
    ws_thread = threading.Thread(target=streamer.start_background, daemon=True)
    ws_thread.start()

    # Shared task queue; 'spawn' so every worker gets a clean CUDA context.
    ctx = mp.get_context('spawn')
    task_queue = ctx.Queue()

    # Launch one worker process per GPU.
    gpu_ids = [int(gid) for gid in args.gpu_ids.split(",")]
    processes = []
    print('启动多卡模型加载')
    for rank, gpu_id in enumerate(gpu_ids):
        p = ctx.Process(
            target=gpu_worker,
            args=(rank, gpu_id, task_queue, args)
        )
        p.start()
        processes.append(p)

    # Watch the TTS output folder for newly produced mp3 files.
    print('启动文件监听线程')
    audio_stream_path = '/mnt/data/xb/tts_output_mp3'
    watcher = FolderWatcher(audio_stream_path)
    watcher.start()

    # Main loop: drain mp3_queue and hand tasks to whichever GPU is idle.
    try:
        while True:
            if len(mp3_queue) > 0:
                audio_path = mp3_queue[0]
                # File names end in "..._0001.mp3"; the number is the sentence index.
                sentence_number_str = audio_path.split('_')[-1].split('.')[0]
                sentence_number_int = int(sentence_number_str)
                del mp3_queue[0]
                task_queue.put((audio_path, sentence_number_int))

            time.sleep(0.1)  # avoid busy-spinning the CPU
    except KeyboardInterrupt:
        print("Stopping workers...")
        watcher.stop()
        watcher.join()
        print("\n正在关闭服务...")
        # Stop the WebSocket server on its own event loop.
        asyncio.run_coroutine_threadsafe(streamer.stop_server(), streamer.loop)
        ws_thread.join(timeout=5)
    finally:
        if not args.keep_alive:
            for _ in range(len(gpu_ids)):  # one (None, None) sentinel per worker
                task_queue.put((None, None))
        for p in processes:
            p.join()


# The pasted code had the dunder guard stripped to `if name == "main"`,
# which raises NameError; restored.
if __name__ == "__main__":
    main()

xub69961415 avatar Aug 25 '25 02:08 xub69961415

里面需要自己改一下输出的默认文件夹和gpuid等,websocket可以注释掉,这块我之前用来传输实时输出的

xub69961415 avatar Aug 25 '25 02:08 xub69961415