cog icon indicating copy to clipboard operation
cog copied to clipboard

cog.server.exceptions.FatalWorkerException: Predictor errored during setup: Ran out of input

Open tzktz opened this issue 1 year ago • 0 comments

Hi @mattt, @zeke,


class Predictor(BasePredictor):
    """Cog predictor combining face swap (inswapper), face enhancement
    (GFPGAN), Swin2SR super-resolution, LaMa inpainting and BriaRMBG
    background removal.  All models run on CPU."""

    def _download_weight(self, url, dest):
        """Download *url* to *dest* unless a non-empty copy already exists.

        The previous implementation ran unchecked ``wget`` commands: if a
        download failed on the build host, a missing or zero-byte weight
        file was left behind and ``torch.load`` later crashed with the
        opaque ``Ran out of input`` pickle error.  Verifying both the exit
        status and the resulting file size makes setup fail immediately
        with an actionable message instead.

        Raises:
            RuntimeError: if the download command fails or produces an
                empty file.
        """
        if os.path.exists(dest) and os.path.getsize(dest) > 0:
            return
        # NOTE(review): shelling out to wget is kept for parity with the
        # original; -O pins the destination so no os.chdir() is needed.
        status = os.system(f"wget -O {dest} {url}")
        if status != 0 or not os.path.exists(dest) or os.path.getsize(dest) == 0:
            raise RuntimeError(f"Failed to download weight file {dest} from {url}")

    def setup(self):
        """Download any missing weight files, then load all models into
        memory so repeated predictions are cheap."""
        os.makedirs('weights', exist_ok=True)

        # Destination path -> source URL.  Paths are relative to the
        # working directory, matching how the models are opened below.
        weight_urls = {
            'weights/Swin2SR_ClassicalSR_X4_64.pth':
                'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth',
            'weights/GFPGANv1.4.pth':
                'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth',
            'weights/inswapper_128.onnx':
                'https://huggingface.co/ashleykleynhans/inswapper/resolve/main/inswapper_128.onnx',
            'weights/Swin2SR_CompressedSR_X4_48.pth':
                'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth',
            'weights/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth':
                'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth',
            'model.pth':
                'https://github.com/codeplugtech/Background_remover/releases/download/v0.0.1/model.pth',
        }
        # No os.chdir(): mutating the process-wide working directory is
        # fragile — every later relative path would silently depend on it.
        for dest, url in weight_urls.items():
            self._download_weight(url, dest)

        self.device = "cpu"
        self.setup_super_resolution()

        self.simple_lama = SimpleLama()

        # Background removal model (BriaRMBG), weights from model.pth.
        self.net = BriaRMBG()
        self.net.load_state_dict(torch.load("model.pth", map_location=self.device))
        self.net.to(self.device).eval()

        # Face swap / enhancement / analysis models.
        self.face_swapper = insightface.model_zoo.get_model(
            'weights/inswapper_128.onnx',
            providers=onnxruntime.get_available_providers())
        self.face_enhancer = gfpgan.GFPGANer(model_path='weights/GFPGANv1.4.pth', upscale=1)
        self.face_analyser = FaceAnalysis(name='buffalo_l')
        self.face_analyser.prepare(ctx_id=0, det_size=(640, 640))

    def get_face(self, img_data):
        """Return the largest face detected in *img_data*, or None.

        The previous bare ``except:`` hid every error; the only expected
        failure is ``max()`` on an empty detection list, so we test for
        that explicitly and let real errors propagate.
        """
        faces = self.face_analyser.get(img_data)
        if not faces:
            print("No face found")
            return None
        return max(faces,
                   key=lambda f: (f.bbox[2] - f.bbox[0]) * (f.bbox[3] - f.bbox[1]))

    def setup_super_resolution(self):
        """Load the three Swin2SR models (classical / compressed / real
        world SR, all x4) into ``self.models`` keyed by task name."""
        print("Loading super-resolution models...")

        args = Namespace()
        args.scale = 4
        args.large_model = False

        tasks = ["classical_sr", "compressed_sr", "real_sr"]
        paths = [
            "weights/Swin2SR_ClassicalSR_X4_64.pth",
            "weights/Swin2SR_CompressedSR_X4_48.pth",
            "weights/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth",
        ]
        # Training patch size per checkpoint; define_model presumably
        # needs it to rebuild the matching architecture — confirm.
        sizes = [64, 48, 128]

        self.models = {}
        for task, path, size in zip(tasks, paths, sizes):
            args.training_patch_size = size
            args.task, args.model_path = task, path
            print(f"Loading model for task: {task}, path: {path}, size: {size}")
            self.models[task] = define_model(args)
            self.models[task].eval()
            self.models[task] = self.models[task].to(self.device)
            print(f"Model for task {task} loaded successfully.")

I have loaded multiple models in Cog and it works fine locally. When I pushed it to Replicate, the setup error below occurred. Screenshot (63)

tzktz avatar Feb 20 '24 05:02 tzktz