
How can I predict segmentation results after training?

Open simi-ck opened this issue 2 years ago • 12 comments

After training, how can I predict the segmentation result for a given image? Thanks!

simi-ck avatar Sep 15 '22 06:09 simi-ck

import numpy as np
import torch
from PIL import Image

def letterbox_image(image, size):
    image = image.convert("RGB")
    iw, ih = image.size          # original size
    w, h = size                  # target size
    scale = min(w/iw, h/ih)      # pick the smaller scale factor
    nw = int(iw*scale)
    nh = int(ih*scale)

    image = image.resize((nw, nh), Image.BICUBIC)
    new_image = Image.new('RGB', size, (128, 128, 128))  # gray background
    new_image.paste(image, ((w-nw)//2, (h-nh)//2))
    return new_image, nw, nh

image = Image.open('your_image.png')  # change this to your image path
image = image.convert('RGB')
original_h = np.array(image).shape[0]
original_w = np.array(image).shape[1]

# resize and normalize
image, nw, nh = letterbox_image(image, (config['input_w'], config['input_h']))
images = [np.array(image)/255]
images = np.transpose(images, (0, 3, 1, 2))  # NHWC -> NCHW
images = torch.from_numpy(images).type(torch.FloatTensor)

with torch.no_grad():
    images = images.cuda()
    output = model(images)  # input shape should be [1, 3, H, W]: batch size, channels, height, width
    output = torch.sigmoid(output).cpu().numpy()

print(output.shape)
# crop the letterbox padding back out of the prediction
seg_img = (output[0, 0,
                  (config['input_h']-nh)//2:(config['input_h']-nh)//2 + nh,
                  (config['input_w']-nw)//2:(config['input_w']-nw)//2 + nw] * 255).astype('uint8')

image = Image.fromarray(seg_img).resize((original_w, original_h))
image.save("nestedunetmask.png")
torch.cuda.empty_cache()

You can use this to predict a given image.
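Note that the snippet assumes `model` and `config` already exist. A minimal sketch of one way to set them up with the repo's archs module (the checkpoint path and input size are placeholders; match them to your own training run):

import torch
import archs

config = {'input_w': 96, 'input_h': 96}  # placeholder: use your training input size

model = archs.__dict__['NestedUNet'](1, 3, False)  # num_classes=1, input_channels=3, deep_supervision=False
model.load_state_dict(torch.load('models/dsb2018_96_NestedUNet_woDS/model.pth'))  # placeholder checkpoint path
model = model.cuda().eval()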

LisaShen0509 avatar Dec 08 '22 15:12 LisaShen0509

The results obtained using this code and val.py seem to be different. Is there a test.py script somewhere?

junxiant avatar Feb 09 '23 11:02 junxiant

Maybe it will be better if I show the images.

These are the outputs from the code above: [image]

After adding a threshold:

output = torch.where(output > 0.6, 1, 0)

[image]

These are the outputs from val.py: [image]

junxiant avatar Feb 09 '23 12:02 junxiant

Sorry for the late reply. I ran the above code and got the same result as val.py. Maybe you used different thresholds to process the masks? [image]
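For reference, a minimal sketch of how the threshold changes the resulting mask (0.5 and 0.6 are only example values; output is the sigmoid array from the snippet above):

prob = output[0, 0]                            # sigmoid probabilities in [0, 1]
mask_05 = (prob > 0.5).astype('uint8') * 255   # looser mask
mask_06 = (prob > 0.6).astype('uint8') * 255   # stricter mask, as used above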

LisaShen0509 avatar Mar 09 '23 08:03 LisaShen0509

In the letterbox_image function, do you mean nw = int(iw * scale)? As originally posted it reads:

nw = int(iwscale)
nh = int(ihscale)

marrylin2019 avatar May 29 '23 05:05 marrylin2019

Yes, sorry for that.

LisaShen0509 avatar May 29 '23 05:05 LisaShen0509

This is the predict.py code that works for me. Thanks @didnttaken!

import torch
from PIL import Image
import archs
import numpy as np

# Load the model
model = archs.__dict__['NestedUNet'](1, 3, False)  # num_classes=1, input_channels=3, deep_supervision=False

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

model.load_state_dict(torch.load('models/dsb2018_96_NestedUNet_woDS/model.pth', map_location=device)) # Change this to your path
model.eval()


def letterbox_image(image, size):
    image = image.convert("RGB")
    iw, ih = image.size          # original size
    w, h = size                  # target size
    scale = min(w/iw, h/ih)      # pick the smaller scale factor
    nw = int(iw*scale)
    nh = int(ih*scale)

    image = image.resize((nw, nh), Image.BICUBIC)
    new_image = Image.new('RGB', size, (128, 128, 128))  # gray background
    new_image.paste(image, ((w-nw)//2, (h-nh)//2))
    return new_image, nw, nh

image = Image.open('test2.png') # Change this to your image
image = image.convert('RGB')
original_h = np.array(image).shape[0]
original_w = np.array(image).shape[1]

input_width = 96
input_height = 96

# resize and normalize
image, nw, nh = letterbox_image(image, (input_width, input_height))
images = [np.array(image)/255]
images = np.transpose(images, (0, 3, 1, 2))  # NHWC -> NCHW
images = torch.from_numpy(images).type(torch.FloatTensor)

with torch.no_grad():
    images = images.to(device)
    output = model(images)  # input shape should be [1, 3, H, W]: batch size, channels, height, width
    output = torch.sigmoid(output).cpu().numpy()

print(output.shape)
# crop the letterbox padding back out of the prediction
seg_img = (output[0, 0,
                  (input_height-nh)//2:(input_height-nh)//2 + nh,
                  (input_width-nw)//2:(input_width-nw)//2 + nw] * 255).astype('uint8')

image = Image.fromarray(seg_img).resize((original_w, original_h))
image.save("nestedunetmask.png")
torch.cuda.empty_cache()
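If you want a hard binary mask instead of the soft probability map, one optional tweak before the final resize (the 127 cutoff, i.e. a 0.5 probability threshold, is arbitrary per the discussion above; Image.NEAREST keeps the mask edges crisp):

seg_img = (seg_img > 127).astype('uint8') * 255  # binarize the 0-255 probability map
image = Image.fromarray(seg_img).resize((original_w, original_h), Image.NEAREST)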

mornedev avatar Jun 29 '23 22:06 mornedev

You are right, I used a different threshold.

junxiant avatar Jul 16 '23 17:07 junxiant
