pytorch-nested-unet
How can I predict segmentation results after training?
After training, how can the segmentation result be predicted for a given image? Thanks!
def letterbox_image(image, size):
    image = image.convert("RGB")
    iw, ih = image.size  # original size
    w, h = size  # needed size
    scale = min(w/iw, h/ih)  # use the smaller ratio so the image fits
    nw = int(iw*scale)
    nh = int(ih*scale)
    image = image.resize((nw, nh), Image.BICUBIC)
    new_image = Image.new('RGB', size, (128, 128, 128))  # gray background
    new_image.paste(image, ((w-nw)//2, (h-nh)//2))  # center the resized image
    return new_image, nw, nh

image = Image.open('your_image.png')  # placeholder path, change this to your image
image = image.convert('RGB')
original_h = np.array(image).shape[0]
original_w = np.array(image).shape[1]

# resize and normalize
image, nw, nh = letterbox_image(image, (config['input_w'], config['input_h']))
images = [np.array(image)/255]
images = np.transpose(images, (0, 3, 1, 2))  # HWC -> NCHW
images = torch.from_numpy(images).type(torch.FloatTensor)

with torch.no_grad():
    images = images.cuda()
    output = model(images)  # input shape should be [1, 3, H, W]: batch size, channels, height, width
    output = torch.sigmoid(output).cpu().numpy()
    print(output.shape)
    # crop away the gray letterbox padding, then scale probabilities to 0-255
    seg_img = (output[0, 0,
                      (config['input_h']-nh)//2:(config['input_h']-nh)//2+nh,
                      (config['input_w']-nw)//2:(config['input_w']-nw)//2+nw] * 255).astype('uint8')
    image = Image.fromarray(seg_img).resize((original_w, original_h))
    image.save("nestedunetmask.png")
torch.cuda.empty_cache()
You can use this to predict a given image.
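To make the crop at the end easier to follow, here is the arithmetic letterbox_image performs for a hypothetical 200x100 input and a 96x96 network size:

iw, ih = 200, 100                      # hypothetical original size
w, h = 96, 96                          # network input size
scale = min(w/iw, h/ih)                # min(0.48, 0.96) = 0.48
nw, nh = int(iw*scale), int(ih*scale)  # 96, 48
ox, oy = (w-nw)//2, (h-nh)//2          # paste offset (0, 24): gray bars above and below
# the slice output[..., oy:oy+nh, ox:ox+nw] removes exactly those gray bars again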
The results obtained using this code and val.py seem to be different. Is there a test.py script somewhere?
Maybe it will be better if I show the images.
These are the outputs from the code above:
After adding a threshold:
output = torch.where(output > 0.6, 1, 0)
These are the outputs from val.py:
Sorry for the late reply. I ran the above code and got the same result as val.py. Maybe you used different thresholds to process the masks?
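For reference, a minimal sketch of how the cutoff changes the rendered mask (here output is the raw model output before sigmoid, and 0.5 is just a common default, not necessarily what val.py uses):

prob = torch.sigmoid(output)   # per-pixel probabilities in [0, 1]
mask_a = (prob > 0.5).float()  # common default cutoff
mask_b = (prob > 0.6).float()  # stricter cutoff keeps only more confident pixels
# pixels with probability between 0.5 and 0.6 are foreground in mask_a but
# background in mask_b, so two renderings of the same output can differ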
nw = int(iwscale)
nh = int(ihscale)

Here, do you mean nw = int(iw * scale)?
Yes, sorry for that.
This is the predict.py code that works for me, thanks @didnttaken:
import torch
from PIL import Image
import archs
import numpy as np

# Load the model
model = archs.__dict__['NestedUNet'](1, 3, False)  # num_classes=1, input_channels=3, no deep supervision
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.load_state_dict(torch.load('models/dsb2018_96_NestedUNet_woDS/model.pth', map_location=device))  # Change this to your path
model.eval()

def letterbox_image(image, size):
    image = image.convert("RGB")
    iw, ih = image.size  # original size
    w, h = size  # needed size
    scale = min(w/iw, h/ih)  # use the smaller ratio so the image fits
    nw = int(iw*scale)
    nh = int(ih*scale)
    image = image.resize((nw, nh), Image.BICUBIC)
    new_image = Image.new('RGB', size, (128, 128, 128))  # gray background
    new_image.paste(image, ((w-nw)//2, (h-nh)//2))  # center the resized image
    return new_image, nw, nh

image = Image.open('test2.png')  # Change this to your image
image = image.convert('RGB')
original_h = np.array(image).shape[0]
original_w = np.array(image).shape[1]

input_width = 96
input_height = 96

# resize and normalize
image, nw, nh = letterbox_image(image, (input_width, input_height))
images = [np.array(image)/255]
images = np.transpose(images, (0, 3, 1, 2))  # HWC -> NCHW
images = torch.from_numpy(images).type(torch.FloatTensor)

with torch.no_grad():
    images = images.to(device)  # use .to(device) so this also runs on CPU
    output = model(images)  # input shape should be [1, 3, H, W]: batch size, channels, height, width
    output = torch.sigmoid(output).cpu().numpy()
    print(output.shape)
    # crop away the gray letterbox padding, then scale probabilities to 0-255
    seg_img = (output[0, 0,
                      (input_height-nh)//2:(input_height-nh)//2+nh,
                      (input_width-nw)//2:(input_width-nw)//2+nw] * 255).astype('uint8')
    image = Image.fromarray(seg_img).resize((original_w, original_h))
    image.save("nestedunetmask.png")
torch.cuda.empty_cache()
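If you want a hard black-and-white mask instead of the soft grayscale image saved above, you can binarize the probabilities before scaling to 255 (a minimal sketch; the 0.5 cutoff is an assumption, pick whatever threshold suits your data):

crop = output[0, 0,
              (input_height-nh)//2:(input_height-nh)//2+nh,
              (input_width-nw)//2:(input_width-nw)//2+nw]
binary = ((crop > 0.5) * 255).astype('uint8')  # hard 0/255 mask
# NEAREST keeps the mask binary when resizing back to the original size
Image.fromarray(binary).resize((original_w, original_h), Image.NEAREST).save("nestedunetmask_binary.png")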
You are right, I used a different threshold.