deepcell-tf
Question about Tuning Parameters to get Voronoi Plots
I have a question about tuning the model for the TissueNet dataset downloaded from https://datasets.deepcell.org/data. The current shape of the training and testing data is (10404, 256, 256, 2).
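For context, the X_train, y_train, X_test, and y_test arrays used below are assumed to come from the downloaded TissueNet archive. A minimal loading sketch is shown here; the file name and the 'X'/'y' key names are assumptions, so adjust them to match whatever your download actually contains.

import numpy as np
from sklearn.model_selection import train_test_split

# load the TissueNet arrays (file name and key names are assumptions)
with np.load('tissuenet.npz') as data:
    X, y = data['X'], data['y']

print(X.shape, y.shape)  # expected: (N, 256, 256, 2) for both images and labels

# 80/20 split, matching the test_size defined further below
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=0)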
The model was run as follows:
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from deepcell.model_zoo.panopticnet import PanopticNet
classes = {
    'inner_distance': 1,  # inner distance
    'outer_distance': 1,  # outer distance
    'fgbg': 2,  # foreground/background separation
}
model = PanopticNet(
    backbone='resnet50',
    input_shape=X_train.shape[1:],
    norm_method='std',
    num_semantic_classes=classes)
from tensorflow.keras.optimizers import SGD, Adam
from deepcell.utils.train_utils import rate_scheduler
model_name = 'watershed_centroid_nuclear_general_std'
n_epoch = 5 # Number of training epochs
test_size = .20 # % of data saved as test
norm_method = 'whole_image' # data normalization
lr = 1e-5
optimizer = Adam(learning_rate=lr, clipnorm=0.001)
lr_sched = rate_scheduler(lr=lr, decay=0.99)
batch_size = 1
min_objects = 3 # throw out images with fewer than this many objects
from deepcell import image_generators
from deepcell.utils import train_utils
transforms = list(classes.keys())
transforms_kwargs = {'outer-distance': {'erosion_width': 0}}
# use augmentation for training but not validation
datagen = image_generators.SemanticDataGenerator(
    rotation_range=180,
    shear_range=0,
    zoom_range=(0.75, 1.25),
    horizontal_flip=True,
    vertical_flip=True)
datagen_val = image_generators.SemanticDataGenerator(
    rotation_range=0,
    shear_range=0,
    zoom_range=0,
    horizontal_flip=False,
    vertical_flip=False)
seed = 0  # generator seed (not defined in the original snippet; any fixed integer works)
train_data = datagen.flow(
    {'X': X_train, 'y': y_train},
    seed=seed,
    transforms=transforms,
    transforms_kwargs=transforms_kwargs,
    min_objects=min_objects,
    batch_size=batch_size)
val_data = datagen_val.flow(
    {'X': X_test, 'y': y_test},
    seed=seed,
    transforms=transforms,
    transforms_kwargs=transforms_kwargs,
    min_objects=min_objects,
    batch_size=batch_size)
from matplotlib import pyplot as plt
inputs, outputs = train_data.next()
img = inputs[0]
inner_distance = outputs[0]
outer_distance = outputs[1]
fgbg = outputs[2]
fig, axes = plt.subplots(1, 4, figsize=(15, 15))
axes[0].imshow(img[..., 0])
axes[0].set_title('Source Image')
axes[1].imshow(inner_distance[0, ..., 0])
axes[1].set_title('Inner Distance')
axes[2].imshow(outer_distance[0, ..., 0])
axes[2].set_title('Outer Distance')
axes[3].imshow(fgbg[0, ..., 0])
axes[3].set_title('Foreground/Background')
plt.show()
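The snippet jumps from checking the generators straight to prediction, so the compile/fit step and the definition of prediction_model are not shown. A minimal sketch of what presumably ran in between, using plain Keras losses in place of whatever loss setup was actually used and simply reusing the trained model for inference (all of this is an assumption about the omitted cells):

# compile with one loss per semantic head, in output order:
# inner distance and outer distance are regressions, fgbg is a 2-class softmax
model.compile(
    optimizer=optimizer,
    loss=['mse', 'mse', 'categorical_crossentropy'])

# train on the generator
model.fit(
    train_data,
    steps_per_epoch=len(train_data),
    epochs=n_epoch,
    validation_data=val_data,
    validation_steps=len(val_data))

# simplest choice for inference is the trained model itself; if you built a
# separate inference-time PanopticNet instead, make sure its input_shape and
# normalization match the training model before loading the trained weights
prediction_model = model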
from timeit import default_timer
start = default_timer()
test_images = prediction_model.predict(X_test)
watershed_time = default_timer() - start
print('Watershed segmentation of shape', test_images[0].shape, 'in', watershed_time, 'seconds.')
import time
from matplotlib import pyplot as plt
import numpy as np
from skimage.feature import peak_local_max
from deepcell_toolbox.deep_watershed import deep_watershed
index = np.random.choice(X_test.shape[0])
print(index)
fig, axes = plt.subplots(1, 4, figsize=(20, 20))
masks = deep_watershed(
    test_images,
    min_distance=10,
    detection_threshold=0.1,
    distance_threshold=0.01,
    exclude_border=False,
    small_objects_threshold=0)
# calculated in the postprocessing above, but useful for visualizing
inner_distance = test_images[0]
outer_distance = test_images[1]
coords = peak_local_max(
    inner_distance[index],
    min_distance=10,
    threshold_abs=0.1,
    exclude_border=False)

# raw image with detected centroids overlaid
axes[0].imshow(X_test[index, ..., 0])
axes[0].scatter(coords[..., 1], coords[..., 0],
                color='r', marker='.', s=10)
axes[1].imshow(inner_distance[index, ..., 0], cmap='jet')
axes[2].imshow(outer_distance[index, ..., 0], cmap='jet')
axes[3].imshow(masks[index, ...], cmap='jet')
plt.show()
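The Voronoi plots are then meant to be generated from these detected centroids, roughly along the lines below; scipy.spatial.Voronoi is just one way to do this and is an assumption on my part.

from scipy.spatial import Voronoi, voronoi_plot_2d

# build the Voronoi diagram from the detected centroids (x = column, y = row)
points = np.stack([coords[..., 1], coords[..., 0]], axis=-1)
vor = Voronoi(points)

fig, ax = plt.subplots(figsize=(8, 8))
ax.imshow(X_test[index, ..., 0], cmap='gray')
voronoi_plot_2d(vor, ax=ax, show_vertices=False, line_colors='r', point_size=2)
plt.show()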
How can the parameters be tuned so that the red centroid dots align exactly with the segmented cells, so that Voronoi plots can then be generated from them? Would reshaping the data solve the issue?
It is difficult to say; there is a lot going on in this example. It sounds like an image alignment issue. At first glance I doubt this is a matter of parameter tuning; it is more likely related to what is being compared. I would recommend focusing on the shapes and dimensions of the inputs and outputs at each stage of the analysis, and carefully reviewing the docstrings to make sure you understand any transforms that are applied to the images.
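For example, a quick sanity check along these lines (using the variable names from the snippet above) will usually reveal whether the image being displayed and the predictions being overlaid actually share the same spatial dimensions and index:

# confirm that the raw image, the predicted maps, and the label mask line up
print('X_test:         ', X_test.shape)
print('inner_distance: ', test_images[0].shape)
print('outer_distance: ', test_images[1].shape)
print('masks:          ', masks.shape)
print('peak coords:    ', coords.shape)

# the spatial dimensions of the predictions should match the input image exactly
assert test_images[0].shape[1:3] == X_test.shape[1:3]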