Deconvnet-keras
A pull request with a new implementation adapted to Keras 2.0?
Hi @jalused ,
Your code has been very helpful to me, thank you so much. However, it took a while to adapt it to the new version of Keras and make it executable; for instance, the ordering of input.shape has changed. How about I open a PR to your repo adding a Jupyter notebook that explains how to use it with Keras 2.0?
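To illustrate the shape-ordering change I mean, here is a minimal sketch of my own (assuming the TensorFlow backend with Keras 2's default image_data_format='channels_last'):

from keras.layers import Input, Convolution2D

# Keras 1 with the Theano backend ordered image shapes as
# (channels, rows, cols); Keras 2 defaults to channels_last:
# (rows, cols, channels)
x = Input(shape=(224, 224, 3))
conv = Convolution2D(64, (3, 3), padding='same')(x)
# layer.input_shape is now (None, 224, 224, 3): spatial dims sit at
# input_shape[1:3] and the channel dim at input_shape[3]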
Below are my modifications of your classes:
import numpy as np
from keras.layers import Input, Dense, Convolution2D
from keras.models import Model
from keras import backend as K

class DConvolution2D(object):
    '''
    A class to define forward and backward operation on Convolution2D
    '''
    def __init__(self, layer):
        '''
        # Arguments
            layer: an instance of Convolution2D layer, whose configuration
                will be used to initiate DConvolution2D(input_shape,
                output_shape, weights)
        '''
        self.layer = layer
        weights = layer.get_weights()
        W, b = weights
        config = layer.get_config()

        # Set up_func for DConvolution2D
        input = Input(shape=layer.input_shape[1:])
        output = Convolution2D.from_config(config)(input)
        up_func = Model(input, output)
        up_func.layers[1].set_weights(weights)
        self.up_func = up_func

        # Flip W vertically and horizontally and swap the channel axes,
        # then set down_func for DConvolution2D. With channels_last the
        # Keras 2 kernel has shape (rows, cols, in_channels, out_channels).
        W = np.transpose(W, (0, 1, 3, 2))
        W = W[::-1, ::-1, :, :]
        config['filters'] = W.shape[3]
        config['kernel_size'] = (W.shape[0], W.shape[1])
        b = np.zeros(config['filters'])
        input = Input(shape=layer.output_shape[1:])
        output = Convolution2D.from_config(config)(input)
        down_func = Model(input, output)
        down_func.layers[1].set_weights((W, b))
        self.down_func = down_func

    def up(self, data):
        '''
        function to compute Convolution output in forward pass
        # Arguments
            data: Data to be operated in forward pass
        # Returns
            Convolved result
        '''
        self.up_data = self.up_func.predict(data)
        return self.up_data

    def down(self, data, learning_phase=0):
        '''
        function to compute Deconvolution output in backward pass
        # Arguments
            data: Data to be operated in backward pass
        # Returns
            Deconvolved result
        '''
        self.down_data = self.down_func.predict(data)
        return self.down_data
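For what it's worth, this is roughly how I exercise the class (a sketch, not from the original repo; model, the layer index, and img_batch are hypothetical, and I assume padding='same' so spatial dimensions are preserved):

dconv = DConvolution2D(model.layers[1])  # hypothetical trained model
feat = dconv.up(img_batch)               # img_batch: (1, rows, cols, channels)
recon = dconv.down(feat)                 # projected back to input space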
class DDense(object):
    '''
    A class to define forward and backward operation on Dense
    '''
    def __init__(self, layer):
        '''
        # Arguments
            layer: an instance of Dense layer, whose configuration
                will be used to initiate DDense(input_shape,
                output_shape, weights)
        '''
        self.layer = layer
        weights = layer.get_weights()
        W, b = weights
        config = layer.get_config()

        # Set up_func for DDense
        input = Input(shape=layer.input_shape[1:])
        output = Dense.from_config(config)(input)
        up_func = Model(input, output)
        up_func.set_weights(weights)
        self.up_func = up_func

        # Transpose W and set down_func for DDense
        W = W.transpose()
        self.input_shape = layer.input_shape
        self.output_shape = layer.output_shape
        b = np.zeros(self.input_shape[1])
        flipped_weights = [W, b]
        input = Input(shape=self.output_shape[1:])
        # Keras 2 uses `units` instead of the Keras 1 `output_dim` argument
        output = Dense(units=self.input_shape[1])(input)
        down_func = Model(input, output)
        down_func.set_weights(flipped_weights)
        self.down_func = down_func

    def up(self, data):
        '''
        function to compute dense output in forward pass
        # Arguments
            data: Data to be operated in forward pass
        # Returns
            Result of dense layer
        '''
        self.up_data = self.up_func.predict(data)
        return self.up_data

    def down(self, data):
        '''
        function to compute dense output in backward pass
        # Arguments
            data: Data to be operated in backward pass
        # Returns
            Result of reverse dense layer
        '''
        # data = data - self.bias
        self.down_data = self.down_func.predict(data)
        return self.down_data
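The weight transpose in DDense is just the identity below (a standalone numpy illustration I added, not part of the original code):

import numpy as np

W = np.random.randn(4, 3)   # Keras 2 Dense kernel: (input_dim, units)
x = np.random.randn(1, 4)
y = x.dot(W)                # forward pass: shape (1, 3)
x_back = y.dot(W.T)         # backward pass reuses the transpose: shape (1, 4)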
class DPooling(object):
    '''
    A class to define forward and backward operation on Pooling
    '''
    def __init__(self, layer):
        '''
        # Arguments
            layer: an instance of Pooling layer, whose configuration
                will be used to initiate DPooling(input_shape,
                output_shape, weights)
        '''
        self.layer = layer
        self.poolsize = layer.pool_size
        # Keras 1 called this attribute `poolsize`:
        # self.poolsize = layer.poolsize

    def up(self, data):
        '''
        function to compute pooling output in forward pass
        # Arguments
            data: Data to be operated in forward pass
        # Returns
            Pooled result
        '''
        [self.up_data, self.switch] = \
            self.__max_pooling_with_switch(data, self.poolsize)
        return self.up_data

    def down(self, data, learning_phase=0):
        '''
        function to compute unpooling output in backward pass
        # Arguments
            data: Data to be operated in backward pass
            learning_phase: learning_phase of Keras, 1 or 0
        # Returns
            Unpooled result
        '''
        self.down_data = self.__max_unpooling_with_switch(data, self.switch)
        return self.down_data

    def __max_pooling_with_switch(self, input, poolsize):
        '''
        Compute pooling output and switch in forward pass, switch stores
        location of the maximum value in each poolsize * poolsize block
        # Arguments
            input: data to be pooled
            poolsize: size of pooling operation
        # Returns
            Pooled result and Switch
        '''
        switch = np.zeros(input.shape)
        out_shape = list(input.shape)
        row_poolsize = int(poolsize[0])
        col_poolsize = int(poolsize[1])
        out_shape[1] = out_shape[1] // poolsize[0]
        out_shape[2] = out_shape[2] // poolsize[1]
        pooled = np.zeros(out_shape)
        for sample in range(input.shape[0]):
            for dim in range(input.shape[3]):
                for row in range(out_shape[1]):
                    for col in range(out_shape[2]):
                        patch = input[sample,
                                      row * row_poolsize : (row + 1) * row_poolsize,
                                      col * col_poolsize : (col + 1) * col_poolsize,
                                      dim]
                        max_value = patch.max()
                        pooled[sample, row, col, dim] = max_value
                        max_col_index = patch.argmax(axis=-1)
                        max_cols = patch.max(axis=-1)
                        max_row = max_cols.argmax()
                        max_col = max_col_index[max_row]
                        switch[sample,
                               row * row_poolsize + max_row,
                               col * col_poolsize + max_col,
                               dim] = 1
        return [pooled, switch]

    # Compute unpooled output using pooled data and switch
    def __max_unpooling_with_switch(self, input, switch):
        '''
        Compute unpooled output using pooled data and switch
        # Arguments
            input: pooled data to be unpooled
            switch: switch storing the location of each maximum
        # Returns
            Unpooled result
        '''
        out_shape = switch.shape
        unpooled = np.zeros(out_shape)
        for sample in range(input.shape[0]):
            for dim in range(input.shape[3]):
                tile = np.ones((switch.shape[1] // input.shape[1],
                                switch.shape[2] // input.shape[2]))
                out = np.kron(input[sample, :, :, dim], tile)
                unpooled[sample, :, :, dim] = out * switch[sample, :, :, dim]
        return unpooled
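To make the switch/unpooling idea concrete, here is a tiny standalone numpy example of what __max_pooling_with_switch and __max_unpooling_with_switch compute (my own illustration, not part of the original code):

import numpy as np

# One sample, one channel, 2x2 pooling over a single 2x2 block
x = np.array([[[[1.], [3.]],
               [[2.], [0.]]]])           # shape (1, 2, 2, 1)
pooled = np.array([[[[3.]]]])            # block max, shape (1, 1, 1, 1)
switch = np.zeros_like(x)
switch[0, 0, 1, 0] = 1                   # the max 3 sits at row 0, col 1
# Unpooling: np.kron broadcasts each pooled value over its block,
# then the switch zeroes everything except the max position
unpooled = np.kron(pooled[0, :, :, 0], np.ones((2, 2))) * switch[0, :, :, 0]
# unpooled == [[0., 3.], [0., 0.]]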
class DActivation(object):
    '''
    A class to define forward and backward operation on Activation
    '''
    def __init__(self, layer, linear=False):
        '''
        # Arguments
            layer: an instance of Activation layer, whose configuration
                will be used to initiate DActivation(input_shape,
                output_shape, weights)
        '''
        self.layer = layer
        self.linear = linear
        self.activation = layer.activation
        input = K.placeholder(shape=layer.output_shape)
        output = self.activation(input)
        # According to the original paper, the forward and backward
        # passes apply the same activation (relu)
        self.up_func = K.function(
            [input, K.learning_phase()], [output])
        self.down_func = K.function(
            [input, K.learning_phase()], [output])

    # Compute activation in forward pass
    def up(self, data, learning_phase=0):
        '''
        function to compute activation in forward pass
        # Arguments
            data: Data to be operated in forward pass
            learning_phase: learning_phase of Keras, 1 or 0
        # Returns
            Activation
        '''
        self.up_data = self.up_func([data, learning_phase])[0]
        return self.up_data

    # Compute activation in backward pass
    def down(self, data, learning_phase=0):
        '''
        function to compute activation in backward pass
        # Arguments
            data: Data to be operated in backward pass
            learning_phase: learning_phase of Keras, 1 or 0
        # Returns
            Activation
        '''
        self.down_data = self.down_func([data, learning_phase])[0]
        return self.down_data

class DFlatten(object):
    '''
    A class to define forward and backward operation on Flatten
    '''
    def __init__(self, layer):
        '''
        # Arguments
            layer: an instance of Flatten layer, whose configuration
                will be used to initiate DFlatten(input_shape,
                output_shape, weights)
        '''
        self.layer = layer
        self.shape = layer.input_shape[1:]
        self.up_func = K.function(
            [layer.input, K.learning_phase()], [layer.output])

    # Flatten multi-dimensional input into 1D output
    def up(self, data, learning_phase=0):
        '''
        function to flatten input in forward pass
        # Arguments
            data: Data to be operated in forward pass
            learning_phase: learning_phase of Keras, 1 or 0
        # Returns
            Flattened data
        '''
        self.up_data = self.up_func([data, learning_phase])[0]
        return self.up_data

    # Reshape 1D input back into the original multi-dimensional shape
    def down(self, data, learning_phase=0):
        '''
        function to unflatten input in backward pass
        # Arguments
            data: Data to be operated in backward pass
            learning_phase: learning_phase of Keras, 1 or 0
        # Returns
            Recovered data
        '''
        new_shape = [data.shape[0]] + list(self.shape)
        assert np.prod(self.shape) == np.prod(data.shape[1:])
        self.down_data = np.reshape(data, new_shape)
        return self.down_data

class DInput(object):
    '''
    A class to define forward and backward operation on Input
    '''
    def __init__(self, layer):
        '''
        # Arguments
            layer: an instance of Input layer, whose configuration
                will be used to initiate DInput(input_shape,
                output_shape, weights)
        '''
        self.layer = layer

    # input and output of the Input layer are the same
    def up(self, data, learning_phase=0):
        '''
        function to operate input in forward pass; the input and output
        are the same
        # Arguments
            data: Data to be operated in forward pass
            learning_phase: learning_phase of Keras, 1 or 0
        # Returns
            data
        '''
        self.up_data = data
        return self.up_data

    def down(self, data, learning_phase=0):
        '''
        function to operate input in backward pass; the input and output
        are the same
        # Arguments
            data: Data to be operated in backward pass
            learning_phase: learning_phase of Keras, 1 or 0
        # Returns
            data
        '''
        self.down_data = data
        return self.down_data
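Finally, a rough sketch of how I chain the classes together for a full deconvnet pass. This mirrors how the original script wires things up, as far as I can tell; model and img_batch are hypothetical placeholders here:

from keras.layers import (Convolution2D, MaxPooling2D, Dense,
                          Flatten, InputLayer)

deconv_layers = []
for layer in model.layers:               # model: a hypothetical trained net
    if isinstance(layer, Convolution2D):
        deconv_layers.append(DConvolution2D(layer))
        deconv_layers.append(DActivation(layer))
    elif isinstance(layer, MaxPooling2D):
        deconv_layers.append(DPooling(layer))
    elif isinstance(layer, Dense):
        deconv_layers.append(DDense(layer))
        deconv_layers.append(DActivation(layer))
    elif isinstance(layer, Flatten):
        deconv_layers.append(DFlatten(layer))
    elif isinstance(layer, InputLayer):
        deconv_layers.append(DInput(layer))

data = img_batch                         # hypothetical input batch
for d in deconv_layers:                  # forward pass, caching pool switches
    data = d.up(data)
for d in reversed(deconv_layers):        # backward (deconvnet) pass
    data = d.down(data)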
thanks, man!