
DropBlock1D

Open skgone opened this issue 4 years ago • 1 comment

Thank you for your great work! I want to use DropBlock1D for time series. How should I implement DropBlock1D?

skgone · Apr 25 '20

I modified it to 1D, but the performance is worse than dropout; maybe something is wrong.
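For reference, the 1D analogue of the DropBlock paper's drop rate should be gamma = (1 - keep_prob) / block_size * h / (h - block_size + 1); e.g. keep_prob = 0.9, block_size = 7 and h = 100 give gamma ≈ 0.0152, i.e. roughly 1.5% of the valid positions seed a dropped block.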

import tensorflow as tf
from tensorflow.keras import backend as K


def _bernoulli(shape, mean):
    # Helper missing from the snippet; the repo's 2D implementation defines it
    # along these lines: 1.0 with probability `mean`, 0.0 otherwise.
    return tf.nn.relu(tf.sign(mean - tf.random.uniform(shape, minval=0, maxval=1, dtype=tf.float32)))


class DropBlock1D(tf.keras.layers.Layer):

    def __init__(self, keep_prob, block_size, scale=True, **kwargs):
        super(DropBlock1D, self).__init__(**kwargs)
        self.keep_prob = float(keep_prob) if isinstance(keep_prob, int) else keep_prob
        self.block_size = int(block_size)
        self.scale = tf.constant(scale, dtype=tf.bool) if isinstance(scale, bool) else scale

    def compute_output_shape(self, input_shape):
        return input_shape

    def build(self, input_shape):
        assert len(input_shape) == 3
        _, self.h, self.channel = input_shape.as_list()
        # Padding that restores the reduced sampling mask to the full length.
        p1 = (self.block_size - 1) // 2
        p0 = (self.block_size - 1) - p1
        self.padding = [[0, 0], [p0, p1], [0, 0]]
        self.set_keep_prob()
        super(DropBlock1D, self).build(input_shape)

    def call(self, inputs, training=None, **kwargs):
        def drop():
            mask = self._create_mask(tf.shape(inputs))
            output = inputs * mask
            # Rescale so the expected activation magnitude stays constant.
            output = tf.cond(self.scale,
                             true_fn=lambda: output * tf.cast(tf.size(mask), tf.float32) / tf.reduce_sum(mask),
                             false_fn=lambda: output)
            return output

        if training is None:
            training = K.learning_phase()
        training = tf.cast(training, tf.bool)
        output = tf.cond(tf.logical_or(tf.logical_not(training), tf.equal(self.keep_prob, 1.0)),
                         true_fn=lambda: inputs,
                         false_fn=drop)
        return output

    def set_keep_prob(self, keep_prob=None):
        """This method only supports Eager Execution."""
        if keep_prob is not None:
            self.keep_prob = keep_prob
        h = float(self.h)
        # 1D analogue of the DropBlock drop rate. The parentheses around
        # (h - self.block_size + 1) matter: without them, operator precedence
        # reduces the factor to h/h - block_size + 1 = 2 - block_size, which is
        # negative for block_size > 2 and breaks the Bernoulli sampling.
        self.gamma = ((1. - self.keep_prob) / self.block_size) * (h / (h - self.block_size + 1))

    def _create_mask(self, input_shape):
        # Sample block centres only at positions where a full block fits.
        sampling_mask_shape = tf.stack([input_shape[0],
                                        self.h - self.block_size + 1,
                                        self.channel])
        mask = _bernoulli(sampling_mask_shape, self.gamma)
        mask = tf.pad(mask, self.padding)
        # Grow each sampled centre into a contiguous block of `block_size` steps.
        mask = tf.nn.max_pool(mask, [1, self.block_size, 1], [1, 1, 1], 'SAME')
        mask = 1 - mask
        return mask
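
Below is a minimal usage sketch for a time-series model, assuming the DropBlock1D class above; the input shape, layer sizes and random data are illustrative placeholders, not from the original post:

import numpy as np
import tensorflow as tf

# Toy data (hypothetical): 32 sequences of length 100 with 8 features each.
x = np.random.randn(32, 100, 8).astype("float32")
y = np.random.randint(0, 2, size=(32, 1)).astype("float32")

inputs = tf.keras.Input(shape=(100, 8))
features = tf.keras.layers.Conv1D(16, kernel_size=5, padding="same", activation="relu")(inputs)
features = DropBlock1D(keep_prob=0.9, block_size=7)(features)  # zeroes contiguous runs of 7 time steps
pooled = tf.keras.layers.GlobalAveragePooling1D()(features)
outputs = tf.keras.layers.Dense(1, activation="sigmoid")(pooled)

model = tf.keras.Model(inputs, outputs)
model.compile(optimizer="adam", loss="binary_crossentropy")
model.fit(x, y, epochs=1, batch_size=8)

The layer only perturbs activations when training=True; at inference it passes inputs through unchanged.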

skgone · Apr 28 '20