keras-applications
ValueError: If your data is in the form of symbolic tensors, you should specify the `steps` argument (instead of the `batch_size` argument, because symbolic tensors are expected to produce batches of input data)
```python
import os

import numpy as np
import tensorflow as tf
from keras.applications.vgg16 import VGG16, preprocess_input
from keras.layers import Input, Flatten, Dense
from keras.preprocessing import image

from discriminator import Discriminator
from generator import Generator


class AACNN():
    """AACNN model."""

    def __init__(self, sess, FLAGS):
        """Initialization.

        Args:
            sess: TensorFlow session
            FLAGS: flags object
        """
        # initialize variables
        self.sess = sess
        self.f = FLAGS
        self.l2_weight = FLAGS.l2_weight
        self.global_step_g = tf.Variable(0, trainable=False)
        self.global_step_d = tf.Variable(0, trainable=False)

        # inputs: real (training) images
        images_shape = [self.f.output_size_height, self.f.output_size_wight, self.f.c_dim]
        attributes_shape = [self.f.attribute_size]
        # self.real_images = tf.placeholder(tf.float32, [None] + images_shape, name="real_images")

        # inputs: HR images
        # note: self.input is fed the numpy arrays returned by get_image
        self.input = tf.placeholder(tf.float32, [self.f.batch_size] + images_shape, name="input")
        self.input_attribute = tf.placeholder(tf.float32, [self.f.batch_size] + attributes_shape, name="input_attribute")

        # initialize models
        generator = Generator(FLAGS)
        discriminator = Discriminator(FLAGS)
        model = VGG16(weights='imagenet', include_top=False)

        # generator network
        self.G = generator(self.input, self.input_attribute)

        if self.f.with_gan:
            # discriminator network for real and fake images
            self.D_real, self.D_real_logits = discriminator(self.input)
            self.D_fake, self.D_fake_logits = discriminator(self.G, reuse=True)

            # feature extraction with VGG16:
            # first unscale the images from [-1, 1] to [0, 255], then preprocess
            # them, then extract features
            self.img_real = (self.input + 1) * 127.5
            self.img_fake = (self.G + 1) * 127.5
            self.img_real = preprocess_input(self.img_real)
            self.img_fake = preprocess_input(self.img_fake)
            print(self.img_real.shape)  # (debug) symbolic shape, e.g. (64, 112, 96, 3)

            # this is where the ValueError is raised: model.predict expects numpy
            # arrays (or a `steps` argument), but these are symbolic tensors
            self.feature_real = model.predict(self.img_real)
            self.feature_fake = model.predict(self.img_fake)
            self.feature_real = tf.convert_to_tensor(self.feature_real, dtype=tf.float32)
            self.feature_fake = tf.convert_to_tensor(self.feature_fake, dtype=tf.float32)

            # perceptual (VGG feature) loss
            self.vgg_loss = 2e-6 * tf.losses.mean_squared_error(self.feature_real, self.feature_fake)

            # adversarial losses
            self.d_loss_real = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=self.D_real_logits,
                    labels=tf.ones_like(self.D_real)))
            self.d_loss_fake = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=self.D_fake_logits,
                    labels=tf.zeros_like(self.D_fake)))
            self.d_loss = self.d_loss_real + self.d_loss_fake
            self.g_loss_adv = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=self.D_fake_logits,
                    labels=tf.ones_like(self.D_fake)))
        else:
            self.l2_weight = 1

        self.g_loss_l2 = tf.losses.mean_squared_error(
            self.input,
            self.G,
            weights=self.l2_weight,
            scope=None)

        if self.f.with_gan:
            self.g_loss = self.g_loss_l2 + self.g_loss_adv + self.vgg_loss
        else:
            self.g_loss = self.g_loss_l2

        # create summaries
        self.__create_summaries()

        # organize variables
        t_vars = tf.trainable_variables()
        if self.f.with_gan:
            self.d_vars = [var for var in t_vars if "d/" in var.name]
            self.g_vars = [var for var in t_vars if "g/" in var.name]

        # saver
        self.saver = tf.train.Saver()
```
Here I have a batch size of 64 and the image shape is (112, 96, 3). I want to extract features for self.input and self.G over the entire batch, but the model can't take the whole batch as input at once.
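One way around this in graph-mode TF 1.x is to avoid model.predict entirely: a Keras model is callable on symbolic tensors, so calling VGG16 directly on the preprocessed tensors wires the feature extractor into the graph, and the full 64-image batch flows through at sess.run time. A minimal sketch under those assumptions (the placeholders stand in for self.input and self.G; the 2e-6 weight is copied from the snippet above):

```python
import tensorflow as tf
from keras.applications.vgg16 import VGG16, preprocess_input

batch_size, height, width, channels = 64, 112, 96, 3

# stand-ins for self.input and self.G from the snippet above
input_real = tf.placeholder(tf.float32, [batch_size, height, width, channels], name="input")
fake = tf.placeholder(tf.float32, [batch_size, height, width, channels], name="fake")

vgg = VGG16(weights='imagenet', include_top=False)

# unscale from [-1, 1] to [0, 255]; recent keras-applications releases apply
# preprocess_input symbolically when given a tensor rather than a numpy array
img_real = preprocess_input((input_real + 1) * 127.5)
img_fake = preprocess_input((fake + 1) * 127.5)

# calling the model like a layer builds VGG16 into the graph, so the whole
# batch is processed at sess.run time -- no model.predict, no `steps` argument
feature_real = vgg(img_real)
feature_fake = vgg(img_fake)

# same 2e-6 weighting as in the snippet above
vgg_loss = 2e-6 * tf.losses.mean_squared_error(feature_real, feature_fake)
```

Even with a `steps` argument, predict would return numpy arrays computed outside the graph, so no gradient could flow from vgg_loss back into the generator; calling the model on the tensor keeps the loss differentiable. If your keras-applications version only supports numpy input in preprocess_input, do the RGB-to-BGR flip and ImageNet mean subtraction on the tensor by hand.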
@abhisheksrivastava2397, can you share a snippet to reproduce the error?
```python
>>> print(tf.__version__)
2.0.0
```

Try:

```python
cce = tf.keras.losses.CategoricalCrossentropy()
t = [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]
y = [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]]
loss = cce(t, y)
```
Error:

```
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/home/llu/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/keras/losses.py", line 126, in __call__
    losses = self.call(y_true, y_pred)
  File "/home/llu/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/keras/losses.py", line 221, in call
    return self.fn(y_true, y_pred, **self._fn_kwargs)
  File "/home/llu/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/keras/losses.py", line 978, in sparse_categorical_crossentropy
    y_true, y_pred, from_logits=from_logits, axis=axis)
  File "/home/llu/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/keras/backend.py", line 4503, in sparse_categorical_crossentropy
    output.op.type != 'Softmax'):
AttributeError: 'list' object has no attribute 'op'
```
And this is the standard example from TensorFlow: https://www.tensorflow.org/api_docs/python/tf/keras/losses/CategoricalCrossentropy
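In case it is still useful: in 2.0.0 the loss call appears to trip over plain Python lists (the backend reaches for `.op` on the input). Converting the targets and predictions to tensors first avoids the AttributeError; a minimal sketch:

```python
import tensorflow as tf

cce = tf.keras.losses.CategoricalCrossentropy()
t = tf.constant([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
y = tf.constant([[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]])

# the loss object reduces to a scalar averaged over the batch
loss = cce(t, y)
print(loss.numpy())
```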
@abhisheksrivastava2397 any progress on this?