tensorflow-DeepFM
How do I export the model?
I export the DeepFM model with the code below, but freeze_model_dir/variables ends up empty. How should I export DeepFM as a pb (SavedModel) model?
```python
def freeze_model(self):
    freeze_model_dir = "freeze_model_dir"
    save_dir = 'checkpoints/'
    save_path = os.path.join(save_dir, 'best_validation')
    start_time = time()
    print(tf.trainable_variables())
    print("freeze model...")
    SIGNATURE_NAME = "serving_default"
    builder = tf.saved_model.builder.SavedModelBuilder(freeze_model_dir)
    inputs = {'feat_index': tf.saved_model.utils.build_tensor_info(self.feat_index),
              'feat_value': tf.saved_model.utils.build_tensor_info(self.feat_value),
              'dropout_keep_fm': tf.saved_model.utils.build_tensor_info(self.dropout_keep_fm),
              'dropout_keep_deep': tf.saved_model.utils.build_tensor_info(self.dropout_keep_deep),
              'train_phase': tf.saved_model.utils.build_tensor_info(self.train_phase)}
    outputs = {'y_pred': tf.saved_model.utils.build_tensor_info(self.y_pred)}
    builder.add_meta_graph_and_variables(
        self.sess,
        [tf.saved_model.tag_constants.SERVING],
        signature_def_map={
            tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                tf.saved_model.signature_def_utils.build_signature_def(
                    inputs, outputs,
                    tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
        },
        main_op=tf.tables_initializer(),
        strip_default_attrs=True)
    builder.save()
```
Same question here: the SavedModel's variables directory is saved empty. Has anyone solved this?
with tf.name_scope("embedding"):
self.weights = self._initialize_weights()
# model
self.embeddings = tf.nn.embedding_lookup(self.weights["feature_embeddings"],
self.feat_index) # None * F * K
feat_value = tf.reshape(self.feat_value, shape=[-1, self.field_size, 1])
self.embeddings = tf.multiply(self.embeddings, feat_value)
with tf.name_scope("first_order"):
# ---------- first order term ----------
self.y_first_order = tf.nn.embedding_lookup(self.weights["feature_bias"], self.feat_index) # None * F * 1
self.y_first_order = tf.reduce_sum(tf.multiply(self.y_first_order, feat_value), 2) # None * F
self.y_first_order = tf.nn.dropout(self.y_first_order, self.dropout_keep_fm[0]) # None * F
with tf.name_scope("second_order"):
# ---------- second order term ---------------
# sum_square part
self.summed_features_emb = tf.reduce_sum(self.embeddings, 1) # None * K
self.summed_features_emb_square = tf.square(self.summed_features_emb) # None * K
# square_sum part
self.squared_features_emb = tf.square(self.embeddings)
self.squared_sum_features_emb = tf.reduce_sum(self.squared_features_emb, 1) # None * K
# second order
self.y_second_order = 0.5 * tf.subtract(self.summed_features_emb_square, self.squared_sum_features_emb) # None * K
self.y_second_order = tf.nn.dropout(self.y_second_order, self.dropout_keep_fm[1]) # None * K
You need to wrap the ops in tf.name_scope, as above, so that they get stored when saving the SavedModel.
After adding tf.name_scope, variables is still empty…
It needs to go in the code block that calls the saver, wrapping the whole graph in the name scope.
I'm not calling a saver; I'm using the OP's tensorflow-serving code. Which part are you referring to?
```python
builder = tf.saved_model.builder.SavedModelBuilder('models/siam-fc/1')
builder.add_meta_graph_and_variables(
    model.sess,
    [tf.saved_model.tag_constants.SERVING],
    signature_def_map={
        'tracker_init': model_signature,
        'tracker_predict': model_signature2
    },
    saver=model.saver
)
builder.save()
```
Problem solved: specify the saver argument in the code block above. See https://github.com/tensorflow/models/issues/1988 for details.
https://github.com/tensorflow/models/issues/1988#issuecomment-500785470
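To tie the thread together, here is a minimal sketch of the freeze_model function from the question with the fix above applied, i.e. passing an explicit saver to add_meta_graph_and_variables (assuming TF 1.x and the same self.* attributes as in the original code; the directory cleanup and the explicit tf.train.Saver are additions for illustration, not part of the original comments):

```python
import os
import shutil
import tensorflow as tf

def freeze_model(self):
    freeze_model_dir = "freeze_model_dir"
    # SavedModelBuilder refuses to write into an existing directory.
    if os.path.exists(freeze_model_dir):
        shutil.rmtree(freeze_model_dir)

    builder = tf.saved_model.builder.SavedModelBuilder(freeze_model_dir)
    inputs = {
        'feat_index': tf.saved_model.utils.build_tensor_info(self.feat_index),
        'feat_value': tf.saved_model.utils.build_tensor_info(self.feat_value),
        'dropout_keep_fm': tf.saved_model.utils.build_tensor_info(self.dropout_keep_fm),
        'dropout_keep_deep': tf.saved_model.utils.build_tensor_info(self.dropout_keep_deep),
        'train_phase': tf.saved_model.utils.build_tensor_info(self.train_phase),
    }
    outputs = {'y_pred': tf.saved_model.utils.build_tensor_info(self.y_pred)}
    signature = tf.saved_model.signature_def_utils.build_signature_def(
        inputs, outputs, tf.saved_model.signature_constants.PREDICT_METHOD_NAME)

    builder.add_meta_graph_and_variables(
        self.sess,
        [tf.saved_model.tag_constants.SERVING],
        signature_def_map={
            tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature
        },
        main_op=tf.tables_initializer(),
        strip_default_attrs=True,
        # The key change from this thread: pass an explicit saver so the
        # trained variables are actually written to freeze_model_dir/variables.
        saver=tf.train.Saver(),
    )
    builder.save()
```

After exporting, `saved_model_cli show --dir freeze_model_dir --all` should list the serving signature, and freeze_model_dir/variables should now contain the variable shards instead of being empty.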
I made some changes on top of the author's code to add saving of checkpoints, summaries and embeddings: DinLei/DeepFM-TF. No plagiarism intended; I don't know how to submit it back to the author. I hope the author doesn't mind and can review it so I can commit it to the original repo.
What environment versions are you all using? It isn't mentioned here. Mine is TF 1.14 with CUDA 10.0 and it doesn't work.
https://github.com/whk6688/tensorflow-DeepFM is worth a look.