
TypeError: set_postfix() argument after ** must be a mapping, not str

rameshjes opened this issue on May 17 '19 · 2 comments

Hello, I am trying to use hyperas to tune a multi-input LSTM model, but I am unable to figure out where things are going wrong. I have searched online but could not find anything relevant to this.

This is my code:

import pickle

import jsonlines
from hyperas import optim
from hyperas.distributions import choice, uniform
from hyperopt import STATUS_OK, Trials, tpe
from keras import regularizers
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import LSTM, Dense, Dropout, Embedding, Input, concatenate
from keras.models import Model
from keras.optimizers import Adam

# preProcessing comes from a project-specific module not shown here;
# the import path below is a placeholder, adjust it to your project:
# from preprocessing import preProcessing


def lstm_model(x_claim, x_sents, x_labels, test_claims_data, test_sents_data, test_labels):

	vocab_size_of_claims = 15000
	vocab_size_of_sents = 30000

	max_claims_length = 65
	max_sents_length = 350

	embedding_dim = 300

	embeddings_index = pickle.load(open("embeddings_"+str(embedding_dim)+"dim_dict.pkl", "rb"))
	
	(embed_matrix_c, embed_dim_c) = preprocess.create_embedding_matrix(vocab_size_of_claims, embeddings_index, claim_word_train_index, embedding_dim)
	(embed_matrix_s, embed_dim_s) = preprocess.create_embedding_matrix(vocab_size_of_sents, embeddings_index, sents_word_train_index, embedding_dim)

	claims_input = Input(shape=(None,), dtype='int32', name='claims')
	embed_claims = Embedding(vocab_size_of_claims, embed_dim_c)(claims_input)
	encoded_claims = LSTM({{choice([16, 64, 256, 512, 1024])}}, return_sequences=True, recurrent_dropout={{uniform(0, 1)}}, dropout={{uniform(0, 1)}})(embed_claims)
	encoded_claims = LSTM({{choice([16, 64, 256, 512, 1024])}}, return_sequences=True, recurrent_dropout={{uniform(0, 1)}}, dropout={{uniform(0, 1)}})(encoded_claims)
	encoded_claims = LSTM({{choice([16, 64, 256, 512, 1024])}})(encoded_claims)

	sentences_input = Input(shape=(None,), dtype='int32', name='sentences')
	embed_sents = Embedding(vocab_size_of_sents, embed_dim_s)(sentences_input)
	encoded_sentences = LSTM({{choice([8, 16, 64, 256, 512, 1024])}}, return_sequences=True, recurrent_dropout={{uniform(0, 1)}}, dropout={{uniform(0, 1)}})(embed_sents)
	encoded_sentences = LSTM({{choice([8, 16, 64, 256, 512, 1024])}}, return_sequences=True, recurrent_dropout={{uniform(0, 1)}}, dropout={{uniform(0, 1)}})(encoded_sentences)
	encoded_sentences = LSTM({{choice([8, 16, 64, 256, 512, 1024])}})(encoded_sentences)

	concatenate_layers = concatenate([encoded_claims, encoded_sentences], axis=-1)

	concatenate_layers = Dense({{choice([16, 64, 256, 512, 1024])}}, kernel_regularizer=regularizers.l2(0.001), activation='relu')(concatenate_layers)
	concatenate_layers = Dropout({{uniform(0, 1)}})(concatenate_layers)
	# concatenate_layers = Dense({{choice([16, 64, 256, 512, 1024])}}, kernel_regularizer=regularizers.l2(0.001), activation='relu')(concatenate_layers)
	# concatenate_layers = Dropout({{uniform(0, 1)}})(concatenate_layers)
	concatenate_layers = Dense({{choice([16, 64, 256, 512, 1024])}}, kernel_regularizer=regularizers.l2(0.001), activation='relu')(concatenate_layers)

	concatenate_layers = Dropout({{uniform(0, 1)}})(concatenate_layers)

	pred_label = Dense(1, activation='sigmoid')(concatenate_layers)
	model = Model([claims_input, sentences_input], pred_label) 

	model.layers[2].set_weights([embed_matrix_c])
	model.layers[2].trainable = False
	model.layers[3].set_weights([embed_matrix_s])
	model.layers[3].trainable = False

	model.summary()  # summary() prints itself and returns None, so no print() needed
	early_stopping = EarlyStopping(monitor='val_loss', patience=2)
	checkpointer = ModelCheckpoint(filepath='keras_weights_hyperas_fever_full.h5', 
	                               verbose=1, 
	                               save_best_only=True)

	model.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=['accuracy'])

	model.fit({'claims': x_claim, 'sentences': x_sents}, x_labels,
	          batch_size={{choice([64, 128])}},
	          epochs=20,
	          verbose=2,
	          validation_split=0.1, callbacks=[early_stopping, checkpointer])

	score, acc = model.evaluate({'claims': test_claims_data, 'sentences': test_sents_data}, test_labels, verbose=0)
	print('Test accuracy:', acc)
	return {'loss': -acc, 'status': STATUS_OK, 'model': model}


def data():

	vocab_size_of_claims = 15000
	vocab_size_of_sents = 30000

	max_claims_length = 65
	max_sents_length = 350

	embedding_dim = 300


	global claim_word_train_index
	global sents_word_train_index
	
	train_dataset_name = "fever_full_binary_train"
	test_dataset_name = "fever_full_binary_dev"

	train_data = "/home/kkuma12s/thesis/Proof_Extraction/data/fever-full/"+train_dataset_name+".jsonl" 
	test_data = "/home/kkuma12s/thesis/Proof_Extraction/data/fever-full/"+test_dataset_name+".jsonl"
	
	claims = []
	sents = []
	labels = []

	with jsonlines.open(train_data, mode='r') as f:
		for example in f:
			claims.append(example["claim"])
			sents.append(example["sentence"])
			labels.append(example["label"])

	preprocess = preProcessing()


	x_claim, x_sents, x_labels, claim_word_train_index, sents_word_train_index = preprocess.to_padding(
		claims, sents, labels, vocab_size_of_claims, vocab_size_of_sents,
		max_claims_length, max_sents_length)

	claims = []
	sents = []
	labels = []

	with jsonlines.open(test_data, mode='r') as f:
		for example in f:
			claims.append(example["claim"])
			sents.append(example["sentence"])
			labels.append(example["label"])

	test_claims_data, test_sents_data, test_labels, claim_word_test_index, sents_word_test_index = preprocess.to_padding(
		claims, sents, labels, vocab_size_of_claims, vocab_size_of_sents,
		max_claims_length, max_sents_length)


	return (x_claim, x_sents, x_labels, test_claims_data, test_sents_data, test_labels)


if __name__ == '__main__':
	
	x_claim, x_sents, x_labels, test_claims_data, test_sents_data, test_labels = data()

	best_run, best_model = optim.minimize(model=lstm_model,
	                                  data=data,
	                                  algo=tpe.suggest,
	                                  max_evals=10,
	                                  trials=Trials())

	print ("best model ", best_model.evaluate({'claims': test_claims_data, 'sentences': test_sents_data}, test_labels, verbose=0))
	print ("best run ", best_run)

Error:


Traceback (most recent call last):
  File "/home/kkuma12s/anaconda3/envs/bert_env/lib/python3.6/runpy.py", line 193, in _run_module_as_main
    "__main__", mod_spec)
  File "/home/kkuma12s/anaconda3/envs/bert_env/lib/python3.6/runpy.py", line 85, in _run_code
    exec(code, run_globals)
  File "/home/kkuma12s/thesis/Proof_Extraction/models/DeepLearningModels/lstm/tune_hyperas_fever_full.py", line 191, in <module>
    trials=Trials())
  File "/home/kkuma12s/anaconda3/envs/bert_env/lib/python3.6/site-packages/hyperas/optim.py", line 69, in minimize
    keep_temp=keep_temp)
  File "/home/kkuma12s/anaconda3/envs/bert_env/lib/python3.6/site-packages/hyperas/optim.py", line 139, in base_minimizer
    return_argmin=True),
  File "/home/kkuma12s/anaconda3/envs/bert_env/lib/python3.6/site-packages/hyperopt/fmin.py", line 388, in fmin
    show_progressbar=show_progressbar,
  File "/home/kkuma12s/anaconda3/envs/bert_env/lib/python3.6/site-packages/hyperopt/base.py", line 639, in fmin
    show_progressbar=show_progressbar)
  File "/home/kkuma12s/anaconda3/envs/bert_env/lib/python3.6/site-packages/hyperopt/fmin.py", line 407, in fmin
    rval.exhaust()
  File "/home/kkuma12s/anaconda3/envs/bert_env/lib/python3.6/site-packages/hyperopt/fmin.py", line 262, in exhaust
    self.run(self.max_evals - n_done, block_until_done=self.asynchronous)
  File "/home/kkuma12s/anaconda3/envs/bert_env/lib/python3.6/site-packages/hyperopt/fmin.py", line 198, in run
    disable=not self.show_progressbar, dynamic_ncols=True,
  File "/home/kkuma12s/anaconda3/envs/bert_env/lib/python3.6/site-packages/tqdm/_tqdm.py", line 850, in __init__
    self.set_postfix(refresh=False, **postfix)
TypeError: set_postfix() argument after ** must be a mapping, not str
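
Reading the traceback again, the exception is raised before my model function ever runs: hyperopt's run() builds a tqdm progress bar, and this tqdm version forwards the postfix argument with self.set_postfix(refresh=False, **postfix), which needs a mapping, while hyperopt apparently hands it a plain string. So this looks like a version mismatch between hyperopt and tqdm rather than a problem in the hyperas code itself. A minimal sketch of what I think is happening (the string literal is my guess, not hyperopt's exact value):

from tqdm import tqdm

# With the tqdm version in the traceback, __init__ forwards postfix via
# `self.set_postfix(refresh=False, **postfix)`; a plain string cannot be
# **-unpacked, which reproduces the same TypeError.
try:
    bar = tqdm(range(10), postfix='best loss: ?')  # illustrative string
except TypeError as err:
    print(err)  # set_postfix() argument after ** must be a mapping, not str

# A mapping is accepted, which is why I suspect aligning the hyperopt and
# tqdm versions (e.g. upgrading both) would fix this, rather than changing
# the model or data functions.
bar = tqdm(range(10), postfix={'best_loss': '?'})
bar.close()

If that reading is right, upgrading hyperopt, or pinning tqdm to a release whose postfix handling matches it, should let the progress bar build; I have not verified a specific version pair.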

rameshjes commented May 17 '19

Have you fixed this problem?

patrick-g-zhang commented Jul 17 '19

@patrick-g-zhang not yet.

rameshjes commented Jul 17 '19