adanet
adanet copied to clipboard
Test error when running tutorial examples
After installing adanet with pip, I tried to run the tutorial examples and got the following error. What should I do to solve this problem?
WARNING:tensorflow:Using temporary folder as model directory: /tmp/tmpp5sv1icl INFO:tensorflow:Using config: {'_model_dir': '/tmp/tmpp5sv1icl', '_tf_random_seed': 42, '_save_summary_steps': 50000, '_save_checkpoints_steps': 50000, '_save_checkpoints_secs': None, '_session_config': None, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_service': None, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x2b8920228208>, '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1} INFO:tensorflow:Running training and evaluation locally (non-distributed). INFO:tensorflow:Start train and evaluate loop. The evaluate will happen after 600 secs (eval_spec.throttle_secs) or training is finished. INFO:tensorflow:Beginning training AdaNet iteration 0 INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Create CheckpointSaverHook. INFO:tensorflow:Graph was finalized.
InternalError Traceback (most recent call last)
~/anaconda3/lib/python3.6/site-packages/tensorflow/python/estimator/training.py in train_and_evaluate(estimator, train_spec, eval_spec) 445 '(with task id 0). Given task id {}'.format(config.task_id)) 446 --> 447 return executor.run() 448 449
~/anaconda3/lib/python3.6/site-packages/tensorflow/python/estimator/training.py in run(self) 529 config.task_type != run_config_lib.TaskType.EVALUATOR): 530 logging.info('Running training and evaluation locally (non-distributed).') --> 531 return self.run_local() 532 533 # Distributed case.
~/anaconda3/lib/python3.6/site-packages/tensorflow/python/estimator/training.py in run_local(self) 667 input_fn=self._train_spec.input_fn, 668 max_steps=self._train_spec.max_steps, --> 669 hooks=train_hooks) 670 671 if not self._continuous_eval_listener.before_eval():
~/anaconda3/lib/python3.6/site-packages/adanet/core/estimator.py in train(self, input_fn, hooks, steps, max_steps, saving_listeners) 440 hooks=hooks, 441 max_steps=max_steps, --> 442 saving_listeners=saving_listeners) 443 444 # If training ended because the maximum number of training steps
~/anaconda3/lib/python3.6/site-packages/tensorflow/python/estimator/estimator.py in train(self, input_fn, hooks, steps, max_steps, saving_listeners) 364 365 saving_listeners = _check_listeners_type(saving_listeners) --> 366 loss = self._train_model(input_fn, hooks, saving_listeners) 367 logging.info('Loss for final step: %s.', loss) 368 return self
~/anaconda3/lib/python3.6/site-packages/tensorflow/python/estimator/estimator.py in _train_model(self, input_fn, hooks, saving_listeners) 1117 return self._train_model_distributed(input_fn, hooks, saving_listeners) 1118 else: -> 1119 return self._train_model_default(input_fn, hooks, saving_listeners) 1120 1121 def _train_model_default(self, input_fn, hooks, saving_listeners):
~/anaconda3/lib/python3.6/site-packages/tensorflow/python/estimator/estimator.py in _train_model_default(self, input_fn, hooks, saving_listeners) 1133 return self._train_with_estimator_spec(estimator_spec, worker_hooks, 1134 hooks, global_step_tensor, -> 1135 saving_listeners) 1136 1137 def _train_model_distributed(self, input_fn, hooks, saving_listeners):
~/anaconda3/lib/python3.6/site-packages/tensorflow/python/estimator/estimator.py in _train_with_estimator_spec(self, estimator_spec, worker_hooks, hooks, global_step_tensor, saving_listeners) 1331 save_summaries_steps=self._config.save_summary_steps, 1332 config=self._session_config, -> 1333 log_step_count_steps=self._config.log_step_count_steps) as mon_sess: 1334 loss = None 1335 while not mon_sess.should_stop():
~/anaconda3/lib/python3.6/site-packages/tensorflow/python/training/monitored_session.py in MonitoredTrainingSession(master, is_chief, checkpoint_dir, scaffold, hooks, chief_only_hooks, save_checkpoint_secs, save_summaries_steps, save_summaries_secs, config, stop_grace_period_secs, log_step_count_steps, max_wait_secs, save_checkpoint_steps) 413 all_hooks.extend(hooks) 414 return MonitoredSession(session_creator=session_creator, hooks=all_hooks, --> 415 stop_grace_period_secs=stop_grace_period_secs) 416 417
~/anaconda3/lib/python3.6/site-packages/tensorflow/python/training/monitored_session.py in __init__(self, session_creator, hooks, stop_grace_period_secs) 824 super(MonitoredSession, self).__init__( 825 session_creator, hooks, should_recover=True, --> 826 stop_grace_period_secs=stop_grace_period_secs) 827 828
~/anaconda3/lib/python3.6/site-packages/tensorflow/python/training/monitored_session.py in __init__(self, session_creator, hooks, should_recover, stop_grace_period_secs) 547 stop_grace_period_secs=stop_grace_period_secs) 548 if should_recover: --> 549 self._sess = _RecoverableSession(self._coordinated_creator) 550 else: 551 self._sess = self._coordinated_creator.create_session()
~/anaconda3/lib/python3.6/site-packages/tensorflow/python/training/monitored_session.py in __init__(self, sess_creator) 1010 """ 1011 self._sess_creator = sess_creator -> 1012 _WrappedSession.__init__(self, self._create_session()) 1013 1014 def _create_session(self):
~/anaconda3/lib/python3.6/site-packages/tensorflow/python/training/monitored_session.py in _create_session(self) 1015 while True: 1016 try: -> 1017 return self._sess_creator.create_session() 1018 except _PREEMPTION_ERRORS as e: 1019 logging.info('An error was raised while a session was being created. '
~/anaconda3/lib/python3.6/site-packages/tensorflow/python/training/monitored_session.py in create_session(self) 704 """Creates a coordinated session.""" 705 # Keep the tf_sess for unit testing. --> 706 self.tf_sess = self._session_creator.create_session() 707 # We don't want coordinator to suppress any exception. 708 self.coord = coordinator.Coordinator(clean_stop_exception_types=[])
~/anaconda3/lib/python3.6/site-packages/tensorflow/python/training/monitored_session.py in create_session(self) 475 init_op=self._scaffold.init_op, 476 init_feed_dict=self._scaffold.init_feed_dict, --> 477 init_fn=self._scaffold.init_fn) 478 479
~/anaconda3/lib/python3.6/site-packages/tensorflow/python/training/session_manager.py in prepare_session(self, master, init_op, saver, checkpoint_dir, checkpoint_filename_with_path, wait_for_checkpoint, max_wait_secs, config, init_feed_dict, init_fn) 279 wait_for_checkpoint=wait_for_checkpoint, 280 max_wait_secs=max_wait_secs, --> 281 config=config) 282 if not is_loaded_from_checkpoint: 283 if init_op is None and not init_fn and self._local_init_op is None:
~/anaconda3/lib/python3.6/site-packages/tensorflow/python/training/session_manager.py in _restore_checkpoint(self, master, saver, checkpoint_dir, checkpoint_filename_with_path, wait_for_checkpoint, max_wait_secs, config) 182 """ 183 self._target = master --> 184 sess = session.Session(self._target, graph=self._graph, config=config) 185 186 if checkpoint_dir and checkpoint_filename_with_path:
~/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py in __init__(self, target, graph, config) 1561 1562 """ -> 1563 super(Session, self).__init__(target, graph, config=config) 1564 # NOTE(mrry): Create these on first `__enter__` to avoid a reference cycle. 1565 self._default_graph_context_manager = None
~/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py in __init__(self, target, graph, config) 631 if self._created_with_new_api: 632 # pylint: disable=protected-access --> 633 self._session = tf_session.TF_NewSession(self._graph._c_graph, opts) 634 # pylint: enable=protected-access 635 else:
InternalError: Failed to create session.