
ResourceExhaustedError: OOM when allocating tensor with shape[501153,768] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc [Op:Mul]

Open · zrajabi opened this issue on Sep 24, 2020 · 0 comments

I am training a model with BERT and TF 2.3. I have loaded both the Language-agnostic BERT Sentence Encoder (LaBSE) and another multilingual model, bert_multi_cased_L-12_H-768_A-12, from TF Hub, and in both cases I get the following OOM error.
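For reference, the encoders are loaded along these lines (a minimal sketch using tensorflow_hub's KerasLayer; the exact hub handles and the trainable flag are placeholders rather than a copy of my script, and the real setup may wrap the model differently with bert-for-tf2):

import tensorflow_hub as hub

# Placeholder handles: the actual script loads LaBSE and
# bert_multi_cased_L-12_H-768_A-12 from TF Hub.
labse_encoder = hub.KerasLayer("https://tfhub.dev/google/LaBSE/1", trainable=True)
bert_encoder = hub.KerasLayer(
    "https://tfhub.dev/tensorflow/bert_multi_cased_L-12_H-768_A-12/2",
    trainable=True)  # trainable=True puts the large embedding tables among the optimizer-updated variables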

ResourceExhaustedError: OOM when allocating tensor with shape[501153,768] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc [Op:Mul]

I have tried reducing the batch size to 8 and even to 1, and lowering max_seq_length to 20. I have 4 GPUs with 16 GB each, but I still get this error. What is the solution? Please help, thank you!
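If I read the failing shape correctly, [501153, 768] matches the LaBSE wordpiece-embedding matrix (a vocabulary of roughly 501k entries times a hidden size of 768), so a single allocation of that tensor is about 1.4 GiB regardless of batch size or sequence length. Rough arithmetic below (the vocabulary-size interpretation is my assumption):

# Size of one float32 tensor of shape [501153, 768]; 501153 is assumed
# here to be the LaBSE wordpiece vocabulary size.
rows, cols, bytes_per_float32 = 501153, 768, 4
print(rows * cols * bytes_per_float32 / 1024 ** 3)  # ~1.43 GiB per allocation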

ResourceExhaustedError                    Traceback (most recent call last)
in
     33     batch_sample = (query_batch, pos_batch, neg_batch, position_bias_batch)
     34 
---> 35     batch_loss, loss_history, logits = train_step(query_batch, pos_batch, neg_batch, position_bias_batch, triplet_model, optimizer, loss_history, margin)
     36     total_loss += batch_loss
     37 

in train_step(query_batch, pos_batch, neg_batch, position_bias_batch, triplet_model, optimizer, loss_history, margin)
     18     loss_history.append(loss_value.numpy().mean())
     19     grads = tape.gradient(loss_value, triplet_model.trainable_variables)
---> 20     optimizer.apply_gradients(zip(grads, triplet_model.trainable_variables))
     21 
     22     return loss_value.numpy().mean(), loss_history, logits # is it batch loss

~/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py in apply_gradients(self, grads_and_vars, name, experimental_aggregate_gradients)
    547         args=(grads_and_vars,),
    548         kwargs={
--> 549             "name": name,
    550         })
    551 

~/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow/python/distribute/distribute_lib.py in merge_call(self, merge_fn, args, kwargs)
   2713     merge_fn = autograph.tf_convert(
   2714         merge_fn, autograph_ctx.control_status_ctx(), convert_by_default=False)
-> 2715     return self._merge_call(merge_fn, args, kwargs)
   2716 
   2717   def _merge_call(self, merge_fn, args, kwargs):

~/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow/python/distribute/distribute_lib.py in _merge_call(self, merge_fn, args, kwargs)
   2720         distribution_strategy_context._CrossReplicaThreadMode(self._strategy))  # pylint: disable=protected-access
   2721     try:
-> 2722       return merge_fn(self._strategy, *args, **kwargs)
   2723     finally:
   2724       _pop_per_thread_mode()

~/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow/python/autograph/impl/api.py in wrapper(*args, **kwargs)
    273   def wrapper(*args, **kwargs):
    274     with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.UNSPECIFIED):
--> 275       return func(*args, **kwargs)
    276 
    277   if inspect.isfunction(func) or inspect.ismethod(func):

~/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py in distributed_apply(self, distribution, grads_and_vars, name, apply_state)
    631           "update" + var.op.name, skip_on_eager=True):
    632         update_ops.extend(distribution.extended.update(
--> 633             var, apply_grad_to_update_var, args=(grad,), group=False))
    634 
    635     any_symbolic = any(isinstance(i, ops.Operation) or

~/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow/python/distribute/distribute_lib.py in update(self, var, fn, args, kwargs, group)
   2298         fn, autograph_ctx.control_status_ctx(), convert_by_default=False)
   2299     with self._container_strategy().scope():
-> 2300       return self._update(var, fn, args, kwargs, group)
   2301 
   2302   def _update(self, var, fn, args, kwargs, group):

~/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow/python/distribute/distribute_lib.py in _update(self, var, fn, args, kwargs, group)
   2953     # The implementations of _update() and _update_non_slot() are identical
   2954     # except _update() passes var as the first argument to fn().
-> 2955     return self._update_non_slot(var, fn, (var,) + tuple(args), kwargs, group)
   2956 
   2957   def _update_non_slot(self, colocate_with, fn, args, kwargs, should_group):

~/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow/python/distribute/distribute_lib.py in _update_non_slot(self, colocate_with, fn, args, kwargs, should_group)
   2959     # once that value is used for something.
   2960     with UpdateContext(colocate_with):
-> 2961       result = fn(*args, **kwargs)
   2962       if should_group:
   2963         return result

~/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow/python/autograph/impl/api.py in wrapper(*args, **kwargs)
    273   def wrapper(*args, **kwargs):
    274     with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.UNSPECIFIED):
--> 275       return func(*args, **kwargs)
    276 
    277   if inspect.isfunction(func) or inspect.ismethod(func):

~/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py in apply_grad_to_update_var(var, grad)
    602         apply_kwargs["apply_state"] = apply_state
    603       return self._resource_apply_sparse_duplicate_indices(
--> 604           grad.values, var, grad.indices, **apply_kwargs)
    605 
    606     if "apply_state" in self._dense_apply_args:

~/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py in _resource_apply_sparse_duplicate_indices(self, grad, handle, indices, **kwargs)
   1124         values=grad, indices=indices)
   1125     return self._resource_apply_sparse(summed_grad, handle, unique_indices,
-> 1126                                        **kwargs)
   1127 
   1128   def _resource_apply_sparse(self, grad, handle, indices, apply_state):

~/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow/python/keras/optimizer_v2/adam.py in _resource_apply_sparse(self, grad, var, indices, apply_state)
    224       v_sqrt = math_ops.sqrt(v_t)
    225       var_update = state_ops.assign_sub(
--> 226           var, coefficients['lr'] * m_t / (v_sqrt + coefficients['epsilon']),
    227           use_locking=self._use_locking)
    228       return control_flow_ops.group(*[var_update, m_t, v_t])

~/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow/python/ops/math_ops.py in binary_op_wrapper(x, y)
   1123     with ops.name_scope(None, op_name, [x, y]) as name:
   1124       try:
-> 1125         return func(x, y, name=name)
   1126       except (TypeError, ValueError) as e:
   1127         # Even if dispatching the op failed, the RHS may be a tensor aware

~/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow/python/ops/math_ops.py in _mul_dispatch(x, y, name)
   1455     return sparse_tensor.SparseTensor(y.indices, new_vals, y.dense_shape)
   1456   else:
-> 1457     return multiply(x, y, name=name)
   1458 
   1459 

~/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow/python/util/dispatch.py in wrapper(*args, **kwargs)
    199     """Call target, and fall back on dispatchers if there is a TypeError."""
    200     try:
--> 201       return target(*args, **kwargs)
    202     except (TypeError, ValueError):
    203       # Note: convert_to_eager_tensor currently raises a ValueError, not a

~/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow/python/ops/math_ops.py in multiply(x, y, name)
    507   """
    508 
--> 509   return gen_math_ops.mul(x, y, name)
    510 
    511 

~/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow/python/ops/gen_math_ops.py in mul(x, y, name)
   6164       return _result
   6165     except _core._NotOkStatusException as e:
-> 6166       _ops.raise_from_not_ok_status(e, name)
   6167     except _core._FallbackException:
   6168       pass

~/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow/python/framework/ops.py in raise_from_not_ok_status(e, name)
   6841   message = e.message + (" name: " + name if name is not None else "")
   6842   # pylint: disable=protected-access
-> 6843   six.raise_from(core._status_to_exception(e.code, message), None)
   6844   # pylint: enable=protected-access
   6845 

~/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/six.py in raise_from(value, from_value)

ResourceExhaustedError: OOM when allocating tensor with shape[501153,768] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc [Op:Mul]
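For completeness, the failing call chain starts in my train_step, which looks roughly like the sketch below (reconstructed from the traceback above; the forward pass and loss computation are simplified placeholders, and only the lines around apply_gradients are taken verbatim from the traceback):

import tensorflow as tf

def train_step(query_batch, pos_batch, neg_batch, position_bias_batch,
               triplet_model, optimizer, loss_history, margin):
    with tf.GradientTape() as tape:
        # Placeholder forward pass / loss; the real triplet-margin computation is elided.
        logits = triplet_model((query_batch, pos_batch, neg_batch, position_bias_batch))
        loss_value = tf.nn.relu(logits + margin)

    loss_history.append(loss_value.numpy().mean())
    grads = tape.gradient(loss_value, triplet_model.trainable_variables)
    # The OOM is raised inside this call, in Adam's sparse update of the
    # [501153, 768] embedding variable.
    optimizer.apply_gradients(zip(grads, triplet_model.trainable_variables))

    return loss_value.numpy().mean(), loss_history, logits  # is it batch loss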

zrajabi · Sep 24 '20 23:09