
[BUG] `dynamic_one_shot` does not work with TensorFlow

Open mudit2812 opened this issue 1 year ago • 1 comment

Expected behavior

`dynamic_one_shot` works with the TensorFlow interface.

Actual behavior

It does not. Execution fails with an `InvalidArgumentError` due to a dtype mismatch: in the transform's post-processing, the boolean shot-validity mask is multiplied with a floating-point result tensor, and TensorFlow does not implicitly cast booleans to floats (see the traceback below).
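
As a minimal illustration outside of PennyLane (the tensor names here are hypothetical, chosen to mirror the `measurement` and `is_valid` tensors in the traceback below), TensorFlow refuses to multiply a float tensor by a boolean mask unless the mask is cast explicitly:

import tensorflow as tf

measurement = tf.constant([0.1, -0.3], dtype=tf.float64)  # per-shot results
is_valid = tf.constant([True, False])                      # shot-validity mask

# measurement * is_valid  # raises InvalidArgumentError: cannot compute Mul ...

# Casting the mask to the result dtype avoids the error:
masked = measurement * tf.cast(is_valid, measurement.dtype)
expval = tf.reduce_sum(masked) / tf.reduce_sum(tf.cast(is_valid, measurement.dtype))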

Additional information

No response

Source code

import pennylane as qml
import tensorflow as tf

# Finite-shot device: mid-circuit measurements are handled by dynamic_one_shot
dev = qml.device("default.qubit", shots=10)

@qml.qnode(dev)
def f(x):
    qml.RX(x, 0)
    qml.measure(0)  # mid-circuit measurement triggers the dynamic_one_shot transform
    return qml.expval(qml.PauliX(0))

x = tf.Variable(0.4)  # TensorFlow parameter selects the tf interface
f(x)

Tracebacks

---------------------------------------------------------------------------
InvalidArgumentError                      Traceback (most recent call last)
Cell In[1], line 14
     11     return qml.expval(qml.PauliX(0))
     13 x = tf.Variable(0.4)
---> 14 f(x)

File ~/repos/pennylane/pennylane/workflow/qnode.py:1098, in QNode.__call__(self, *args, **kwargs)
   1095 self._update_gradient_fn(shots=override_shots, tape=self._tape)
   1097 try:
-> 1098     res = self._execution_component(args, kwargs, override_shots=override_shots)
   1099 finally:
   1100     if old_interface == "auto":

File ~/repos/pennylane/pennylane/workflow/qnode.py:1052, in QNode._execution_component(self, args, kwargs, override_shots)
   1049 full_transform_program.prune_dynamic_transform()
   1051 # pylint: disable=unexpected-keyword-arg
-> 1052 res = qml.execute(
   1053     (self._tape,),
   1054     device=self.device,
   1055     gradient_fn=self.gradient_fn,
   1056     interface=self.interface,
   1057     transform_program=full_transform_program,
   1058     config=config,
   1059     gradient_kwargs=self.gradient_kwargs,
   1060     override_shots=override_shots,
   1061     **self.execute_kwargs,
   1062 )
   1063 res = res[0]
   1065 # convert result to the interface in case the qfunc has no parameters

File ~/repos/pennylane/pennylane/workflow/execution.py:790, in execute(tapes, device, gradient_fn, interface, transform_program, config, grad_on_execution, gradient_kwargs, cache, cachesize, max_diff, override_shots, expand_fn, max_expansion, device_batch_transform, device_vjp)
    785 else:
    786     results = ml_boundary_execute(
    787         tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_diff=max_diff
    788     )
--> 790 return post_processing(results)

File ~/repos/pennylane/pennylane/transforms/core/transform_program.py:88, in _apply_postprocessing_stack(results, postprocessing_stack)
     65 """Applies the postprocessing and cotransform postprocessing functions in a Last-In-First-Out LIFO manner.
     66 
     67 Args:
   (...)
     85 
     86 """
     87 for postprocessing in reversed(postprocessing_stack):
---> 88     results = postprocessing(results)
     89 return results

File ~/repos/pennylane/pennylane/transforms/core/transform_program.py:58, in _batch_postprocessing(results, individual_fns, slices)
     32 def _batch_postprocessing(
     33     results: ResultBatch, individual_fns: List[PostProcessingFn], slices: List[slice]
     34 ) -> ResultBatch:
     35     """Broadcast individual post processing functions onto their respective tapes.
     36 
     37     Args:
   (...)
     56 
     57     """
---> 58     return tuple(fn(results[sl]) for fn, sl in zip(individual_fns, slices))

File ~/repos/pennylane/pennylane/transforms/core/transform_program.py:58, in <genexpr>(.0)
     32 def _batch_postprocessing(
     33     results: ResultBatch, individual_fns: List[PostProcessingFn], slices: List[slice]
     34 ) -> ResultBatch:
     35     """Broadcast individual post processing functions onto their respective tapes.
     36 
     37     Args:
   (...)
     56 
     57     """
---> 58     return tuple(fn(results[sl]) for fn, sl in zip(individual_fns, slices))

File ~/repos/pennylane/pennylane/transforms/dynamic_one_shot.py:143, in dynamic_one_shot.<locals>.processing_fn(results, has_partitioned_shots, batched_results)
    141 if not tape.shots.has_partitioned_shots:
    142     results = results[0]
--> 143 return parse_native_mid_circuit_measurements(tape, aux_tapes, results)

File ~/repos/pennylane/pennylane/transforms/dynamic_one_shot.py:281, in parse_native_mid_circuit_measurements(circuit, aux_tapes, results)
    276     if not isinstance(m, CountsMP):
    277         # We don't need to cast to arrays when using qml.counts. qml.math.array is not viable
    278         # as it assumes all elements of the input are of builtin python types and not belonging
    279         # to any particular interface
    280         result = qml.math.stack(result, like=interface)
--> 281     meas = gather_non_mcm(m, result, is_valid)
    282     m_count += 1
    283 if isinstance(m, SampleMP):

File ~/repos/pennylane/pennylane/transforms/dynamic_one_shot.py:310, in gather_non_mcm(circuit_measurement, measurement, is_valid)
    308     return dict(sorted(tmp.items()))
    309 if isinstance(circuit_measurement, ExpectationMP):
--> 310     return qml.math.sum(measurement * is_valid) / qml.math.sum(is_valid)
    311 if isinstance(circuit_measurement, ProbabilityMP):
    312     return qml.math.sum(measurement * is_valid.reshape((-1, 1)), axis=0) / qml.math.sum(
    313         is_valid
    314     )

File ~/.pyenv/versions/3.10.12/envs/pennylane/lib/python3.10/site-packages/tensorflow/python/util/traceback_utils.py:153, in filter_traceback.<locals>.error_handler(*args, **kwargs)
    151 except Exception as e:
    152   filtered_tb = _process_traceback_frames(e.__traceback__)
--> 153   raise e.with_traceback(filtered_tb) from None
    154 finally:
    155   del filtered_tb

File ~/.pyenv/versions/3.10.12/envs/pennylane/lib/python3.10/site-packages/tensorflow/python/framework/ops.py:7164, in raise_from_not_ok_status(e, name)
   7162 def raise_from_not_ok_status(e, name):
   7163   e.message += (" name: " + name if name is not None else "")
-> 7164   raise core._status_to_exception(e) from None

InvalidArgumentError: cannot compute Mul as input #1(zero-based) was expected to be a double tensor but is a bool tensor [Op:Mul]
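
For reference, the failing operation is the `measurement * is_valid` product in `gather_non_mcm` (pennylane/transforms/dynamic_one_shot.py:310). A minimal sketch of one possible workaround, assuming a helper of this shape (this is not the actual patch; the approach suggested in the thread below instead moves device preprocessing inside the ML framework boundary), would be to cast the boolean mask to the measurement dtype before multiplying:

import pennylane as qml

# Hypothetical helper sketching the cast-before-multiply idea for ExpectationMP.
def masked_expval(measurement, is_valid):
    # Cast the boolean validity mask to the measurement's dtype so that
    # TensorFlow's Mul op receives matching dtypes.
    valid = qml.math.cast_like(is_valid, measurement)
    return qml.math.sum(measurement * valid) / qml.math.sum(valid)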

System information

Dev

Existing GitHub issues

  • [X] I have searched existing GitHub issues to make sure the issue does not already exist.

mudit2812 · May 17 '24 17:05

Note that moving device preprocessing inside the ML framework boundary solves this problem:

https://github.com/PennyLaneAI/pennylane/compare/master...inner-transform-program

albi3ro · May 17 '24 17:05

This works now. @mudit2812 Can we close this too?

astralcai · Dec 12 '24 19:12

Go for it. My bad for forgetting to close these issues 😅

mudit2812 · Dec 12 '24 20:12