diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md
index 24ecbad37a0..33a170d2bcc 100644
--- a/.github/CHANGELOG.md
+++ b/.github/CHANGELOG.md
@@ -305,6 +305,20 @@
Breaking changes
+* Updated how parameter-shift gradient recipes are defined for operations, allowing for
+ gradient recipes that are specified as an arbitrary number of terms.
+ [(#909)](https://github.com/PennyLaneAI/pennylane/pull/909)
+
+ Previously, `Operation.grad_recipe` was restricted to two-term parameter-shift formulas.
+ With this change, the gradient recipe now contains elements of the form
+ :math:`[c_i, a_i, s_i]`, resulting in a gradient recipe of
+ :math:`\frac{\partial}{\partial\phi_k}f(\phi_k) = \sum_{i} c_i f(a_i \phi_k + s_i )`.
+
+ As this is a breaking change, all custom operations with defined gradient recipes must be
+ updated to continue working with PennyLane 0.13. Note though that if `grad_recipe = None`, the
+ default gradient recipe remains unchanged, and corresponds to the two terms :math:`[c_0, a_0, s_0]=[1/2, 1, \pi/2]`
+ and :math:`[c_1, a_1, s_1]=[-1/2, 1, -\pi/2]` for every parameter.
+
- The `VQECost` class has been renamed to `ExpvalCost` to reflect its general applicability
beyond VQE. Use of `VQECost` is still possible but will result in a deprecation warning.
[(#913)](https://github.com/PennyLaneAI/pennylane/pull/913)
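To make the migration described in this changelog entry concrete, here is a minimal sketch of the old versus new `grad_recipe` layout on a custom operation. The `MyRotation` class is hypothetical and exists only to illustrate the format; the recipe values shown are the default two-term rule quoted above.

```python
import numpy as np
import pennylane as qml

class MyRotation(qml.operation.Operation):
    """Hypothetical single-parameter gate, shown only to illustrate the new format."""
    num_params = 1
    num_wires = 1
    par_domain = "R"
    grad_method = "A"

    # PennyLane <= 0.12 style (no longer valid): one (c_k, s_k) tuple per parameter
    #     grad_recipe = [(0.5, np.pi / 2)]
    # PennyLane 0.13 style: one list of [c_i, a_i, s_i] terms per parameter
    grad_recipe = ([[0.5, 1, np.pi / 2], [-0.5, 1, -np.pi / 2]],)
```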
diff --git a/doc/development/plugins.rst b/doc/development/plugins.rst
index 248f211843f..bf62f205d37 100644
--- a/doc/development/plugins.rst
+++ b/doc/development/plugins.rst
@@ -466,21 +466,19 @@ where
* :attr:`~.Operation.grad_method`: the gradient computation method; ``'A'`` for the analytic
method, ``'F'`` for finite differences, and ``None`` if the operation may not be differentiated
-* :attr:`~.Operation.grad_recipe`: The gradient recipe for the analytic ``'A'`` method.
- This is a list with one tuple per operation parameter. For parameter :math:`k`, the tuple is of
- the form :math:`(c_k, s_k)`, resulting in a gradient recipe of
+* :attr:`~.Operation.grad_recipe`: The gradient recipe for the analytic ``'A'``
+ method. This is a tuple with one nested list per operation parameter. For
+ parameter :math:`\phi_k`, the nested list contains elements of the form
+ :math:`[c_i, a_i, s_i]`, resulting in a gradient recipe of
- .. math:: \frac{d}{d\phi_k}f(O(\phi_k)) = c_k\left[f(O(\phi_k+s_k))-f(O(\phi_k-s_k))\right].
+ .. math:: \frac{\partial}{\partial\phi_k}f(\phi_k) = \sum_{i} c_i f(a_i \phi_k+s_i),
- where :math:`f` is an expectation value that depends on :math:`O(\phi_k)`, an example being
+  where :math:`f` is the expectation value of an observable on a circuit that has been evolved by
+  the operation in question, with parameter :math:`\phi_k`.
- .. math:: f(O(\phi_k)) = \braket{0 | O^{\dagger}(\phi_k) \hat{B} O(\phi_k) | 0}
-
- which is the simple expectation value of the operator :math:`\hat{B}` evolved via the gate
- :math:`O(\phi_k)`.
-
- Note that if ``grad_recipe = None``, the default gradient recipe is
- :math:`(c_k, s_k)=(1/2, \pi/2)` for every parameter.
+ Note that if ``grad_recipe = None``, the default gradient recipe containing
+ the two terms :math:`[c_0, a_0, s_0]=[1/2, 1, \pi/2]` and :math:`[c_1, a_1,
+ s_1]=[-1/2, 1, -\pi/2]` is assumed for every parameter.
The user can then import this operation directly from your plugin, and use it when defining a QNode:
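As a quick numerical check of the recipe formula described above, the following illustrative sketch (independent of the plugin operation example that the documentation goes on to show) evaluates the default two-term recipe for the built-in `qml.RX` gate on `default.qubit`, where the expectation value is :math:`f(\phi) = \cos\phi`:

```python
import numpy as np
import pennylane as qml

dev = qml.device("default.qubit", wires=1)

@qml.qnode(dev)
def f(phi):
    qml.RX(phi, wires=0)
    return qml.expval(qml.PauliZ(0))

phi = 0.37
# Default two-term recipe: [c, a, s] = [1/2, 1, pi/2] and [-1/2, 1, -pi/2]
recipe = [[0.5, 1, np.pi / 2], [-0.5, 1, -np.pi / 2]]
grad = sum(c * f(a * phi + s) for c, a, s in recipe)

# For this circuit f(phi) = cos(phi), so the exact derivative is -sin(phi)
assert np.isclose(grad, -np.sin(phi))
```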
diff --git a/pennylane/operation.py b/pennylane/operation.py
index 579639f6553..d85e260d27a 100644
--- a/pennylane/operation.py
+++ b/pennylane/operation.py
@@ -65,10 +65,25 @@
transformation on the quadrature operators.
For gates that *are* supported via the analytic method, the gradient recipe
-(with multiplier :math:`c_k`, parameter shift :math:`s_k` for parameter :math:`\phi_k`)
works as follows:
-.. math:: \frac{\partial}{\partial\phi_k}O = c_k\left[O(\phi_k+s_k)-O(\phi_k-s_k)\right].
+.. math:: \frac{\partial}{\partial\phi_k}f = \sum_{i} c_i f(a_i \phi_k+s_i),
+
+where :math:`f` is the expectation value of an observable on a circuit that has
+been evolved by the operation in question with parameter :math:`\phi_k`. There
+may be multiple terms, indexed by :math:`i`, for each parameter :math:`\phi_k`,
+and the coefficients :math:`[c_i, a_i, s_i]` are specific to the gate.
+
+For example, for qubit operations generated by one of the Pauli matrices, the
+recipe reduces to a single positively and a single negatively shifted term:
+
+.. math::
+
+ \frac{\partial}{\partial\phi_k}f = \frac{1}{2}\left[f \left( \phi_k+\frac{\pi}{2} \right) - f
+ \left( \phi_k-\frac{\pi}{2} \right)\right],
+
+i.e., so that :math:`[c_0, a_0, s_0]=[1/2, 1, \pi/2]` and :math:`[c_1, a_1, s_1]=[-1/2, 1, -\pi/2]`.
CV Operation base classes
~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -613,19 +628,22 @@ def grad_method(self):
return None if self.num_params == 0 else "F"
grad_recipe = None
- r"""list[tuple[float]] or None: Gradient recipe for the parameter-shift method.
+ r"""tuple(Union(list[list[float]], None)) or None: Gradient recipe for the
+ parameter-shift method.
- This is a list with one tuple per operation parameter. For parameter
- :math:`k`, the tuple is of the form :math:`(c_k, s_k)`, resulting in
- a gradient recipe of
+ This is a tuple with one nested list per operation parameter. For
+ parameter :math:`\phi_k`, the nested list contains elements of the form
+ :math:`[c_i, a_i, s_i]` where :math:`i` is the index of the
+ term, resulting in a gradient recipe of
- .. math:: \frac{\partial}{\partial\phi_k}O = c_k\left[O(\phi_k+s_k)-O(\phi_k-s_k)\right].
+ .. math:: \frac{\partial}{\partial\phi_k}f = \sum_{i} c_i f(a_i \phi_k + s_i).
- If ``None``, the default gradient recipe
- :math:`(c_k, s_k)=(1/2, \pi/2)` is assumed for every parameter.
+ If ``None``, the default gradient recipe containing the two terms
+ :math:`[c_0, a_0, s_0]=[1/2, 1, \pi/2]` and :math:`[c_1, a_1,
+ s_1]=[-1/2, 1, -\pi/2]` is assumed for every parameter.
"""
- def get_parameter_shift(self, idx):
+ def get_parameter_shift(self, idx, shift=np.pi / 2):
"""Multiplier and shift for the given parameter, based on its gradient recipe.
Args:
@@ -636,16 +654,32 @@ def get_parameter_shift(self, idx):
"""
# get the gradient recipe for this parameter
recipe = self.grad_recipe[idx]
- multiplier, shift = (0.5, np.pi / 2) if recipe is None else recipe
- # internal multiplier in the Variable
- var_mult = self.data[idx].mult
+ # Default values
+ multiplier = 0.5 / np.sin(shift)
+ a = 1
+
+        # The default recipe is the two-term parameter-shift rule
+        #     ∂f(x) = c*f(x+s) - c*f(x-s)
+        # i.e., one positively and one negatively shifted term
+ default_param_shift = [[multiplier, a, shift], [-multiplier, a, -shift]]
+ param_shift = default_param_shift if recipe is None else recipe
+
+ if hasattr(self.data[idx], "mult"):
+ # Parameter is a variable, we are in non-tape mode
+ # Need to use the internal multiplier in the Variable to update the
+ # multiplier and the shift
+ var_mult = self.data[idx].mult
+
+ for elem in param_shift:
- multiplier *= var_mult
- if var_mult != 0:
- # zero multiplier means the shift is unimportant
- shift /= var_mult
- return multiplier, shift
+ # Update the multiplier
+ elem[0] *= var_mult
+                if var_mult != 0:
+                    # Update the shift
+                    # zero multiplier means the shift is unimportant
+                    elem[2] /= var_mult
+
+        return param_shift
@property
def generator(self):
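A small sketch of what the updated method returns, assuming the new signature introduced in this hunk. `qml.RX` defines no custom recipe, so the default two-term rule is built from the requested shift, with multiplier `0.5 / sin(shift)`:

```python
import numpy as np
import pennylane as qml

op = qml.RX(0.3, wires=0)  # RX has grad_recipe = None, so the default rule applies

print(op.get_parameter_shift(0))
# [[0.5, 1, 1.5707963...], [-0.5, 1, -1.5707963...]]

print(op.get_parameter_shift(0, shift=np.pi / 4))
# multiplier rescaled to 0.5 / sin(pi/4):
# [[0.7071..., 1, 0.7853...], [-0.7071..., 1, -0.7853...]]
```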
@@ -1588,16 +1622,33 @@ def heisenberg_pd(self, idx):
"""
# get the gradient recipe for this parameter
recipe = self.grad_recipe[idx]
- multiplier = 0.5 if recipe is None else recipe[0]
- shift = np.pi / 2 if recipe is None else recipe[1]
+
+ # Default values
+ multiplier = 0.5
+ a = 1
+ shift = np.pi / 2
+
+        # The default recipe is the two-term parameter-shift rule:
+ # ∂f(x) = c*f(x+s) - c*f(x-s)
+ default_param_shift = [[multiplier, a, shift], [-multiplier, a, -shift]]
+ param_shift = default_param_shift if recipe is None else recipe
+
+ pd = None # partial derivative of the transformation
p = self.parameters
- # evaluate the transform at the shifted parameter values
- p[idx] += shift
- U2 = self._heisenberg_rep(p) # pylint: disable=assignment-from-none
- p[idx] -= 2 * shift
- U1 = self._heisenberg_rep(p) # pylint: disable=assignment-from-none
- return (U2 - U1) * multiplier # partial derivative of the transformation
+
+ original_p_idx = p[idx]
+ for c, _a, s in param_shift:
+ # evaluate the transform at the shifted parameter values
+ p[idx] = _a * original_p_idx + s
+ U = self._heisenberg_rep(p) # pylint: disable=assignment-from-none
+
+ if pd is None:
+ pd = c * U
+ else:
+ pd += c * U
+
+ return pd
def heisenberg_tr(self, wires, inverse=False):
r"""Heisenberg picture representation of the linear transformation carried
diff --git a/pennylane/ops/cv.py b/pennylane/ops/cv.py
index 9bc9ead1385..a965ec2b4f2 100644
--- a/pennylane/ops/cv.py
+++ b/pennylane/ops/cv.py
@@ -138,7 +138,9 @@ class Squeezing(CVOperation):
grad_method = "A"
shift = 0.1
- grad_recipe = [(0.5 / math.sinh(shift), shift), None]
+ multiplier = 0.5 / math.sinh(shift)
+ a = 1
+ grad_recipe = ([[multiplier, a, shift], [-multiplier, a, -shift]], None)
@staticmethod
def _heisenberg_rep(p):
@@ -180,7 +182,9 @@ class Displacement(CVOperation):
grad_method = "A"
shift = 0.1
- grad_recipe = [(0.5 / shift, shift), None]
+ multiplier = 0.5 / shift
+ a = 1
+ grad_recipe = ([[multiplier, a, shift], [-multiplier, a, -shift]], None)
@staticmethod
def _heisenberg_rep(p):
@@ -278,8 +282,11 @@ class TwoModeSqueezing(CVOperation):
par_domain = "R"
grad_method = "A"
+
shift = 0.1
- grad_recipe = [(0.5 / math.sinh(shift), shift), None]
+ multiplier = 0.5 / math.sinh(shift)
+ a = 1
+ grad_recipe = ([[multiplier, a, shift], [-multiplier, a, -shift]], None)
@staticmethod
def _heisenberg_rep(p):
@@ -326,8 +333,11 @@ class QuadraticPhase(CVOperation):
par_domain = "R"
grad_method = "A"
+
shift = 0.1
- grad_recipe = [(0.5 / shift, shift)]
+ multiplier = 0.5 / shift
+ a = 1
+ grad_recipe = ([[multiplier, a, shift], [-multiplier, a, -shift]],)
@staticmethod
def _heisenberg_rep(p):
@@ -371,8 +381,11 @@ class ControlledAddition(CVOperation):
par_domain = "R"
grad_method = "A"
+
shift = 0.1
- grad_recipe = [(0.5 / shift, shift)]
+ multiplier = 0.5 / shift
+ a = 1
+ grad_recipe = ([[multiplier, a, shift], [-multiplier, a, -shift]],)
@staticmethod
def _heisenberg_rep(p):
@@ -417,8 +430,11 @@ class ControlledPhase(CVOperation):
par_domain = "R"
grad_method = "A"
+
shift = 0.1
- grad_recipe = [(0.5 / shift, shift)]
+ multiplier = 0.5 / shift
+ a = 1
+ grad_recipe = ([[multiplier, a, shift], [-multiplier, a, -shift]],)
@staticmethod
def _heisenberg_rep(p):
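After these changes the built-in CV gates expose their recipes in the new format. A quick way to inspect one, with values as defined in the hunks above (the printed output is indicative):

```python
import math
import pennylane as qml

# Magnitude parameter: two-term recipe with a finite shift of 0.1 and
# multiplier 0.5 / sinh(0.1); phase parameter: None, i.e. the default recipe.
print(qml.Squeezing.grad_recipe)
# ([[4.9916..., 1, 0.1], [-4.9916..., 1, -0.1]], None)
print(0.5 / math.sinh(0.1))  # 4.9916...
```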
diff --git a/pennylane/qnodes/cv.py b/pennylane/qnodes/cv.py
index d1015fe1f48..11a0798b58f 100644
--- a/pennylane/qnodes/cv.py
+++ b/pennylane/qnodes/cv.py
@@ -181,20 +181,37 @@ def _pd_analytic(self, idx, args, kwargs, **options):
temp_var.idx = n
op.data[p_idx] = temp_var
- multiplier, shift = op.get_parameter_shift(p_idx)
-
- # shifted parameter values
- shift_p1 = np.r_[args, args[idx] + shift]
- shift_p2 = np.r_[args, args[idx] - shift]
+ param_shift = op.get_parameter_shift(p_idx)
if not force_order2 and op.use_method != "B":
# basic parameter-shift method, for Gaussian CV gates
# succeeded by order-1 observables
- # evaluate the circuit at two points with shifted parameter values
- y2 = np.asarray(self.evaluate(shift_p1, kwargs))
- y1 = np.asarray(self.evaluate(shift_p2, kwargs))
- pd += (y2 - y1) * multiplier
+                # evaluate the circuit at each shifted parameter value given by
+                # the recipe (two points in most cases)
+ for multiplier, a, shift in param_shift:
+
+ # shifted parameter values
+ shift_p = np.r_[args, a * args[idx] + shift]
+
+ term = multiplier * np.asarray(self.evaluate(shift_p, kwargs))
+ pd += term
else:
+                if len(param_shift) != 2:
+                    # The second-order CV parameter-shift rule only accepts two-term recipes
+                    raise NotImplementedError(
+                        "Taking the analytic gradient for order-2 operators is "
+                        "unsupported for {op} which contains a parameter with a "
+                        "gradient recipe of more than two terms.".format(op=op)
+                    )
+
+ # Get the shifts and the multipliers
+ pos_multiplier, a1, pos_shift = param_shift[0]
+ neg_multiplier, a2, neg_shift = param_shift[1]
+
+ # shifted parameter values
+ shift_p1 = np.r_[args, a1 * args[idx] + pos_shift]
+ shift_p2 = np.r_[args, a2 * args[idx] + neg_shift]
+
# order-2 parameter-shift method, for gaussian CV gates
# succeeded by order-2 observables
# evaluate transformed observables at the original parameter point
@@ -203,7 +220,7 @@ def _pd_analytic(self, idx, args, kwargs, **options):
Z2 = op.heisenberg_tr(self.device.wires)
self._set_variables(shift_p2, kwargs)
Z1 = op.heisenberg_tr(self.device.wires)
- Z = (Z2 - Z1) * multiplier # derivative of the operation
+ Z = pos_multiplier * Z2 + neg_multiplier * Z1 # derivative of the operation
unshifted_args = np.r_[args, args[idx]]
self._set_variables(unshifted_args, kwargs)
diff --git a/pennylane/qnodes/qubit.py b/pennylane/qnodes/qubit.py
index 740be9fd073..eaa421173d8 100644
--- a/pennylane/qnodes/qubit.py
+++ b/pennylane/qnodes/qubit.py
@@ -128,16 +128,18 @@ def _pd_analytic(self, idx, args, kwargs, **options):
temp_var.idx = n
op.data[p_idx] = temp_var
- multiplier, shift = op.get_parameter_shift(p_idx)
+ param_shift = op.get_parameter_shift(p_idx)
- # shifted parameter values
- shift_p1 = np.r_[args, args[idx] + shift]
- shift_p2 = np.r_[args, args[idx] - shift]
+ for multiplier, a, shift in param_shift:
- # evaluate the circuit at two points with shifted parameter values
- y2 = np.asarray(self.evaluate(shift_p1, kwargs))
- y1 = np.asarray(self.evaluate(shift_p2, kwargs))
- pd += (y2 - y1) * multiplier
+ # shifted parameter values
+ shift_p = np.r_[args, a * args[idx] + shift]
+
+                # evaluate the circuit at the shifted parameter values
+ y = np.asarray(self.evaluate(shift_p, kwargs))
+
+ # add the contribution to the partial derivative
+ pd += multiplier * y
# restore the original parameter
op.data[p_idx] = orig
diff --git a/pennylane/tape/qnode.py b/pennylane/tape/qnode.py
index 7e86e488b46..97de95d23f0 100644
--- a/pennylane/tape/qnode.py
+++ b/pennylane/tape/qnode.py
@@ -112,6 +112,7 @@ class QNode:
h=1e-7 (float): step size for the finite difference method
order=1 (int): The order of the finite difference method to use. ``1`` corresponds
to forward finite differences, ``2`` to centered finite differences.
+ shift=pi/2 (float): the size of the shift for two-term parameter-shift gradient computations
**Example**
diff --git a/pennylane/tape/tapes/cv_param_shift.py b/pennylane/tape/tapes/cv_param_shift.py
index 54e98e7a2e3..809cceed8b3 100644
--- a/pennylane/tape/tapes/cv_param_shift.py
+++ b/pennylane/tape/tapes/cv_param_shift.py
@@ -235,19 +235,20 @@ def parameter_shift_first_order(
op = self._par_info[t_idx]["op"]
p_idx = self._par_info[t_idx]["p_idx"]
- recipe = op.grad_recipe[p_idx]
- c, s = (0.5, np.pi / 2) if recipe is None else recipe
-
+ param_shift = op.get_parameter_shift(p_idx)
shift = np.zeros_like(params)
- shift[idx] = s
- shifted_forward = self.copy(copy_operations=True, tape_cls=QuantumTape)
- shifted_forward.set_parameters(params + shift)
+ coeffs = []
+ tapes = []
+ for c, _a, s in param_shift:
- shifted_backward = self.copy(copy_operations=True, tape_cls=QuantumTape)
- shifted_backward.set_parameters(params - shift)
+ shift[idx] = s
- tapes = [shifted_forward, shifted_backward]
+ # shifted parameter values
+ shifted_tape = self.copy(copy_operations=True, tape_cls=QuantumTape)
+ shifted_tape.set_parameters(params + shift)
+ coeffs.append(c)
+ tapes.append(shifted_tape)
def processing_fn(results):
"""Computes the gradient of the parameter at index idx via the
@@ -260,10 +261,7 @@ def processing_fn(results):
array[float]: 1-dimensional array of length determined by the tape output
measurement statistics
"""
- shifted_forward = np.array(results[0])
- shifted_backward = np.array(results[1])
-
- return c * (shifted_forward - shifted_backward)
+ return np.dot(coeffs, results)
return tapes, processing_fn
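The new processing function reduces the per-tape results to a single weighted sum; `np.dot` with the coefficient list broadcasts over the measurement axis, as this standalone sketch with made-up result arrays shows:

```python
import numpy as np

coeffs = [0.5, -0.5]                    # c_i collected from the recipe terms
results = [
    np.array([0.93, 0.11]),             # made-up output of the positively shifted tape
    np.array([0.21, -0.40]),            # made-up output of the negatively shifted tape
]

grad = np.dot(coeffs, results)          # weighted sum over tapes, broadcast over outputs
assert np.allclose(grad, 0.5 * results[0] - 0.5 * results[1])
```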
@@ -290,22 +288,33 @@ def parameter_shift_second_order(self, idx, params, **options):
dev_wires = options["dev_wires"]
- recipe = op.grad_recipe[p_idx]
- c, s = (0.5, np.pi / 2) if recipe is None else recipe
+ param_shift = op.get_parameter_shift(p_idx)
+
+        if len(param_shift) != 2:
+            # The second-order CV parameter-shift rule only accepts two-term recipes
+            raise NotImplementedError(
+                "Taking the analytic gradient for order-2 operators is "
+                "unsupported for {op} which contains a parameter with a "
+                "gradient recipe of more than two terms.".format(op=op)
+            )
+
+ c1, a1, s1 = param_shift[0]
+ c2, a2, s2 = param_shift[1]
shift = np.zeros_like(params)
- shift[idx] = s
+ shift[idx] = s1
# evaluate transformed observables at the original parameter point
# first build the Heisenberg picture transformation matrix Z
- self.set_parameters(params + shift)
+ self.set_parameters(a1 * params + shift)
Z2 = op.heisenberg_tr(dev_wires)
- self.set_parameters(params - shift)
+ shift[idx] = s2
+ self.set_parameters(a2 * params + shift)
Z1 = op.heisenberg_tr(dev_wires)
# derivative of the operation
- Z = (Z2 - Z1) * c
+ Z = Z2 * c1 + Z1 * c2
self.set_parameters(params)
Z0 = op.heisenberg_tr(dev_wires, inverse=True)
diff --git a/pennylane/tape/tapes/jacobian_tape.py b/pennylane/tape/tapes/jacobian_tape.py
index 9939f5c9526..364cc969066 100644
--- a/pennylane/tape/tapes/jacobian_tape.py
+++ b/pennylane/tape/tapes/jacobian_tape.py
@@ -391,6 +391,7 @@ def jacobian(self, device, params=None, **options):
h=1e-7 (float): finite difference method step size
order=1 (int): The order of the finite difference method to use. ``1`` corresponds
to forward finite differences, ``2`` to centered finite differences.
+ shift=pi/2 (float): the size of the shift for two-term parameter-shift gradient computations
Returns:
array[float]: 2-dimensional array of shape ``(tape.num_params, tape.output_dim)``
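A usage sketch for the new `shift` keyword. The import path is taken from the file layout in this diff, and the `qml.enable_tape()` call is an assumption about the 0.13-era API so that `qml.expval` records onto the tape; the chosen shift only rescales the two-term rule, so the value is unchanged for `RX`:

```python
import numpy as np
import pennylane as qml
from pennylane.tape.tapes.qubit_param_shift import QubitParamShiftTape

qml.enable_tape()  # assumed necessary in the 0.13-era API (tape mode opt-in)

with QubitParamShiftTape() as tape:
    qml.RX(0.3, wires=0)
    qml.expval(qml.PauliZ(0))

dev = qml.device("default.qubit", wires=1)

jac = tape.jacobian(dev, method="analytic", shift=np.pi / 4)
print(jac)  # ≈ [[-sin(0.3)]] = [[-0.2955...]]
```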
diff --git a/pennylane/tape/tapes/qubit_param_shift.py b/pennylane/tape/tapes/qubit_param_shift.py
index 13ca0a1b805..0d5f11d327d 100644
--- a/pennylane/tape/tapes/qubit_param_shift.py
+++ b/pennylane/tape/tapes/qubit_param_shift.py
@@ -129,6 +129,9 @@ def parameter_shift(self, idx, params, **options):
idx (int): trainable parameter index to differentiate with respect to
params (list[Any]): the quantum tape operation parameters
+ Keyword Args:
+ shift=pi/2 (float): the size of the shift for two-term parameter-shift gradient computations
+
Returns:
tuple[list[QuantumTape], function]: A tuple containing the list of generated tapes,
in addition to a post-processing function to be applied to the evaluated
@@ -138,23 +141,21 @@ def parameter_shift(self, idx, params, **options):
op = self._par_info[t_idx]["op"]
p_idx = self._par_info[t_idx]["p_idx"]
- s = (
- np.pi / 2
- if op.grad_recipe is None or op.grad_recipe[p_idx] is None
- else op.grad_recipe[p_idx]
- )
- s = options.get("shift", s)
+ s = options.get("shift", np.pi / 2)
+ param_shift = op.get_parameter_shift(p_idx, shift=s)
shift = np.zeros_like(params)
- shift[idx] = s
+ coeffs = []
+ tapes = []
- shifted_forward = self.copy(copy_operations=True, tape_cls=QuantumTape)
- shifted_forward.set_parameters(params + shift)
+ for c, a, s in param_shift:
+ shift[idx] = s
- shifted_backward = self.copy(copy_operations=True, tape_cls=QuantumTape)
- shifted_backward.set_parameters(params - shift)
+ shifted_tape = self.copy(copy_operations=True, tape_cls=QuantumTape)
+ shifted_tape.set_parameters(a * params + shift)
- tapes = [shifted_forward, shifted_backward]
+ coeffs.append(c)
+ tapes.append(shifted_tape)
def processing_fn(results):
"""Computes the gradient of the parameter at index idx via the
@@ -167,9 +168,7 @@ def processing_fn(results):
array[float]: 1-dimensional array of length determined by the tape output
measurement statistics
"""
- res_forward = np.array(results[0])
- res_backward = np.array(results[1])
- return (res_forward - res_backward) / (2 * np.sin(s))
+ return np.dot(coeffs, results)
return tapes, processing_fn
@@ -181,6 +180,9 @@ def parameter_shift_var(self, idx, params, **options):
idx (int): trainable parameter index to differentiate with respect to
params (list[Any]): the quantum tape operation parameters
+ Keyword Args:
+ shift=pi/2 (float): the size of the shift for two-term parameter-shift gradient computations
+
Returns:
tuple[list[QuantumTape], function]: A tuple containing the list of generated tapes,
in addition to a post-processing function to be applied to the evaluated
diff --git a/tests/qnodes/test_qnode_cv.py b/tests/qnodes/test_qnode_cv.py
index 18ac83fbd1b..3fc8d265af4 100644
--- a/tests/qnodes/test_qnode_cv.py
+++ b/tests/qnodes/test_qnode_cv.py
@@ -613,3 +613,30 @@ def circuit(a):
with pytest.raises(ValueError, match=r"cannot be used with the argument\(s\) \{'a'\}"):
circuit.jacobian([1.0], method="A")
+
+ def test_error_unsupported_grad_recipe(self, monkeypatch):
+ """Test exception raised if attempting to use the second order rule for
+ computing the gradient analytically of an expectation value that
+ contains an operation with an more than two terms in the gradient recipe"""
+
+ class DummyOp(qml.operation.CVOperation):
+ num_wires = 1
+ num_params = 1
+ par_domain = "R"
+ grad_method = "A"
+ grad_recipe = ([[1, 1, 1], [1, 1, 1], [1, 1, 1]],)
+
+ dev = qml.device("default.gaussian", wires=1)
+
+ dev._operation_map["DummyOp"] = None
+
+ def circuit(a):
+ DummyOp(a, wires=[0])
+ return qml.expval(qml.NumberOperator(0))
+
+ with monkeypatch.context() as m:
+ circuit = CVQNode(circuit, dev, force_order2=True)
+
+            m.setattr(circuit, "_best_method", lambda arg: "A")
+            with pytest.raises(
+                NotImplementedError, match=r"analytic gradient for order-2 operators is unsupported"
+            ):
+                circuit.jacobian(0, method="A", options={"force_order2": True})
diff --git a/tests/tape/tapes/test_cv_param_shift.py b/tests/tape/tapes/test_cv_param_shift.py
index 5ebcc4e1a65..1525b8430a8 100644
--- a/tests/tape/tapes/test_cv_param_shift.py
+++ b/tests/tape/tapes/test_cv_param_shift.py
@@ -755,6 +755,35 @@ def test_error_analytic_second_order(self):
with pytest.raises(ValueError, match=r"cannot be used with the argument\(s\) \{0\}"):
tape.jacobian(dev, method="analytic")
+ def test_error_unsupported_grad_recipe(self, monkeypatch):
+ """Test exception raised if attempting to use the second order rule for
+ computing the gradient analytically of an expectation value that
+ contains an operation with more than two terms in the gradient recipe"""
+
+ class DummyOp(qml.operation.CVOperation):
+ num_wires = 1
+ num_params = 1
+ par_domain = "R"
+ grad_method = "A"
+ grad_recipe = ([[1, 1, 1], [1, 1, 1], [1, 1, 1]],)
+
+ dev = qml.device("default.gaussian", wires=1)
+
+        dev.operations.add(DummyOp)
+
+ with CVParamShiftTape() as tape:
+ DummyOp(1, wires=[0])
+ qml.expval(qml.X(0))
+
+ with monkeypatch.context() as m:
+            m.setattr(tape, "_grad_method_validation", lambda *args: ("A",))
+            tape._par_info[0]["grad_method"] = "A"
+            tape.trainable_params = {0}
+
+            with pytest.raises(
+                NotImplementedError, match=r"analytic gradient for order-2 operators is unsupported"
+            ):
+                tape.jacobian(dev, method="analytic", force_order2=True)
+
cv_ops = [getattr(qml, name) for name in qml.ops._cv__ops__]
analytic_cv_ops = [cls for cls in cv_ops if cls.supports_parameter_shift]