# Copyright 2018-2021 Xanadu Quantum Technologies Inc.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions"""

# pylint: disable=wrong-import-order
import autoray as ar
import numpy as _np
import scipy as sp

# pylint: disable=import-outside-toplevel
from autograd.numpy.numpy_boxes import ArrayBox
from autoray import numpy as np

from . import single_dispatch  # pylint:disable=unused-import
from .interface_utils import get_interface
def allequal(tensor1, tensor2, **kwargs):
    """Returns True if two tensors are element-wise equal along a given axis.

    This function is equivalent to calling ``np.all(tensor1 == tensor2, **kwargs)``,
    but allows for ``tensor1`` and ``tensor2`` to differ in type.

    Args:
        tensor1 (tensor_like): tensor to compare
        tensor2 (tensor_like): tensor to compare
        **kwargs: Accepts any keyword argument that is accepted by ``np.all``,
            such as ``axis``, ``out``, and ``keepdims``. See the `NumPy documentation
            <https://numpy.org/doc/stable/reference/generated/numpy.all.html>`__ for
            more details.

    Returns:
        ndarray, bool: If ``axis=None``, a logical AND reduction is applied to all elements
        and a boolean will be returned, indicating if all elements evaluate to ``True``.
        Otherwise, a boolean NumPy array will be returned.

    **Example**

    >>> a = torch.tensor([1, 2])
    >>> b = np.array([1, 2])
    >>> allequal(a, b)
    True
    """
    t1 = ar.to_numpy(tensor1)
    t2 = ar.to_numpy(tensor2)
    return np.all(t1 == t2, **kwargs)
def _allclose_sparse(a, b, rtol=1e-05, atol=1e-08):
    """Compare two sparse matrices for approximate equality.

    Args:
        a, b: scipy sparse matrices to compare
        rtol (float): relative tolerance
        atol (float): absolute tolerance

    Returns:
        bool: True if matrices are approximately equal
    """
    if (a != b).nnz == 0:
        return True

    diff = abs(a - b)
    # Handle cases where the matrix might be empty
    max_diff = diff.data.max() if diff.nnz > 0 else 0
    max_b = abs(b).data.max() if b.nnz > 0 else 0
    return max_diff <= atol + rtol * max_b


def _allclose_mixed(a, b, rtol=1e-05, atol=1e-08, b_is_sparse=True):
    """Helper function for comparing dense and sparse matrices with correct tolerance reference.

    Args:
        a: first matrix (dense or sparse)
        b: second matrix (sparse or dense)
        rtol: relative tolerance
        atol: absolute tolerance
        b_is_sparse: True if b is sparse matrix, False if a is sparse matrix

    Returns:
        bool: True if matrices are approximately equal
    """
    sparse = b if b_is_sparse else a
    dense = a if b_is_sparse else b

    if sparse.nnz == 0:
        return np.allclose(dense, 0, rtol=rtol, atol=atol)

    if dense.shape != sparse.shape:
        return False

    SIZE_THRESHOLD = 10000
    if np.prod(dense.shape) < SIZE_THRESHOLD:
        # Use dense comparison but maintain b as reference
        if b_is_sparse:
            return np.allclose(a, sparse.toarray(), rtol=rtol, atol=atol)
        return np.allclose(sparse.toarray(), b, rtol=rtol, atol=atol)

    dense_coords = dense.nonzero()
    sparse_coords = sparse.nonzero()
    coord_diff = set(zip(*dense_coords)) ^ set(zip(*sparse_coords))
    if coord_diff:
        return False

    # Maintain asymmetric comparison with correct reference
    if b_is_sparse:
        a_data = dense[dense_coords]
        b_data = sparse.data
    else:
        a_data = sparse.data
        b_data = dense[sparse_coords]

    return np.allclose(a_data, b_data, rtol=rtol, atol=atol)
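# A minimal usage sketch of the two helpers above (illustrative values only, not
# part of the public API examples). ``_allclose_sparse`` compares two SciPy sparse
# matrices, while ``_allclose_mixed`` compares a dense array against a sparse
# matrix, keeping ``b`` as the tolerance reference to mirror ``np.allclose(a, b)``:
#
#     >>> from scipy import sparse
#     >>> m1 = sparse.csr_matrix([[1.0, 0.0], [0.0, 2.0]])
#     >>> m2 = sparse.csr_matrix([[1.0 + 1e-9, 0.0], [0.0, 2.0]])
#     >>> _allclose_sparse(m1, m2)
#     True
#     >>> _allclose_mixed(m1.toarray(), m2, b_is_sparse=True)
#     True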
def allclose(a, b, rtol=1e-05, atol=1e-08, **kwargs):
    """Wrapper around np.allclose, allowing tensors ``a`` and ``b``
    to differ in type"""
    try:
        # Some frameworks may provide their own allclose implementation.
        # Try and use it if available.
        if sp.sparse.issparse(a) and sp.sparse.issparse(b):
            return _allclose_sparse(a, b, rtol=rtol, atol=atol)
        if sp.sparse.issparse(a):
            # pylint: disable=arguments-out-of-order
            return _allclose_mixed(a, b, rtol=rtol, atol=atol, b_is_sparse=False)
        if sp.sparse.issparse(b):
            return _allclose_mixed(a, b, rtol=rtol, atol=atol, b_is_sparse=True)
        res = np.allclose(a, b, rtol=rtol, atol=atol, **kwargs)
    except (TypeError, AttributeError, ImportError, RuntimeError):
        # Otherwise, convert the input to NumPy arrays.
        #
        # TODO: replace this with a bespoke, framework agnostic
        # low-level implementation to avoid the NumPy conversion:
        #
        #   np.abs(a - b) <= atol + rtol * np.abs(b)
        #
        t1 = ar.to_numpy(a)
        t2 = ar.to_numpy(b)
        res = np.allclose(t1, t2, rtol=rtol, atol=atol, **kwargs)

    return res
allclose.__doc__ = _np.allclose.__doc__
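# Usage sketch (assumed inputs, for illustration): ``allclose`` accepts tensors
# of differing types, including dense/sparse mixtures, and falls back to a NumPy
# conversion if the dispatched comparison raises:
#
#     >>> from scipy import sparse
#     >>> dense = _np.eye(2)
#     >>> allclose(dense, sparse.csr_matrix(dense))
#     True
#     >>> allclose([1.0, 2.0], _np.array([1.0, 2.0 + 1e-9]))
#     True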
def cast(tensor, dtype):
    """Casts the given tensor to a new type.

    Args:
        tensor (tensor_like): tensor to cast
        dtype (str, np.dtype): Any supported NumPy dtype representation; this can be
            a string (``"float64"``), a ``np.dtype`` object (``np.dtype("float64")``), or
            a dtype class (``np.float64``). If ``tensor`` is not a NumPy array, the
            **equivalent** dtype in the dispatched framework is used.

    Returns:
        tensor_like: a tensor with the same shape and values as ``tensor`` and the
        same dtype as ``dtype``

    **Example**

    We can use NumPy dtype specifiers:

    >>> x = torch.tensor([1, 2])
    >>> cast(x, np.float64)
    tensor([1., 2.], dtype=torch.float64)

    We can also use strings:

    >>> x = tf.Variable([1, 2])
    >>> cast(x, "complex128")
    <tf.Tensor: shape=(2,), dtype=complex128, numpy=array([1.+0.j, 2.+0.j])>
    """
    if isinstance(tensor, (list, tuple, int, float, complex)):
        tensor = np.asarray(tensor)

    if not isinstance(dtype, str):
        try:
            dtype = np.dtype(dtype).name
        except (AttributeError, TypeError, ImportError):
            dtype = getattr(dtype, "name", dtype)

    return ar.astype(tensor, ar.to_backend_dtype(dtype, like=ar.infer_backend(tensor)))
def cast_like(tensor1, tensor2):
    """Casts a tensor to the same dtype as another.

    Args:
        tensor1 (tensor_like): tensor to cast
        tensor2 (tensor_like): tensor with corresponding dtype to cast to

    Returns:
        tensor_like: a tensor with the same shape and values as ``tensor1`` and the
        same dtype as ``tensor2``

    **Example**

    >>> x = torch.tensor([1, 2])
    >>> y = torch.tensor([3., 4.])
    >>> cast_like(x, y)
    tensor([1., 2.])
    """
    if isinstance(tensor2, tuple) and len(tensor2) > 0:
        tensor2 = tensor2[0]

    if isinstance(tensor2, ArrayBox):
        dtype = ar.to_numpy(tensor2._value).dtype.type  # pylint: disable=protected-access
    elif not is_abstract(tensor2):
        dtype = ar.to_numpy(tensor2).dtype.type
    else:
        dtype = tensor2.dtype

    return cast(tensor1, dtype)
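# Additional sketch (hypothetical inputs): ``cast_like`` also accepts plain Python
# sequences for ``tensor1``, since ``cast`` converts them to NumPy arrays first:
#
#     >>> cast_like([1, 2], _np.array([3.0, 4.0]))
#     array([1., 2.])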
def convert_like(tensor1, tensor2):
    """Convert a tensor to the same type as another.

    Args:
        tensor1 (tensor_like): tensor to convert
        tensor2 (tensor_like): tensor with corresponding type to convert to

    Returns:
        tensor_like: a tensor with the same shape, values, and dtype as ``tensor1`` and the
        same type as ``tensor2``.

    **Example**

    >>> x = np.array([1, 2])
    >>> y = tf.Variable([3, 4])
    >>> convert_like(x, y)
    <tf.Tensor: shape=(2,), dtype=int64, numpy=array([1, 2])>
    """
    interface = get_interface(tensor2)

    if interface == "torch":
        dev = tensor2.device
        return np.asarray(tensor1, device=dev, like=interface)

    if interface == "scipy":
        return sp.sparse.csr_matrix(tensor1)

    return np.asarray(tensor1, like=interface)
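# Additional sketch (hypothetical inputs): when the reference tensor is a SciPy
# sparse matrix, ``convert_like`` returns a CSR matrix rather than a dense array:
#
#     >>> from scipy import sparse
#     >>> out = convert_like(_np.eye(2), sparse.csr_matrix(_np.eye(2)))
#     >>> sp.sparse.issparse(out)
#     True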
def is_abstract(tensor, like=None):
    """Returns True if the tensor is considered abstract.

    Abstract arrays have no internal value, and are used primarily when
    tracing Python functions, for example, in order to perform just-in-time
    (JIT) compilation.

    Abstract tensors most commonly occur within a function that has been
    decorated using ``@tf.function`` or ``@jax.jit``.

    .. note::

        Currently Autograd tensors and Torch tensors will always return ``False``.
        This is because:

        - Autograd does not provide JIT compilation, and

        - ``@torch.jit.script`` is not currently compatible with QNodes.

    Args:
        tensor (tensor_like): input tensor
        like (str): The name of the interface. Will be determined automatically
            if not provided.

    Returns:
        bool: whether the tensor is abstract or not

    **Example**

    Consider the following JAX function:

    .. code-block:: python

        import jax
        from jax import numpy as jnp

        def function(x):
            print("Value:", x)
            print("Abstract:", qml.math.is_abstract(x))
            return jnp.sum(x ** 2)

    When we execute it, we see that the tensor is not abstract; it has known value:

    >>> x = jnp.array([0.5, 0.1])
    >>> function(x)
    Value: [0.5, 0.1]
    Abstract: False
    Array(0.26, dtype=float32)

    However, if we use the ``@jax.jit`` decorator, the tensor will now be abstract:

    >>> x = jnp.array([0.5, 0.1])
    >>> jax.jit(function)(x)
    Value: Traced<ShapedArray(float32[2])>with<DynamicJaxprTrace(level=0/1)>
    Abstract: True
    Array(0.26, dtype=float32)

    Note that JAX uses an abstract *shaped* array, so although we won't be able to
    include conditionals within our function that depend on the value of the tensor,
    we *can* include conditionals that depend on the shape of the tensor.

    Similarly, consider the following TensorFlow function:

    .. code-block:: python

        import tensorflow as tf

        def function(x):
            print("Value:", x)
            print("Abstract:", qml.math.is_abstract(x))
            return tf.reduce_sum(x ** 2)

    >>> x = tf.Variable([0.5, 0.1])
    >>> function(x)
    Value: <tf.Variable 'Variable:0' shape=(2,) dtype=float32, numpy=array([0.5, 0.1], dtype=float32)>
    Abstract: False
    <tf.Tensor: shape=(), dtype=float32, numpy=0.26>

    If we apply the ``@tf.function`` decorator, the tensor will now be abstract:

    >>> tf.function(function)(x)
    Value: <tf.Variable 'Variable:0' shape=(2,) dtype=float32>
    Abstract: True
    <tf.Tensor: shape=(), dtype=float32, numpy=0.26>
    """
    interface = like or get_interface(tensor)

    if interface == "jax":
        import jax
        from jax.interpreters.partial_eval import DynamicJaxprTracer

        if isinstance(
            tensor,
            (
                jax.interpreters.ad.JVPTracer,
                jax.interpreters.batching.BatchTracer,
                jax.interpreters.partial_eval.JaxprTracer,
            ),
        ):
            # Tracer objects will be used when computing gradients or applying transforms.
            # If the value of the tracer is known, it will contain a ConcreteArray.
            # Otherwise, it will be abstract.
            return not isinstance(tensor.aval, jax.core.ConcreteArray)

        return isinstance(tensor, DynamicJaxprTracer)

    if interface == "tensorflow":
        import tensorflow as tf
        from tensorflow.python.framework.ops import EagerTensor

        return not isinstance(tf.convert_to_tensor(tensor), EagerTensor)

    # Autograd does not have a JIT

    # QNodes do not currently support TorchScript:
    #   NotSupportedError: Compiled functions can't take variable number of arguments or
    #   use keyword-only arguments with defaults.
    return False
def import_should_record_backprop():  # pragma: no cover
    """Return should_record_backprop or an equivalent function from TensorFlow."""
    import tensorflow.python as tfpy

    if hasattr(tfpy.eager.tape, "should_record_backprop"):
        from tensorflow.python.eager.tape import should_record_backprop
    elif hasattr(tfpy.eager.tape, "should_record"):
        from tensorflow.python.eager.tape import should_record as should_record_backprop
    elif hasattr(tfpy.eager.record, "should_record_backprop"):
        from tensorflow.python.eager.record import should_record_backprop
    else:
        raise ImportError("Cannot import should_record_backprop from TensorFlow.")
    return should_record_backprop
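# Illustrative call site (assumed TensorFlow behaviour; the exact import path of
# this private helper is version dependent, which is what the shim above works
# around):
#
#     >>> import tensorflow as tf
#     >>> should_record = import_should_record_backprop()
#     >>> x = tf.Variable([0.6, 0.1])
#     >>> with tf.GradientTape():
#     ...     print(should_record([tf.convert_to_tensor(x)]))
#     True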
def requires_grad(tensor, interface=None):
    """Returns True if the tensor is considered trainable.

    .. warning::

        The implementation depends on the contained tensor type, and
        may be context dependent.

        For example, Torch tensors and PennyLane tensors track trainability
        as a property of the tensor itself. TensorFlow, on the other hand,
        only tracks trainability if being watched by a gradient tape.

    Args:
        tensor (tensor_like): input tensor
        interface (str): The name of the interface. Will be determined automatically
            if not provided.

    Returns:
        bool: whether the tensor is trainable or not.

    **Example**

    Calling this function on a PennyLane NumPy array:

    >>> x = np.array([1., 5.], requires_grad=True)
    >>> requires_grad(x)
    True
    >>> x.requires_grad = False
    >>> requires_grad(x)
    False

    PyTorch has similar behaviour.

    With TensorFlow, the output is dependent on whether the tensor
    is currently being watched by a gradient tape:

    >>> x = tf.Variable([0.6, 0.1])
    >>> requires_grad(x)
    False
    >>> with tf.GradientTape() as tape:
    ...     print(requires_grad(x))
    True

    While TensorFlow constants are by default not trainable, they can be
    manually watched by the gradient tape:

    >>> x = tf.constant([0.6, 0.1])
    >>> with tf.GradientTape() as tape:
    ...     print(requires_grad(x))
    False
    >>> with tf.GradientTape() as tape:
    ...     tape.watch([x])
    ...     print(requires_grad(x))
    True
    """
    interface = interface or get_interface(tensor)

    if interface == "tensorflow":
        import tensorflow as tf

        should_record_backprop = import_should_record_backprop()
        return should_record_backprop([tf.convert_to_tensor(tensor)])

    if interface == "autograd":
        if isinstance(tensor, ArrayBox):
            return True

        return getattr(tensor, "requires_grad", False)

    if interface == "torch":
        return getattr(tensor, "requires_grad", False)

    if interface in {"numpy", "scipy"}:
        return False

    if interface == "jax":
        import jax

        return isinstance(tensor, jax.core.Tracer)

    raise ValueError(f"Argument {tensor} is an unknown object")
def in_backprop(tensor, interface=None):
    """Returns True if the tensor is considered to be in a backpropagation environment.
    This works for Autograd, TensorFlow, and JAX. Unlike :func:`~.requires_grad`, it does
    not only check whether the tensor is differentiable, but whether a gradient is
    actually being computed.

    Args:
        tensor (tensor_like): input tensor
        interface (str): The name of the interface. Will be determined automatically
            if not provided.

    Returns:
        bool: whether the tensor is in a backpropagation environment or not.

    **Example**

    >>> x = tf.Variable([0.6, 0.1])
    >>> in_backprop(x)
    False
    >>> with tf.GradientTape() as tape:
    ...     print(in_backprop(x))
    True

    .. seealso:: :func:`~.requires_grad`
    """
    interface = interface or get_interface(tensor)

    if interface == "tensorflow":
        import tensorflow as tf

        should_record_backprop = import_should_record_backprop()
        return should_record_backprop([tf.convert_to_tensor(tensor)])

    if interface == "autograd":
        return isinstance(tensor, ArrayBox)

    if interface == "jax":
        import jax

        return isinstance(tensor, jax.core.Tracer)

    if interface in {"numpy", "scipy"}:
        return False

    raise ValueError(f"Cannot determine if {tensor} is in backpropagation.")