# Copyright 2018-2022 Xanadu Quantum Technologies Inc.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Differentiable quantum functions"""
import functools

# pylint: disable=import-outside-toplevel
import itertools
from string import ascii_letters as ABC

import scipy as sp
import scipy.sparse.linalg as spla
from autoray import numpy as np
from numpy import float64, sqrt  # pylint:disable=wrong-import-order
from scipy.sparse import csc_matrix, issparse

import pennylane as qml

from . import single_dispatch  # pylint:disable=unused-import
from .interface_utils import get_interface
from .matrix_manipulation import _permute_dense_matrix
from .multi_dispatch import diag, dot, einsum, scatter_element_add
from .utils import allclose, cast, cast_like, convert_like, is_abstract

ABC_ARRAY = np.array(list(ABC))
def cov_matrix(prob, obs, wires=None, diag_approx=False):
    """Calculate the covariance matrix of a list of commuting observables, given
    the joint probability distribution of the system in the shared eigenbasis.

    .. note::
        This method only works for **commuting observables.**
        If the probability distribution is the result of a quantum circuit,
        the quantum state must be rotated into the shared
        eigenbasis of the list of observables before measurement.

    Args:
        prob (tensor_like): probability distribution
        obs (list[.Observable]): a list of observables for which
            to compute the covariance matrix
        diag_approx (bool): if True, return the diagonal approximation
        wires (.Wires): The wire register of the system. If not provided, it is assumed
            that the wires are labelled with consecutive integers.

    Returns:
        tensor_like: the covariance matrix of size ``(len(obs), len(obs))``

    **Example**

    Consider the following ansatz and observable list:

    >>> obs_list = [qml.X(0) @ qml.Z(1), qml.Y(2)]
    >>> ansatz = qml.templates.StronglyEntanglingLayers

    We can construct a QNode to output the probability distribution in the shared eigenbasis of the
    observables:

    .. code-block:: python

        from pennylane import numpy as np

        dev = qml.device("default.qubit", wires=3)

        @qml.qnode(dev, interface="autograd")
        def circuit(weights):
            ansatz(weights, wires=[0, 1, 2])
            # rotate into the basis of the observables
            for o in obs_list:
                o.diagonalizing_gates()
            return qml.probs(wires=[0, 1, 2])

    We can now compute the covariance matrix:

    >>> shape = qml.templates.StronglyEntanglingLayers.shape(n_layers=2, n_wires=3)
    >>> weights = np.random.random(shape, requires_grad=True)
    >>> cov = qml.math.cov_matrix(circuit(weights), obs_list)
    >>> cov
    tensor([[0.98125435, 0.4905541 ],
            [0.4905541 , 0.99920878]], requires_grad=True)

    Autodifferentiation is fully supported using all interfaces.
    Here we use autograd:

    >>> cost_fn = lambda weights: qml.math.cov_matrix(circuit(weights), obs_list)[0, 1]
    >>> qml.grad(cost_fn)(weights)
    array([[[ 4.94240914e-17, -2.33786398e-01, -1.54193959e-01],
            [-3.05414996e-17,  8.40072236e-04,  5.57884080e-04],
            [ 3.01859411e-17,  8.60411436e-03,  6.15745204e-04]],
           [[ 6.80309533e-04, -1.23162742e-03,  1.08729813e-03],
            [-1.53863193e-01, -1.38700657e-02, -1.36243323e-01],
            [-1.54665054e-01, -1.89018172e-02, -1.56415558e-01]]])
    """
    variances = []

    # diagonal variances
    for i, o in enumerate(obs):
        eigvals = cast(o.eigvals(), dtype=float64)
        w = o.wires.labels if wires is None else wires.indices(o.wires)
        p = marginal_prob(prob, w)

        res = dot(eigvals**2, p) - (dot(eigvals, p)) ** 2
        variances.append(res)

    cov = diag(variances)

    if diag_approx:
        return cov

    for i, j in itertools.combinations(range(len(obs)), r=2):
        o1 = obs[i]
        o2 = obs[j]

        o1wires = o1.wires.labels if wires is None else wires.indices(o1.wires)
        o2wires = o2.wires.labels if wires is None else wires.indices(o2.wires)
        shared_wires = set(o1wires + o2wires)

        l1 = cast(o1.eigvals(), dtype=float64)
        l2 = cast(o2.eigvals(), dtype=float64)
        l12 = cast(np.kron(l1, l2), dtype=float64)

        p1 = marginal_prob(prob, o1wires)
        p2 = marginal_prob(prob, o2wires)
        p12 = marginal_prob(prob, shared_wires)

        res = dot(l12, p12) - dot(l1, p1) * dot(l2, p2)

        cov = scatter_element_add(cov, [i, j], res)
        cov = scatter_element_add(cov, [j, i], res)

    return cov

def marginal_prob(prob, axis):
    """Compute the marginal probability given a joint probability distribution expressed as a tensor.
    Each random variable corresponds to a dimension.

    If the distribution arises from a quantum circuit measured in computational basis, each dimension
    corresponds to a wire. For example, for a 2-qubit quantum circuit
    `prob[0, 1]` is the probability of measuring the first qubit in state 0 and the second in state 1.

    Args:
        prob (tensor_like): 1D tensor of probabilities. This tensor should be of size
            ``(2**N,)`` for some integer value ``N``.
        axis (list[int]): the axis for which to calculate the marginal
            probability distribution

    Returns:
        tensor_like: the marginal probabilities, of size ``(2**len(axis),)``

    **Example**

    >>> x = tf.Variable([1, 0, 0, 1.], dtype=tf.float64) / np.sqrt(2)
    >>> marginal_prob(x, axis=[0, 1])
    <tf.Tensor: shape=(4,), dtype=float64, numpy=array([0.70710678, 0.        , 0.        , 0.70710678])>
    >>> marginal_prob(x, axis=[0])
    <tf.Tensor: shape=(2,), dtype=float64, numpy=array([0.70710678, 0.70710678])>
    """
    prob = np.flatten(prob)
    num_wires = int(np.log2(len(prob)))

    if num_wires == len(axis):
        return prob

    inactive_wires = tuple(set(range(num_wires)) - set(axis))
    prob = np.reshape(prob, [2] * num_wires)
    prob = np.sum(prob, axis=inactive_wires)
    return np.flatten(prob)

def reduce_dm(density_matrix, indices, check_state=False, c_dtype="complex128"):
    """Compute the reduced density matrix of a subsystem from a state represented
    with a density matrix.

    Args:
        density_matrix (tensor_like): 2D or 3D density matrix tensor. This tensor should be of size
            ``(2**N, 2**N)`` or ``(batch_dim, 2**N, 2**N)``, for some integer number of wires ``N``.
        indices (list(int)): List of indices in the considered subsystem.
        check_state (bool): If True, the function will check the state validity (shape and norm).
        c_dtype (str): Complex floating point precision type.

    Returns:
        tensor_like: Density matrix of size ``(2**len(indices), 2**len(indices))`` or
        ``(batch_dim, 2**len(indices), 2**len(indices))``

    .. seealso:: :func:`pennylane.math.reduce_statevector`, and :func:`pennylane.density_matrix`

    **Example**

    >>> x = np.array([[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
    >>> reduce_dm(x, indices=[0])
    [[1.+0.j 0.+0.j]
     [0.+0.j 0.+0.j]]

    >>> y = [[0.5, 0, 0.5, 0], [0, 0, 0, 0], [0.5, 0, 0.5, 0], [0, 0, 0, 0]]
    >>> reduce_dm(y, indices=[0])
    [[0.5+0.j 0.5+0.j]
     [0.5+0.j 0.5+0.j]]

    >>> reduce_dm(y, indices=[1])
    [[1.+0.j 0.+0.j]
     [0.+0.j 0.+0.j]]

    >>> z = tf.Variable([[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], dtype=tf.complex128)
    >>> reduce_dm(z, indices=[1])
    tf.Tensor(
    [[1.+0.j 0.+0.j]
     [0.+0.j 0.+0.j]], shape=(2, 2), dtype=complex128)

    >>> x = np.array([[[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
    ...               [[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]])
    >>> reduce_dm(x, indices=[1])
    array([[[1.+0.j, 0.+0.j],
            [0.+0.j, 0.+0.j]],
           [[0.+0.j, 0.+0.j],
            [0.+0.j, 1.+0.j]]])
    """
    density_matrix = cast(density_matrix, dtype=c_dtype)

    if check_state:
        _check_density_matrix(density_matrix)

    if len(np.shape(density_matrix)) == 2:
        batch_dim, dim = None, density_matrix.shape[0]
    else:
        batch_dim, dim = density_matrix.shape[:2]

    num_indices = int(np.log2(dim))
    consecutive_indices = list(range(num_indices))

    # Return the full density matrix if all the wires are given, potentially permuted
    if len(indices) == num_indices:
        return _permute_dense_matrix(density_matrix, consecutive_indices, indices, batch_dim)

    if batch_dim is None:
        density_matrix = qml.math.stack([density_matrix])

    # Compute the partial trace
    traced_wires = [x for x in consecutive_indices if x not in indices]
    density_matrix = partial_trace(density_matrix, traced_wires, c_dtype=c_dtype)

    if batch_dim is None:
        density_matrix = density_matrix[0]

    # Permute the remaining indices of the density matrix
    return _permute_dense_matrix(density_matrix, sorted(indices), indices, batch_dim)

def partial_trace(matrix, indices, c_dtype="complex128"):
    """Compute the reduced density matrix by tracing out the provided indices.

    Args:
        matrix (tensor_like): 2D or 3D density matrix tensor. For a 2D tensor, the size is assumed to be
            ``(2**N, 2**N)``, for some integer number of wires ``N``. For a 3D tensor, the first dimension
            is assumed to be the batch dimension, ``(batch_dim, 2**N, 2**N)``.
        indices (list(int)): List of indices to be traced.
        c_dtype (str): Complex floating point precision type.

    Returns:
        tensor_like: (reduced) Density matrix of size ``(2**len(wires), 2**len(wires))``,
        where ``wires`` is the set of remaining (untraced) indices

    .. seealso:: :func:`pennylane.math.reduce_dm`, and :func:`pennylane.math.reduce_statevector`

    **Example**

    We can compute the partial trace of the matrix ``x`` with respect to its 0th index.

    >>> x = np.array([[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
    >>> partial_trace(x, indices=[0])
    array([[1.+0.j, 0.+0.j],
           [0.+0.j, 0.+0.j]])

    We can also pass a batch of matrices ``x`` to the function and return the partial trace of each
    matrix with respect to each matrix's 0th index.

    >>> x = np.array([
    ...     [[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
    ...     [[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
    ... ])
    >>> partial_trace(x, indices=[0])
    array([[[1.+0.j, 0.+0.j],
            [0.+0.j, 0.+0.j]],
           [[0.+0.j, 0.+0.j],
            [0.+0.j, 1.+0.j]]])

    The partial trace can also be computed with respect to multiple indices within different frameworks
    such as TensorFlow.

    >>> x = tf.Variable([[[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
    ...                  [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 0]]], dtype=tf.complex128)
    >>> partial_trace(x, indices=[1])
    <tf.Tensor: shape=(2, 2, 2), dtype=complex128, numpy=
    array([[[1.+0.j, 0.+0.j],
            [0.+0.j, 0.+0.j]],
           [[0.+0.j, 0.+0.j],
            [0.+0.j, 1.+0.j]]])>
    """
    # Autograd does not support same indices sum in backprop, and tensorflow
    # has a limit of 8 dimensions if same indices are used
    matrix = cast(matrix, dtype=c_dtype)
    if qml.math.ndim(matrix) == 2:
        is_batched = False
        batch_dim, dim = 1, matrix.shape[1]
    else:
        is_batched = True
        batch_dim, dim = matrix.shape[:2]

    if get_interface(matrix) in ["autograd", "tensorflow"]:
        return _batched_partial_trace_nonrep_indices(matrix, is_batched, indices, batch_dim, dim)

    # Dimension and reshape
    num_indices = int(np.log2(dim))
    rho_dim = 2 * num_indices

    matrix = np.reshape(matrix, [batch_dim] + [2] * 2 * num_indices)
    indices = np.sort(indices)

    # For loop over wires
    for i, target_index in enumerate(indices):
        target_index = target_index - i
        state_indices = ABC[1 : rho_dim - 2 * i + 1]
        state_indices = list(state_indices)

        target_letter = state_indices[target_index]
        state_indices[target_index + num_indices - i] = target_letter
        state_indices = "".join(state_indices)

        einsum_indices = f"a{state_indices}"
        matrix = einsum(einsum_indices, matrix)

    number_wires_sub = num_indices - len(indices)
    reduced_density_matrix = np.reshape(
        matrix, (batch_dim, 2**number_wires_sub, 2**number_wires_sub)
    )
    return reduced_density_matrix if is_batched else reduced_density_matrix[0]

def _batched_partial_trace_nonrep_indices(matrix, is_batched, indices, batch_dim, dim):
    """Compute the reduced density matrix for autograd interface by tracing out the provided indices
    with the use of projectors, as same subscript indices are not supported in autograd backprop.
    """
    num_indices = int(np.log2(dim))
    rho_dim = 2 * num_indices
    matrix = np.reshape(matrix, [batch_dim] + [2] * 2 * num_indices)

    kraus = cast(np.eye(2), matrix.dtype)

    kraus = np.reshape(kraus, (2, 1, 2))
    kraus_dagger = np.asarray([np.conj(np.transpose(k)) for k in kraus])

    kraus = convert_like(kraus, matrix)
    kraus_dagger = convert_like(kraus_dagger, matrix)

    # For loop over wires
    for target_wire in indices:
        # Tensor indices of density matrix
        state_indices = ABC[1 : rho_dim + 1]
        # row indices of the quantum state affected by this operation
        row_wires_list = [target_wire + 1]
        row_indices = "".join(ABC_ARRAY[row_wires_list].tolist())
        # column indices are shifted by the number of wires
        col_wires_list = [w + num_indices for w in row_wires_list]
        col_indices = "".join(ABC_ARRAY[col_wires_list].tolist())
        # indices in einsum must be replaced with new ones
        num_partial_trace_wires = 1
        new_row_indices = ABC[rho_dim + 1 : rho_dim + num_partial_trace_wires + 1]
        new_col_indices = ABC[
            rho_dim + num_partial_trace_wires + 1 : rho_dim + 2 * num_partial_trace_wires + 1
        ]
        # index for summation over Kraus operators
        kraus_index = ABC[
            rho_dim + 2 * num_partial_trace_wires + 1 : rho_dim + 2 * num_partial_trace_wires + 2
        ]
        # new state indices replace row and column indices with new ones
        new_state_indices = functools.reduce(
            lambda old_string, idx_pair: old_string.replace(idx_pair[0], idx_pair[1]),
            zip(col_indices + row_indices, new_col_indices + new_row_indices),
            state_indices,
        )
        # index mapping for einsum, e.g., 'iga,abcdef,idh->gbchef'
        einsum_indices = (
            f"{kraus_index}{new_row_indices}{row_indices}, a{state_indices},"
            f"{kraus_index}{col_indices}{new_col_indices}->a{new_state_indices}"
        )
        matrix = einsum(einsum_indices, kraus, matrix, kraus_dagger)

    number_wires_sub = num_indices - len(indices)
    reduced_density_matrix = np.reshape(
        matrix, (batch_dim, 2**number_wires_sub, 2**number_wires_sub)
    )
    return reduced_density_matrix if is_batched else reduced_density_matrix[0]

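
# The projector-based contraction above exists because autograd and TensorFlow cannot
# backpropagate through repeated einsum subscripts; for other interfaces ``partial_trace``
# uses the repeated-index path directly. A minimal NumPy-only sanity check follows
# (``_sketch_partial_trace_bell`` is a hypothetical helper, not part of the public API):
# tracing out one wire of a Bell-state density matrix should give the maximally mixed
# single-qubit state, whichever contraction path is taken.
def _sketch_partial_trace_bell():
    import numpy as onp  # plain NumPy, independent of the autoray dispatch above

    bell = onp.array([1, 0, 0, 1]) / onp.sqrt(2)
    rho = onp.outer(bell, bell.conj())
    reduced = partial_trace(rho, indices=[0])
    # the reduced state of either qubit of a Bell pair is I/2
    assert onp.allclose(reduced, onp.eye(2) / 2)
    return reduced
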
def reduce_statevector(state, indices, check_state=False, c_dtype="complex128"):
    """Compute the density matrix from a state vector.

    Args:
        state (tensor_like): 1D or 2D tensor state vector. This tensor should be of size ``(2**N,)``
            or ``(batch_dim, 2**N)``, for some integer value ``N``.
        indices (list(int)): List of indices in the considered subsystem.
        check_state (bool): If True, the function will check the state validity (shape and norm).
        c_dtype (str): Complex floating point precision type.

    Returns:
        tensor_like: Density matrix of size ``(2**len(indices), 2**len(indices))`` or
        ``(batch_dim, 2**len(indices), 2**len(indices))``

    .. seealso:: :func:`pennylane.math.reduce_dm` and :func:`pennylane.density_matrix`

    **Example**

    >>> x = np.array([1, 0, 0, 0])
    >>> reduce_statevector(x, indices=[0])
    [[1.+0.j 0.+0.j]
     [0.+0.j 0.+0.j]]

    >>> y = [1, 0, 1, 0] / np.sqrt(2)
    >>> reduce_statevector(y, indices=[0])
    [[0.5+0.j 0.5+0.j]
     [0.5+0.j 0.5+0.j]]

    >>> reduce_statevector(y, indices=[1])
    [[1.+0.j 0.+0.j]
     [0.+0.j 0.+0.j]]

    >>> z = tf.Variable([1, 0, 0, 0], dtype=tf.complex128)
    >>> reduce_statevector(z, indices=[1])
    tf.Tensor(
    [[1.+0.j 0.+0.j]
     [0.+0.j 0.+0.j]], shape=(2, 2), dtype=complex128)

    >>> x = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
    >>> reduce_statevector(x, indices=[1])
    array([[[1.+0.j, 0.+0.j],
            [0.+0.j, 0.+0.j]],
           [[0.+0.j, 0.+0.j],
            [0.+0.j, 1.+0.j]]])
    """
    state = cast(state, dtype=c_dtype)

    # Check the format and norm of the state vector
    if check_state:
        _check_state_vector(state)

    if len(np.shape(state)) == 1:
        batch_dim, dim = None, np.shape(state)[0]
    else:
        batch_dim, dim = np.shape(state)[:2]

        # batch dim exists but is unknown; cast to int so that reshaping works
        if batch_dim is None:
            batch_dim = -1

    # Get dimension of the quantum system and reshape
    num_wires = int(np.log2(dim))
    consecutive_wires = list(range(num_wires))

    if batch_dim is None:
        state = qml.math.stack([state])

    state = np.reshape(state, [batch_dim if batch_dim is not None else 1] + [2] * num_wires)

    # Get the system to be traced
    # traced_system = [x + 1 for x in consecutive_wires if x not in indices]

    # trace out the subsystem
    indices1 = ABC[1 : num_wires + 1]
    indices2 = "".join(
        [ABC[num_wires + i + 1] if i in indices else ABC[i + 1] for i in consecutive_wires]
    )
    target = "".join(
        [ABC[i + 1] for i in sorted(indices)] + [ABC[num_wires + i + 1] for i in sorted(indices)]
    )
    density_matrix = einsum(
        f"a{indices1},a{indices2}->a{target}", state, np.conj(state), optimize="greedy"
    )

    # Return the reduced density matrix by using numpy tensor product
    # density_matrix = np.tensordot(state, np.conj(state), axes=(traced_system, traced_system))

    if batch_dim is None:
        density_matrix = np.reshape(density_matrix, (2 ** len(indices), 2 ** len(indices)))
    else:
        density_matrix = np.reshape(
            density_matrix, (batch_dim, 2 ** len(indices), 2 ** len(indices))
        )

    return _permute_dense_matrix(density_matrix, sorted(indices), indices, batch_dim)

def dm_from_state_vector(state, check_state=False, c_dtype="complex128"):
    """
    Convenience function to compute a (full) density matrix from a state vector.

    Args:
        state (tensor_like): 1D or 2D tensor state vector. This tensor should be of size ``(2**N,)``
            or ``(batch_dim, 2**N)``, for some integer value ``N``.
        check_state (bool): If True, the function will check the state validity (shape and norm).
        c_dtype (str): Complex floating point precision type.

    Returns:
        tensor_like: Density matrix of size ``(2**N, 2**N)`` or ``(batch_dim, 2**N, 2**N)``

    **Example**

    >>> x = np.array([1, 0, 1j, 0]) / np.sqrt(2)
    >>> dm_from_state_vector(x)
    array([[0.5+0.j , 0. +0.j , 0. -0.5j, 0. +0.j ],
           [0. +0.j , 0. +0.j , 0. +0.j , 0. +0.j ],
           [0. +0.5j, 0. +0.j , 0.5+0.j , 0. +0.j ],
           [0. +0.j , 0. +0.j , 0. +0.j , 0. +0.j ]])
    """
    num_wires = int(np.log2(np.shape(state)[-1]))
    return reduce_statevector(
        state,
        indices=list(range(num_wires)),
        check_state=check_state,
        c_dtype=c_dtype,
    )

def purity(state, indices, check_state=False, c_dtype="complex128"):
    r"""Computes the purity of a density matrix.

    .. math::
        \gamma = \text{Tr}(\rho^2)

    where :math:`\rho` is the density matrix. The purity of a normalized quantum state satisfies
    :math:`\frac{1}{d} \leq \gamma \leq 1`, where :math:`d` is the dimension of the Hilbert space.
    A pure state has a purity of 1.

    It is possible to compute the purity of a sub-system from a given state. To find the purity of
    the overall state, include all wires in the ``indices`` argument.

    Args:
        state (tensor_like): Density matrix of shape ``(2**N, 2**N)`` or ``(batch_dim, 2**N, 2**N)``
        indices (list(int)): List of indices in the considered subsystem.
        check_state (bool): If ``True``, the function will check the state validity (shape and norm).
        c_dtype (str): Complex floating point precision type.

    Returns:
        float: Purity of the considered subsystem.

    **Example**

    >>> x = [[1/2, 0, 0, 1/2], [0, 0, 0, 0], [0, 0, 0, 0], [1/2, 0, 0, 1/2]]
    >>> purity(x, [0, 1])
    1.0
    >>> purity(x, [0])
    0.5

    >>> x = [[1/2, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1/2]]
    >>> purity(x, [0, 1])
    0.5
    """
    # Cast as a c_dtype array
    state = cast(state, dtype=c_dtype)

    density_matrix = reduce_dm(state, indices, check_state)
    return _compute_purity(density_matrix)

def _compute_purity(density_matrix):
    """Compute the purity from a density matrix

    Args:
        density_matrix (tensor_like): ``(2**N, 2**N)`` or ``(batch_dim, 2**N, 2**N)``
            tensor for an integer `N`.

    Returns:
        float: Purity of the density matrix.

    **Example**

    >>> x = [[1/2, 0], [0, 1/2]]
    >>> _compute_purity(x)
    0.5

    >>> x = [[1/2, 1/2], [1/2, 1/2]]
    >>> _compute_purity(x)
    1
    """
    batched = len(qml.math.shape(density_matrix)) > 2

    if batched:
        return qml.math.real(qml.math.einsum("abc,acb->a", density_matrix, density_matrix))

    return qml.math.real(qml.math.einsum("ab,ba", density_matrix, density_matrix))

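
# Tr(rho^2) is evaluated above as an einsum contraction rather than an explicit matrix
# product, which avoids materialising rho @ rho. A small illustrative check
# (``_sketch_purity_contraction`` is a hypothetical helper): the two formulations agree,
# and the maximally mixed qubit has purity 1/2 while a pure state has purity 1.
def _sketch_purity_contraction():
    import numpy as onp

    pure = onp.array([[1.0, 0.0], [0.0, 0.0]])
    mixed = onp.eye(2) / 2
    for dm in (pure, mixed):
        via_einsum = onp.einsum("ab,ba", dm, dm).real
        via_trace = onp.trace(dm @ dm).real
        assert onp.isclose(via_einsum, via_trace)
    return _compute_purity(mixed)  # expected to be 0.5
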
def vn_entropy(state, indices, base=None, check_state=False, c_dtype="complex128"):
    r"""Compute the von Neumann entropy from a density matrix on a given subsystem. It supports all
    interfaces (NumPy, Autograd, Torch, TensorFlow and Jax).

    .. math::
        S( \rho ) = -\text{Tr}( \rho \log ( \rho ))

    Args:
        state (tensor_like): Density matrix of shape ``(2**N, 2**N)`` or ``(batch_dim, 2**N, 2**N)``.
        indices (list(int)): List of indices in the considered subsystem.
        base (float): Base for the logarithm. If None, the natural logarithm is used.
        check_state (bool): If True, the function will check the state validity (shape and norm).
        c_dtype (str): Complex floating point precision type.

    Returns:
        float: Von Neumann entropy of the considered subsystem.

    **Example**

    The entropy of a subsystem for any state vector can be obtained. Here is an example for the
    maximally entangled state, where the subsystem entropy is maximal (default base for log is
    exponential).

    >>> x = [1, 0, 0, 1] / np.sqrt(2)
    >>> x = dm_from_state_vector(x)
    >>> vn_entropy(x, indices=[0])
    0.6931472

    The logarithm base can be switched to 2 for example.

    >>> vn_entropy(x, indices=[0], base=2)
    1.0

    .. seealso:: :func:`pennylane.vn_entropy`
    """
    density_matrix = reduce_dm(state, indices, check_state, c_dtype)
    entropy = _compute_vn_entropy(density_matrix, base)

    return entropy

def _compute_vn_entropy(density_matrix, base=None):
    """Compute the von Neumann entropy from a density matrix

    Args:
        density_matrix (tensor_like): ``(2**N, 2**N)`` or ``(batch_dim, 2**N, 2**N)``
            tensor for an integer `N`.
        base (float, int): Base for the logarithm. If None, the natural logarithm is used.

    Returns:
        float: Von Neumann entropy of the density matrix.

    **Example**

    >>> x = [[1/2, 0], [0, 1/2]]
    >>> _compute_vn_entropy(x)
    0.6931472

    >>> x = [[1/2, 0], [0, 1/2]]
    >>> _compute_vn_entropy(x, base=2)
    1.0
    """
    # Change basis if necessary
    if base:
        div_base = np.log(base)
    else:
        div_base = 1

    evs = qml.math.eigvalsh(density_matrix)
    evs = qml.math.where(evs > 0, evs, 1.0)
    entropy = qml.math.entr(evs) / div_base

    return entropy

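
# The entropy above is computed from the eigenvalues of the (reduced) density matrix,
# with non-positive eigenvalues replaced by 1 so that they contribute 0 to the sum
# (the 0*log(0) = 0 convention). A short illustrative check
# (``_sketch_vn_entropy_maximally_mixed`` is a hypothetical helper): the maximally mixed
# qubit has entropy ln(2), or exactly 1 when the base-2 logarithm is requested.
def _sketch_vn_entropy_maximally_mixed():
    import numpy as onp

    dm = onp.eye(2) / 2
    assert onp.isclose(_compute_vn_entropy(dm), onp.log(2))
    assert onp.isclose(_compute_vn_entropy(dm, base=2), 1.0)
    return _compute_vn_entropy(dm)
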

# pylint: disable=too-many-arguments
def mutual_info(
    state,
    indices0,
    indices1,
    base=None,
    check_state=False,
    c_dtype="complex128",
):
    r"""Compute the mutual information between two subsystems given a state:

    .. math::

        I(A, B) = S(\rho^A) + S(\rho^B) - S(\rho^{AB})

    where :math:`S` is the von Neumann entropy.

    The mutual information is a measure of correlation between two subsystems.
    More specifically, it quantifies the amount of information obtained about
    one system by measuring the other system.

    It supports all interfaces (NumPy, Autograd, Torch, TensorFlow and Jax).

    Each state must be given as a density matrix. To find the mutual information given
    a pure state, call :func:`~.math.dm_from_state_vector` first.

    Args:
        state (tensor_like): ``(2**N, 2**N)`` or ``(batch_dim, 2**N, 2**N)`` density matrix.
        indices0 (list[int]): List of indices in the first subsystem.
        indices1 (list[int]): List of indices in the second subsystem.
        base (float): Base for the logarithm. If None, the natural logarithm is used.
        check_state (bool): If True, the function will check the state validity (shape and norm).
        c_dtype (str): Complex floating point precision type.

    Returns:
        float: Mutual information between the subsystems

    **Examples**

    The mutual information between subsystems for a state vector can be returned as follows:

    >>> x = np.array([1, 0, 0, 1]) / np.sqrt(2)
    >>> x = qml.math.dm_from_state_vector(x)
    >>> qml.math.mutual_info(x, indices0=[0], indices1=[1])
    1.3862943611198906

    It is also possible to change the log base:

    >>> qml.math.mutual_info(x, indices0=[0], indices1=[1], base=2)
    2.0

    Similarly, the quantum state can be provided as a density matrix:

    >>> y = np.array([[1/2, 1/2, 0, 1/2], [1/2, 0, 0, 0], [0, 0, 0, 0], [1/2, 0, 0, 1/2]])
    >>> qml.math.mutual_info(y, indices0=[0], indices1=[1])
    0.4682351577408206

    .. seealso:: :func:`~.math.vn_entropy` and :func:`pennylane.mutual_info`
    """
    # the subsystems cannot overlap
    if len([index for index in indices0 if index in indices1]) > 0:
        raise ValueError("Subsystems for computing mutual information must not overlap.")

    return _compute_mutual_info(
        state,
        indices0,
        indices1,
        base=base,
        check_state=check_state,
        c_dtype=c_dtype,
    )

# pylint: disable=too-many-arguments
def _compute_mutual_info(
    state,
    indices0,
    indices1,
    base=None,
    check_state=False,
    c_dtype="complex128",
):
    """Compute the mutual information between the subsystems."""
    all_indices = sorted([*indices0, *indices1])
    vn_entropy_1 = vn_entropy(
        state, indices=indices0, base=base, check_state=check_state, c_dtype=c_dtype
    )
    vn_entropy_2 = vn_entropy(
        state, indices=indices1, base=base, check_state=check_state, c_dtype=c_dtype
    )
    vn_entropy_12 = vn_entropy(
        state, indices=all_indices, base=base, check_state=check_state, c_dtype=c_dtype
    )

    return vn_entropy_1 + vn_entropy_2 - vn_entropy_12


def _check_hermitian_operator(operators):
    """Check the shape, and if the matrix is Hermitian."""
    dim = operators.shape[-1]
    if (
        len(operators.shape) not in (2, 3)
        or operators.shape[-2] != dim
        or not np.log2(dim).is_integer()
    ):
        raise ValueError(
            "Operator matrix must be of shape (2**wires,2**wires) "
            "or (batch_dim, 2**wires, 2**wires)."
        )

    if len(operators.shape) == 2:
        operators = qml.math.stack([operators])

    if not is_abstract(operators):
        for ops in operators:
            conj_trans = np.transpose(np.conj(ops))
            if not allclose(ops, conj_trans):
                raise ValueError("The matrix is not Hermitian.")

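
# Mutual information is assembled above from three von Neumann entropies:
# I(A:B) = S(A) + S(B) - S(AB). An illustrative check
# (``_sketch_mutual_info_bell`` is a hypothetical helper): for a Bell state both
# marginals are maximally mixed and the joint state is pure, so
# I(A:B) = ln(2) + ln(2) - 0 = 2 ln(2).
def _sketch_mutual_info_bell():
    import numpy as onp

    bell = onp.array([1, 0, 0, 1]) / onp.sqrt(2)
    rho = dm_from_state_vector(bell)
    value = mutual_info(rho, indices0=[0], indices1=[1])
    assert onp.isclose(value, 2 * onp.log(2))
    return value
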
def expectation_value(
    operator_matrix, state_vector, check_state=False, check_operator=False, c_dtype="complex128"
):
    r"""Compute the expectation value of an operator with respect to a pure state.

    The expectation value is the probabilistic expected result of an experiment.
    Given a pure state, i.e., a state which can be represented as a single
    vector :math:`\ket{\psi}` in the Hilbert space, the expectation value of an
    operator :math:`A` can be computed as

    .. math::
        \langle A \rangle_\psi = \bra{\psi} A \ket{\psi}

    Args:
        operator_matrix (tensor_like): operator matrix with shape ``(2**N, 2**N)`` or ``(batch_dim, 2**N, 2**N)``.
        state_vector (tensor_like): state vector with shape ``(2**N)`` or ``(batch_dim, 2**N)``.
        check_state (bool): if True, the function will check the validity of the state vector
            via its shape and the norm.
        check_operator (bool): if True, the function will check the validity of the operator
            via its shape and whether it is Hermitian.
        c_dtype (str): complex floating point precision type.

    Returns:
        float: Expectation value of the operator for the state vector.

    **Example**

    The expectation value for any operator can be obtained by passing its matrix representation as an
    argument. For example, for a 2-qubit state, we can compute the expectation value of the operator
    :math:`Z \otimes I` as

    >>> import pennylane as qml
    >>> import numpy as np
    >>> state_vector = [1 / np.sqrt(2), 0, 1 / np.sqrt(2), 0]
    >>> operator_matrix = qml.matrix(qml.PauliZ(0), wire_order=[0, 1])
    >>> qml.math.expectation_value(operator_matrix, state_vector)
    tensor(-2.23711432e-17+0.j, requires_grad=True)

    .. seealso:: :func:`pennylane.math.fidelity`
    """
    state_vector = cast(state_vector, dtype=c_dtype)
    operator_matrix = cast(operator_matrix, dtype=c_dtype)

    if check_state:
        _check_state_vector(state_vector)

    if check_operator:
        _check_hermitian_operator(operator_matrix)

    if qml.math.shape(operator_matrix)[-1] != qml.math.shape(state_vector)[-1]:
        raise qml.QuantumFunctionError(
            "The operator and the state vector must have the same number of wires."
        )

    # The overlap <psi|A|psi>
    expval = qml.math.einsum(
        "...i,...i->...",
        qml.math.conj(state_vector),
        qml.math.einsum("...ji,...i->...j", operator_matrix, state_vector, optimize="greedy"),
        optimize="greedy",
    )
    return expval


# pylint: disable=too-many-arguments
def vn_entanglement_entropy(
    state, indices0, indices1, base=None, check_state=False, c_dtype="complex128"
):
    r"""Compute the von Neumann entanglement entropy between two subsystems in a given state.

    .. math::

        S(\rho_A) = -\text{Tr}[\rho_A \log \rho_A] = -\text{Tr}[\rho_B \log \rho_B] = S(\rho_B)

    where :math:`S` is the von Neumann entropy, and :math:`\rho_A = \text{Tr}_B [\rho_{AB}]` and
    :math:`\rho_B = \text{Tr}_A [\rho_{AB}]` are the reduced density matrices for each partition.

    The von Neumann entanglement entropy is a measure of the degree of quantum entanglement between
    two subsystems constituting a pure bipartite quantum state. The entropy of entanglement is the
    von Neumann entropy of the reduced density matrix for any of the subsystems. If it is non-zero,
    it indicates the two subsystems are entangled.

    Each state must be given as a density matrix. To find the entanglement entropy given
    a pure state, call :func:`~.math.dm_from_state_vector` first.

    Args:
        state (tensor_like): ``(2**N, 2**N)`` or ``(batch_dim, 2**N, 2**N)`` density matrix.
        indices0 (list[int]): Indices of the qubits in the first subsystem.
        indices1 (list[int]): Indices of the qubits in the second subsystem.
        base (float): Base for the logarithm. If ``None``, the natural logarithm is used.
        check_state (bool): If True, the function will check the state validity (shape and norm).
        c_dtype (str): Complex floating point precision type.

    Returns:
        float: The von Neumann entanglement entropy of the bipartite state.

    **Examples**

    The entanglement entropy between subsystems for a state vector can be returned as follows:

    >>> x = np.array([0, -1, 1, 0]) / np.sqrt(2)
    >>> x = qml.math.dm_from_state_vector(x)
    >>> qml.math.vn_entanglement_entropy(x, indices0=[0], indices1=[1])
    0.6931471805599453

    It is also possible to change the logarithm base:

    >>> qml.math.vn_entanglement_entropy(x, indices0=[0], indices1=[1], base=2)
    1

    Similarly, the quantum state can be provided as a density matrix:

    >>> y = np.array([[1, 1, -1, -1], [1, 1, -1, -1], [-1, -1, 1, 1], [-1, -1, 1, 1]]) * 0.25
    >>> qml.math.vn_entanglement_entropy(y, indices0=[0], indices1=[1])
    0
    """
    # The subsystems cannot overlap
    if len([index for index in indices0 if index in indices1]) > 0:
        raise ValueError("Subsystems for computing the entanglement entropy must not overlap.")

    return _compute_vn_entanglement_entropy(
        state, indices0, indices1, base=base, check_state=check_state, c_dtype=c_dtype
    )

def _compute_vn_entanglement_entropy(
    state, indices0, _, base=None, check_state=False, c_dtype="complex128"
):
    """Computes the von Neumann entanglement entropy between the subsystems."""
    vn_entropy_1 = vn_entropy(
        state, indices=indices0, base=base, check_state=check_state, c_dtype=c_dtype
    )

    # The von Neumann entropy of the two subsystems should be the same if the overall state is a
    # pure state. Here we trust that the user only uses this function for pure states, and do not
    # perform any checks so that the code is compatible with jax.jit
    return vn_entropy_1

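
# For a pure bipartite state the two reduced density matrices share the same non-zero
# spectrum, which is why only one subsystem entropy is evaluated above. An illustrative
# check on a partially entangled pure state (``_sketch_entanglement_entropy_symmetry``
# is a hypothetical helper):
def _sketch_entanglement_entropy_symmetry():
    import numpy as onp

    psi = onp.array([onp.cos(onp.pi / 8), 0, 0, onp.sin(onp.pi / 8)])
    rho = dm_from_state_vector(psi)
    s_a = vn_entropy(rho, indices=[0])
    s_b = vn_entropy(rho, indices=[1])
    assert onp.isclose(s_a, s_b)  # S(A) = S(B) for a pure bipartite state
    return vn_entanglement_entropy(rho, indices0=[0], indices1=[1])
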
def sqrt_matrix(density_matrix):
    r"""Compute the square root matrix of a density matrix where :math:`\rho = \sqrt{\rho} \times \sqrt{\rho}`

    Args:
        density_matrix (tensor_like): 2D or 3D (with batching) density matrix of the quantum system.

    Returns:
        (tensor_like): Square root of the density matrix.
    """
    evs, vecs = qml.math.linalg.eigh(density_matrix)
    evs = qml.math.real(evs)
    evs = qml.math.where(evs > 0.0, evs, 0.0)
    if not is_abstract(evs):
        evs = qml.math.cast_like(evs, vecs)

    shape = qml.math.shape(density_matrix)
    if len(shape) > 2:
        # broadcasting case
        i = qml.math.cast_like(qml.math.convert_like(qml.math.eye(shape[-1]), evs), evs)
        sqrt_evs = qml.math.expand_dims(qml.math.sqrt(evs), 1) * i
        return vecs @ sqrt_evs @ qml.math.conj(qml.math.transpose(vecs, (0, 2, 1)))

    return vecs @ qml.math.diag(qml.math.sqrt(evs)) @ qml.math.conj(qml.math.transpose(vecs))

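
# The dense square root above diagonalizes the density matrix, clips small negative
# eigenvalues to zero, and reassembles V sqrt(Lambda) V^dagger. An illustrative
# round-trip check (``_sketch_sqrt_matrix_roundtrip`` is a hypothetical helper):
# squaring the result recovers the original matrix.
def _sketch_sqrt_matrix_roundtrip():
    import numpy as onp

    rho = onp.array([[0.75, 0.25], [0.25, 0.25]])  # Hermitian, positive semi-definite, trace 1
    root = sqrt_matrix(rho)
    assert onp.allclose(root @ root, rho, atol=1e-8)
    return root
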
def sqrt_matrix_sparse(sparse_matrix):
    r"""Compute the square root matrix of a positive-definite Hermitian matrix where
    :math:`\rho = \sqrt{\rho} \times \sqrt{\rho}`

    Args:
        sparse_matrix (sparse): 2D sparse matrix of the quantum system.

    Returns:
        (sparse): Square root of the sparse matrix. Even for data types like `csr_matrix` or
        `csc_matrix`, the output matrix is not guaranteed to be sparse as well.
    """
    if not issparse(sparse_matrix):
        raise TypeError(
            "sqrt_matrix_sparse currently only supports scipy.sparse matrices, "
            f"but received {type(sparse_matrix)}."
        )
    if sparse_matrix.nnz == 0:
        return sparse_matrix
    # NOTE: the following steps should be re-visited in the future to establish
    # better understanding and control over the heuristics we chose
    # 1. choice of max iteration and tolerance for denman beavers, sc-85713
    # 2. different methods for sparse matrix square root, sc-85710
    return _denman_beavers_iterations(sparse_matrix, max_iter=100, tol=1e-10)


def _inv_newton(M, guess):
    """
    Compute the inverse of a matrix using Newton's method.

    Args:
        M (array-like): The matrix to be inverted.
        guess (array-like): An initial guess for the inverse of the matrix.

    Returns:
        array-like: An improved estimate of the inverse of the matrix.
    """
    return 2 * guess - guess @ M @ guess


def _denman_beavers_iterations(mat, max_iter=100, tol=1e-13):
    """Compute matrix square root using the Denman-Beavers iteration.

    The Denman-Beavers iteration was introduced by E. D. Denman and A. N. Beavers in 1976 and stems
    from Newton-type methods originally used to compute the matrix sign function. In this adaptation
    for matrix square roots, two matrices (Y and Z) are refined in each step until convergence.
    This technique is often effective for sparse or structured matrices, particularly those that
    are positive semidefinite or invertible.

    Args:
        mat (sparse): Sparse input matrix
        max_iter (int): Maximum number of iterations
        tol (float): Convergence tolerance (absolute tolerance). Measured using the Frobenius norm
            of the difference between input mat and the square of the output.

    Returns:
        scipy.sparse.spmatrix: Square root of the input matrix

    Raises:
        LinAlgError: If matrix inversion fails
        ValueError: If NaN values or overflow are encountered during computation
    """
    if mat.shape == (1, 1):
        return sqrt(mat)
    try:
        mat = csc_matrix(mat)
        Y = mat
        Z = sp.sparse.eye(mat.shape[0], format="csc")

        # Keep track of previous iteration for convergence check
        Y_prev = None
        norm_diff = None

        for iter_num in range(max_iter):
            # Compute next iteration
            if iter_num < 2:
                Zinv = spla.inv(Z) if iter_num > 0 else Z
                Yinv = spla.inv(Y)
            else:
                # Take Newton step
                Zinv = _inv_newton(Z, Zinv)
                Yinv = _inv_newton(Y, Yinv)
            Y = 0.5 * (Y + Zinv)
            Z = 0.5 * (Z + Yinv)

            # Check for NaN or infinite values
            if not (np.all(np.isfinite(Y.data)) and np.all(np.isfinite(Z.data))):
                raise ValueError(
                    "Invalid values encountered during computation: nan or inf. "
                    f"Input matrix: {mat.toarray()}"
                )

            # Check convergence every 10 iterations
            if iter_num % 10 == 0 and iter_num > 0:
                if Y_prev is not None:
                    # Compute Frobenius norm of difference
                    diff = Y - Y_prev
                    norm_diff = spla.norm(diff)
                    if norm_diff < tol:
                        break
                Y_prev = Y.copy()

        numerical_error = spla.norm(Y @ Y - mat)
        if (norm_diff and norm_diff > tol) or numerical_error > tol:
            raise ValueError(
                f"Convergence threshold not reached after {max_iter} iterations, "
                f"with final norm error {norm_diff} and numerical error {numerical_error}"
            )
        return Y
    except RuntimeError as e:
        raise ValueError(
            "Invalid values encountered during matrix multiplication. "
            f"Input matrix: {mat.toarray()} "
            f"system error: {e}"
        ) from e


def _compute_relative_entropy(rho, sigma, base=None):
    r"""
    Compute the quantum relative entropy of density matrix rho with respect to sigma.

    .. math::
        S(\rho\,\|\,\sigma)=-\text{Tr}(\rho\log\sigma)-S(\rho)=\text{Tr}(\rho\log\rho)-\text{Tr}(\rho\log\sigma)
        =\text{Tr}(\rho(\log\rho-\log\sigma))

    where :math:`S` is the von Neumann entropy.
    """
    if base:
        div_base = np.log(base)
    else:
        div_base = 1

    evs_rho, u_rho = qml.math.linalg.eigh(rho)
    evs_sig, u_sig = qml.math.linalg.eigh(sigma)

    # cast all eigenvalues to real
    evs_rho, evs_sig = np.real(evs_rho), np.real(evs_sig)

    # zero eigenvalues need to be treated very carefully here
    # we use the convention that 0 * log(0) = 0
    evs_sig = qml.math.where(evs_sig == 0, 0.0, evs_sig)
    rho_nonzero_mask = qml.math.where(evs_rho == 0.0, False, True)

    ent = qml.math.entr(qml.math.where(rho_nonzero_mask, evs_rho, 1.0))

    # whether the inputs are batched
    rho_batched = len(qml.math.shape(rho)) > 2
    sig_batched = len(qml.math.shape(sigma)) > 2

    indices_rho = "abc" if rho_batched else "bc"
    indices_sig = "abd" if sig_batched else "bd"
    target = "acd" if rho_batched or sig_batched else "cd"

    # the matrix of inner products between eigenvectors of rho and eigenvectors
    # of sigma; this is a doubly stochastic matrix
    rel = qml.math.einsum(
        f"{indices_rho},{indices_sig}->{target}", np.conj(u_rho), u_sig, optimize="greedy"
    )
    rel = np.abs(rel) ** 2

    if sig_batched:
        evs_sig = qml.math.expand_dims(evs_sig, 1)

    rel = qml.math.sum(qml.math.where(rel == 0.0, 0.0, np.log(evs_sig) * rel), -1)
    rel = -qml.math.sum(qml.math.where(rho_nonzero_mask, evs_rho * rel, 0.0), -1)

    return (rel - ent) / div_base

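
# A minimal dense-NumPy sketch of the Denman-Beavers iteration implemented in
# ``_denman_beavers_iterations`` above (``_sketch_denman_beavers_dense`` is a hypothetical
# helper; it uses plain matrix inverses instead of the sparse Newton-refined inverses and
# omits the convergence checks). For positive-definite ``M``, Y converges to sqrt(M) and Z
# to inv(sqrt(M)); e.g. passing numpy.diag([4.0, 9.0]) returns a matrix close to diag([2.0, 3.0]).
def _sketch_denman_beavers_dense(M, num_steps=20):
    import numpy as onp

    Y = onp.array(M, dtype=float)
    Z = onp.eye(Y.shape[0])
    for _ in range(num_steps):
        # both updates use the previous Y and Z, hence the simultaneous assignment
        Y, Z = 0.5 * (Y + onp.linalg.inv(Z)), 0.5 * (Z + onp.linalg.inv(Y))
    return Y  # Y @ Y should be close to M for positive-definite input
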
def relative_entropy(state0, state1, base=None, check_state=False, c_dtype="complex128"):
    r"""
    Compute the quantum relative entropy of one state with respect to another.

    .. math::
        S(\rho\,\|\,\sigma)=-\text{Tr}(\rho\log\sigma)-S(\rho)=\text{Tr}(\rho\log\rho)-\text{Tr}(\rho\log\sigma)
        =\text{Tr}(\rho(\log\rho-\log\sigma))

    Roughly speaking, quantum relative entropy is a measure of distinguishability between two
    quantum states. It is the quantum mechanical analog of relative entropy.

    Each state must be given as a density matrix. To find the relative entropy given
    a pure state, call :func:`~.math.dm_from_state_vector` first.

    Args:
        state0 (tensor_like): ``(2**N, 2**N)`` or ``(batch_dim, 2**N, 2**N)`` density matrix.
        state1 (tensor_like): ``(2**N, 2**N)`` or ``(batch_dim, 2**N, 2**N)`` density matrix.
        base (float): Base for the logarithm. If None, the natural logarithm is used.
        check_state (bool): If True, the function will check the state validity (shape and norm).
        c_dtype (str): Complex floating point precision type.

    Returns:
        float: Quantum relative entropy of state0 with respect to state1

    **Examples**

    The relative entropy between two equal states is always zero:

    >>> x = np.array([1, 0])
    >>> x = qml.math.dm_from_state_vector(x)
    >>> qml.math.relative_entropy(x, x)
    0.0

    and the relative entropy between two non-equal pure states is always infinity:

    >>> y = np.array([1, 1]) / np.sqrt(2)
    >>> y = qml.math.dm_from_state_vector(y)
    >>> qml.math.relative_entropy(x, y)
    inf

    The quantum states can be provided as density matrices, allowing for computation
    of relative entropy between mixed states:

    >>> rho = np.array([[0.3, 0], [0, 0.7]])
    >>> sigma = np.array([[0.5, 0], [0, 0.5]])
    >>> qml.math.relative_entropy(rho, sigma)
    0.08228288

    It is also possible to change the log base:

    >>> qml.math.relative_entropy(rho, sigma, base=2)
    0.1187091
    """
    # Cast as a c_dtype array
    state0 = cast(state0, dtype=c_dtype)

    # Cannot be cast_like if jit
    if not is_abstract(state0):
        state1 = cast_like(state1, state0)

    if check_state:
        # pylint: disable=expression-not-assigned
        _check_density_matrix(state0)
        _check_density_matrix(state1)

    # Compare the number of wires on both subsystems
    if qml.math.shape(state0)[-1] != qml.math.shape(state1)[-1]:
        raise qml.QuantumFunctionError("The two states must have the same number of wires.")

    return _compute_relative_entropy(state0, state1, base=base)

def _check_density_matrix(density_matrix):
    """Check the shape, the trace and the positive semi-definiteness of a matrix."""
    dim = density_matrix.shape[-1]
    if (
        len(density_matrix.shape) not in (2, 3)
        or density_matrix.shape[-2] != dim
        or not np.log2(dim).is_integer()
    ):
        raise ValueError("Density matrix must be of shape (2**N, 2**N) or (batch_dim, 2**N, 2**N).")

    if len(density_matrix.shape) == 2:
        density_matrix = qml.math.stack([density_matrix])

    if not is_abstract(density_matrix):
        for dm in density_matrix:
            # Check trace
            trace = np.trace(dm)
            if not allclose(trace, 1.0, atol=1e-10):
                raise ValueError("The trace of the density matrix should be one.")

            # Check if the matrix is Hermitian
            conj_trans = np.transpose(np.conj(dm))
            if not allclose(dm, conj_trans):
                raise ValueError("The matrix is not Hermitian.")

            # Check if positive semi-definite
            evs, _ = qml.math.linalg.eigh(dm)
            evs = np.real(evs)
            evs_non_negative = [ev for ev in evs if ev >= -1e-7]
            if len(evs) != len(evs_non_negative):
                raise ValueError("The matrix is not positive semi-definite.")


def _check_state_vector(state_vector):
    """Check the shape and the norm of a state vector."""
    dim = state_vector.shape[-1]
    if len(np.shape(state_vector)) not in (1, 2) or not np.log2(dim).is_integer():
        raise ValueError("State vector must be of shape (2**wires,) or (batch_dim, 2**wires)")

    if len(state_vector.shape) == 1:
        state_vector = qml.math.stack([state_vector])

    # Check norm
    if not is_abstract(state_vector):
        for sv in state_vector:
            norm = np.linalg.norm(sv, ord=2)
            if not allclose(norm, 1.0, atol=1e-10):
                raise ValueError("Sum of amplitudes-squared does not equal one.")

def max_entropy(state, indices, base=None, check_state=False, c_dtype="complex128"):
    r"""Compute the maximum entropy of a density matrix on a given subsystem. It supports all
    interfaces (NumPy, Autograd, Torch, TensorFlow and Jax).

    .. math::
        S_{\text{max}}( \rho ) = \log( \text{rank} ( \rho ))

    Args:
        state (tensor_like): Density matrix of shape ``(2**N, 2**N)`` or ``(batch_dim, 2**N, 2**N)``.
        indices (list(int)): List of indices in the considered subsystem.
        base (float): Base for the logarithm. If None, the natural logarithm is used.
        check_state (bool): If True, the function will check the state validity (shape and norm).
        c_dtype (str): Complex floating point precision type.

    Returns:
        float: The maximum entropy of the considered subsystem.

    **Example**

    The maximum entropy of a subsystem for any state vector can be obtained by first calling
    :func:`~.math.dm_from_state_vector` on the input. Here is an example for the maximally entangled
    state, where the subsystem entropy is maximal (default base for log is exponential).

    >>> x = [1, 0, 0, 1] / np.sqrt(2)
    >>> x = dm_from_state_vector(x)
    >>> max_entropy(x, indices=[0])
    0.6931472

    The logarithm base can be changed. For example:

    >>> max_entropy(x, indices=[0], base=2)
    1.0

    The maximum entropy can be obtained by providing a quantum state as a density matrix. For example:

    >>> y = [[1/2, 0, 0, 1/2], [0, 0, 0, 0], [0, 0, 0, 0], [1/2, 0, 0, 1/2]]
    >>> max_entropy(y, indices=[0])
    0.6931472

    The maximum entropy is always greater than or equal to the von Neumann entropy. In this
    maximally entangled example, they are equal:

    >>> vn_entropy(x, indices=[0])
    0.6931472

    However, in general, the von Neumann entropy is lower:

    >>> x = [np.cos(np.pi/8), 0, 0, -1j*np.sin(np.pi/8)]
    >>> x = dm_from_state_vector(x)
    >>> vn_entropy(x, indices=[1])
    0.4164955
    >>> max_entropy(x, indices=[1])
    0.6931472
    """
    density_matrix = reduce_dm(state, indices, check_state, c_dtype)
    maximum_entropy = _compute_max_entropy(density_matrix, base)

    return maximum_entropy

def _compute_max_entropy(density_matrix, base):
    """Compute the maximum entropy from a density matrix

    Args:
        density_matrix (tensor_like): ``(2**N, 2**N)`` or ``(batch_dim, 2**N, 2**N)``
            tensor for an integer `N`.
        base (float, int): Base for the logarithm. If None, the natural logarithm is used.

    Returns:
        float: Maximum entropy of the density matrix.

    **Example**

    >>> x = [[1/2, 0], [0, 1/2]]
    >>> _compute_max_entropy(x)
    0.6931472

    >>> x = [[1/2, 0], [0, 1/2]]
    >>> _compute_max_entropy(x, base=2)
    1.0
    """
    # Change basis if necessary
    if base:
        div_base = np.log(base)
    else:
        div_base = 1

    evs = qml.math.eigvalsh(density_matrix)
    evs = qml.math.real(evs)
    rank = qml.math.sum(evs / qml.math.where(evs > 1e-8, evs, 1.0), -1)
    maximum_entropy = qml.math.log(rank) / div_base

    return maximum_entropy

def min_entropy(state, indices, base=None, check_state=False, c_dtype="complex128"):
    r"""Compute the minimum entropy from a density matrix.

    .. math::
        S_{\text{min}}( \rho ) = -\log( \max_{i} ( p_{i} ))

    Args:
        state (tensor_like): Density matrix of shape ``(2**N, 2**N)`` or ``(batch_dim, 2**N, 2**N)``.
        indices (list(int)): List of indices in the considered subsystem.
        base (float): Base for the logarithm. If None, the natural logarithm is used.
        check_state (bool): If True, the function will check the state validity (shape and norm).
        c_dtype (str): Complex floating point precision type.

    Returns:
        float: The minimum entropy of the considered subsystem.

    **Example**

    The minimum entropy of a subsystem for any state vector can be obtained by first calling
    :func:`~.math.dm_from_state_vector` on the input. Here is an example for the maximally entangled
    state, where the subsystem entropy is maximal (default base for log is exponential).

    >>> x = [1, 0, 0, 1] / np.sqrt(2)
    >>> x = dm_from_state_vector(x)
    >>> min_entropy(x, indices=[0])
    0.6931472

    The logarithm base can be changed. For example:

    >>> min_entropy(x, indices=[0], base=2)
    1.0

    The minimum entropy can be obtained by providing a quantum state as a density matrix. For example:

    >>> y = [[1/2, 0, 0, 1/2], [0, 0, 0, 0], [0, 0, 0, 0], [1/2, 0, 0, 1/2]]
    >>> min_entropy(y, indices=[0])
    0.6931472

    The von Neumann entropy is always greater than or equal to the minimum entropy.

    >>> x = [np.cos(np.pi/8), 0, 0, -1j*np.sin(np.pi/8)]
    >>> x = dm_from_state_vector(x)
    >>> vn_entropy(x, indices=[1])
    0.4164955
    >>> min_entropy(x, indices=[1])
    0.1583472
    """
    density_matrix = reduce_dm(state, indices, check_state, c_dtype)
    minimum_entropy = _compute_min_entropy(density_matrix, base)

    return minimum_entropy

def _compute_min_entropy(density_matrix, base):
    r"""Compute the minimum entropy from a density matrix

    Args:
        density_matrix (tensor_like): ``(2**N, 2**N)`` tensor density matrix for an integer `N`.
        base (float, int): Base for the logarithm. If None, the natural logarithm is used.

    Returns:
        float: Minimum entropy of the density matrix.

    **Example**

    >>> x = [[1/2, 0], [0, 1/2]]
    >>> _compute_min_entropy(x)
    0.6931472

    >>> x = [[1/2, 0], [0, 1/2]]
    >>> _compute_min_entropy(x, base=2)
    1.0
    """
    # Change basis if necessary
    div_base = np.log(base) if base else 1

    evs, _ = qml.math.linalg.eigh(density_matrix)
    evs = qml.math.real(evs)
    minimum_entropy = -qml.math.log(qml.math.max(evs)) / div_base

    return minimum_entropy

def trace_distance(state0, state1, check_state=False, c_dtype="complex128"):
    r"""
    Compute the trace distance between two quantum states.

    .. math::
        T(\rho, \sigma)=\frac12\|\rho-\sigma\|_1
        =\frac12\text{Tr}\left(\sqrt{(\rho-\sigma)^{\dagger}(\rho-\sigma)}\right)

    where :math:`\|\cdot\|_1` is the Schatten :math:`1`-norm.

    The trace distance measures how close two quantum states are. In particular, it upper-bounds
    the probability of distinguishing two quantum states.

    Args:
        state0 (tensor_like): ``(2**N, 2**N)`` or ``(batch_dim, 2**N, 2**N)`` density matrix.
        state1 (tensor_like): ``(2**N, 2**N)`` or ``(batch_dim, 2**N, 2**N)`` density matrix.
        check_state (bool): If True, the function will check the states' validity (shape and norm).
        c_dtype (str): Complex floating point precision type.

    Returns:
        float: Trace distance between state0 and state1

    **Examples**

    The trace distance between two equal states is always zero:

    >>> x = np.array([[1, 0], [0, 0]])
    >>> qml.math.trace_distance(x, x)
    0.0

    It is possible to use state vectors by first transforming them into density matrices via the
    :func:`~reduce_statevector` function:

    >>> y = qml.math.reduce_statevector(np.array([0.2, np.sqrt(0.96)]), [0])
    >>> qml.math.trace_distance(x, y)
    0.9797958971132713

    The quantum states can also be provided as batches of density matrices:

    >>> batch0 = np.array([np.eye(2) / 2, np.ones((2, 2)) / 2, np.array([[1, 0],[0, 0]])])
    >>> batch1 = np.array([np.ones((2, 2)) / 2, np.ones((2, 2)) / 2, np.array([[1, 0],[0, 0]])])
    >>> qml.math.trace_distance(batch0, batch1)
    array([0.5, 0. , 0. ])

    If only one of the two states represents a single element, then the trace distances are taken
    with respect to that element:

    >>> rho = np.ones((2, 2)) / 2
    >>> qml.math.trace_distance(rho, batch0)
    array([0.5       , 0.        , 0.70710678])
    """
    # Cast as a c_dtype array
    state0 = cast(state0, dtype=c_dtype)

    # Cannot be cast_like if jit
    if not is_abstract(state0):
        state1 = cast_like(state1, state0)

    if check_state:
        _check_density_matrix(state0)
        _check_density_matrix(state1)

    if state0.shape[-1] != state1.shape[-1]:
        raise qml.QuantumFunctionError("The two states must have the same number of wires.")

    if len(state0.shape) == len(state1.shape) == 3 and state0.shape[0] != state1.shape[0]:
        raise ValueError(
            "The two states must be batches of the same size, or one of them must contain a single "
            "element."
        )

    eigvals = qml.math.abs(qml.math.eigvalsh(state0 - state1))

    return qml.math.sum(eigvals, axis=-1) / 2

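
# The trace distance above is evaluated from the eigenvalues of rho - sigma, since
# ||A||_1 = sum_i |lambda_i| for Hermitian A. An illustrative check
# (``_sketch_trace_distance_orthogonal`` is a hypothetical helper): two orthogonal
# pure qubit states are perfectly distinguishable, giving T = 1.
def _sketch_trace_distance_orthogonal():
    import numpy as onp

    rho = onp.array([[1.0, 0.0], [0.0, 0.0]])
    sigma = onp.array([[0.0, 0.0], [0.0, 1.0]])
    value = trace_distance(rho, sigma)
    assert onp.isclose(value, 1.0)
    return value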