Quantum Optimal Control using TensorFlow

  • David Schuster lab (2017): "Speedup for quantum optimal control from automatic differentiation based on graphics processing units"

  • David Schuster lab (2019): "Gradient-based optimal control of open quantum systems using quantum trajectories and automatic differentiation"

  • Benefits of TensorFlow

    • Arbitrary cost functions, which would be hard to implement in GRAPE (a penalty-term sketch follows the loss function below)

    • Mature, well-maintained code: TensorFlow is developed by Google

    • Easy GPU integration

  • Follows the QuTiP pulseoptim calling convention:

optimizedResult = pulseoptim.optimize_pulse_unitary(
    H, controlsList, initialPsi0, targetPsi0, Ntime, totalTime,
    init_pulse_type='LIN',
)
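
For comparison with my results below, the OptimResult returned by QuTiP exposes the final fidelity error and the optimized amplitudes (attribute names per the QuTiP control documentation):

print(optimizedResult.fid_err)          # final fidelity error
finalAmps = optimizedResult.final_amps  # shape (Ntime, number of controls)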

My code

import numpy as np
import tensorflow as tf

# Convert a QuTiP Qobj to a dense TensorFlow constant.
def qtp2tf(x): return tf.constant(x.full())

# Crude first-order fallback for the matrix exponential: exp(A) ~ I + A.
def expm(A): return tf.eye(A.shape[0],dtype=A.dtype)+A
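
# A hedged alternative (my own sketch, not part of the run below): a
# differentiable matrix exponential via truncated Taylor series with
# scaling and squaring. It uses only matmuls, so unlike the Pade method
# inside tf.linalg.expm there is no matrix_solve that can fail on a
# singular denominator. The name expmTaylor and the defaults order=6,
# squarings=4 are introduced here.
def expmTaylor(A, order=6, squarings=4):
    A = A / (2.0 ** squarings)      # scale down so the series converges quickly
    result = tf.eye(A.shape[0], dtype=A.dtype)
    term = tf.eye(A.shape[0], dtype=A.dtype)
    for n in range(1, order + 1):   # truncated Taylor series of exp(A)
        term = tf.linalg.matmul(term, A) / n
        result += term
    for _ in range(squarings):      # undo the scaling: exp(A) = exp(A/2^s)^(2^s)
        result = tf.linalg.matmul(result, result)
    return result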

class QuantumOptimalControlModel(object):
    def __init__(
        self, H, controlsList, Ntime, totalTime, pulseType='linear',
    ):
        # constants
        self.Hdrift = qtp2tf(H)
        self.dtype = self.Hdrift.dtype
        self.controlsList = [ qtp2tf(c) for c in controlsList ]
        # variables: one amplitude per control and per time slice,
        # initialized to a linear ramp from -1 to 1
        pulse = np.linspace(-1, 1, Ntime)
        self.initialAmpsList = [ tf.constant(pulse, dtype=self.dtype) for _ in controlsList ]
        self.ampsList = [ tf.Variable(pulse, dtype=self.dtype) for _ in controlsList ]
        # others
        self.dim = H.shape[0]
        self.dt = totalTime / Ntime
        self.Ntime = Ntime
    def __call__(self, initialPsi):
        # Piecewise-constant evolution: U = prod_t exp(-i H(t) dt)
        U = tf.eye(self.dim, dtype=self.dtype)
        for nTime in range(self.Ntime):
            # Hamiltonian for this time slice: drift plus weighted controls
            Ht = self.Hdrift + sum([
                self.ampsList[nControl][nTime] * control
                for nControl, control in enumerate(self.controlsList)
            ])
            Ut = tf.linalg.expm(-1j * Ht * self.dt)
            #Ut = expm(-1j * Ht * self.dt)
            U = tf.linalg.matmul(Ut, U)
        finalPsi = tf.linalg.matmul(U, initialPsi)
        return finalPsi

# Fidelity |<a|b>|; the conjugate on the first argument is needed for a
# proper inner product of complex states.
def fidelityTensorFlow(a, b):
    return tf.abs(tf.tensordot(tf.math.conj(tf.squeeze(a)), tf.squeeze(b), 1))
def loss(targetPsi, finalPsi):
    return 1 - fidelityTensorFlow(targetPsi, finalPsi)
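
# The "arbitrary cost function" benefit in action: a minimal sketch
# (lossWithPenalty and penaltyWeight are hypothetical, not used in the
# run below) that adds a pulse-amplitude penalty on top of the infidelity.
def lossWithPenalty(targetPsi, finalPsi, ampsList, penaltyWeight=1e-3):
    penalty = sum(tf.reduce_sum(tf.abs(amps) ** 2) for amps in ampsList)
    return loss(targetPsi, finalPsi) + penaltyWeight * penalty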

def train(model, initial, target, learningRate=0.1):
    # One gradient-descent step on the pulse amplitudes. Plain SGD is
    # stateless, so re-creating the optimizer every call is harmless;
    # a stateful optimizer (e.g. Adam) should be created once outside.
    optimizer = tf.keras.optimizers.SGD(learning_rate=learningRate)
    with tf.GradientTape() as t:
        currentLoss = loss(target, model(initial))
    gradients = t.gradient(currentLoss, model.ampsList)
    optimizer.apply_gradients(zip(gradients, model.ampsList))
    return currentLoss

###########################################

import numpy as np
%matplotlib inline
from qutip import *
import qutip.control.pulseoptim as pulseoptim

from yarn.qutipHelpers import \
    jaynesCummingsHamiltonian, \
    plotExpectation, plotWigners, plotOptimalControl, \
    cat

dimCav = 20
H, controlsDict, observables, operators = jaynesCummingsHamiltonian(
    cavityDimension=dimCav, omegaCavity=1, omegaQubit=1, g=0.1
)
controlsList = list(controlsDict.values())
controlsName = list(controlsDict.keys())

zPlus = (cat(dimCav,2)+cat(dimCav,2j)).unit()
zMinus = (cat(dimCav,2)-cat(dimCav,2j)).unit()

initialPsi0 = tensor(coherent(dimCav,2), fock(2,0))
targetPsi0 = tensor(zPlus, fock(2,0))

Ntime = 500; totalTime = 50
time = np.linspace(0, totalTime, Ntime)

###########################################

initialPsi0 = qtp2tf(initialPsi0)
targetPsi0 = qtp2tf(targetPsi0)

model = QuantumOptimalControlModel(H, controlsList, Ntime, totalTime)

currentLoss = 1
maxIteration = 500
for i in range(maxIteration):
    currentLoss = train(model, initialPsi0, targetPsi0)
    percent10 = int(maxIteration/10)
    if i % percent10 == 0:
        print("{}% fidelity error {}".format(
            int(i*100/maxIteration), currentLoss
        ))
0% fidelity error 0.6635763552719809
---------------------------------------------------------------------------

InvalidArgumentError                      Traceback (most recent call last)

<ipython-input-1-bdf2ef10dac3> in <module>
     85 maxIteration=500
     86 for i in range(maxIteration):
---> 87     currentLoss = train(model,initialPsi0,targetPsi0)
     88     percent10 = int(maxIteration/10)
     89     if i % percent10 == 0:


<ipython-input-1-bdf2ef10dac3> in train(model, initial, target, learningRate)
     42     optimizer = tf.keras.optimizers.SGD(learning_rate=learningRate)
     43     with tf.GradientTape() as t:
---> 44         currentLoss = loss(target,model(initial))
     45     gradients = t.gradient(currentLoss, model.ampsList)
     46     optimizer.apply_gradients(zip(gradients, model.ampsList))


<ipython-input-1-bdf2ef10dac3> in __call__(self, initialPsi)
     28                 for nControl,control in enumerate(self.controlsList)
     29             ])
---> 30             Ut = tf.linalg.expm(-1j * Ht * self.dt)
     31             #Ut = expm(-1j * Ht * self.dt)
     32             U = tf.linalg.matmul(Ut, U)


c:\python38\lib\site-packages\tensorflow\python\ops\linalg\linalg_impl.py in matrix_exponential(input, name)
    322     numer = u + v
    323     denom = -u + v
--> 324     result = linalg_ops.matrix_solve(denom, numer)
    325     max_squarings = math_ops.reduce_max(squarings)
    326


c:\python38\lib\site-packages\tensorflow\python\ops\gen_linalg_ops.py in matrix_solve(matrix, rhs, adjoint, name)
   1563         raise
   1564     except _core._NotOkStatusException as e:
-> 1565       _ops.raise_from_not_ok_status(e, name)
   1566   # Add nodes to the TensorFlow graph.
   1567   if adjoint is None:


c:\python38\lib\site-packages\tensorflow\python\framework\ops.py in raise_from_not_ok_status(e, name)
   6651   message = e.message + (" name: " + name if name is not None else "")
   6652   # pylint: disable=protected-access
-> 6653   six.raise_from(core._status_to_exception(e.code, message), None)
   6654   # pylint: enable=protected-access
   6655


~\AppData\Roaming\Python\Python38\site-packages\six.py in raise_from(value, from_value)


InvalidArgumentError: Input matrix is not invertible. [Op:MatrixSolve]
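
My reading of the failure (unverified): ampsList holds complex128 variables, so the gradient update can push the amplitudes off the real axis; Ht then stops being Hermitian, -1j * Ht * dt acquires a growing real part, and the Pade denominator inside tf.linalg.expm eventually becomes singular, which is exactly the MatrixSolve error above. A minimal sketch of a workaround, keeping the amplitudes real and casting to complex only when the Hamiltonian is assembled (RealAmpsModel is a name introduced here):

# Sketch (assumption): real pulse amplitudes, cast per time slice.
class RealAmpsModel(QuantumOptimalControlModel):
    def __init__(self, H, controlsList, Ntime, totalTime):
        super().__init__(H, controlsList, Ntime, totalTime)
        pulse = np.linspace(-1, 1, Ntime)
        # real variables: a gradient step cannot make them complex
        self.ampsList = [ tf.Variable(pulse, dtype=tf.float64) for _ in controlsList ]
    def __call__(self, initialPsi):
        U = tf.eye(self.dim, dtype=self.dtype)
        for nTime in range(self.Ntime):
            Ht = self.Hdrift + sum([
                tf.cast(self.ampsList[nControl][nTime], self.dtype) * control
                for nControl, control in enumerate(self.controlsList)
            ])
            U = tf.linalg.matmul(tf.linalg.expm(-1j * Ht * self.dt), U)
        return tf.linalg.matmul(U, initialPsi)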