
Commit 6ffb0d0

initial commit
1 parent 9d38efd commit 6ffb0d0

File tree

5 files changed: +398 −0 lines changed

.gitignore

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
.DS_Store

__pycache__/

Result/

args.py

Lines changed: 27 additions & 0 deletions
@@ -0,0 +1,27 @@
import argparse

parser = argparse.ArgumentParser(description="")

parser.add_argument("-model", type=str, default="XY", help="interacting model [XY]")
parser.add_argument("-tau", type=float, default=0.025, help="initial inverse temperature [0.025]")
parser.add_argument("-NBeta", type=int, default=10, help="number of temperature points [10]")

parser.add_argument("-D", type=int, default=20, help="bond dimensions [20]")
parser.add_argument("-depth", type=int, default=3, help="sweep depth [3]")
parser.add_argument("-Niter", type=int, default=10, help="max iterations of a single isometry optimization [10]")
parser.add_argument("-Nsweep", type=int, default=3, help="number of sweeps [3]")
parser.add_argument("-opti", type=int, default=1, help="optimization flag [1]")

parser.add_argument("-use_float32", action="store_true", help="use float32")
parser.add_argument("-cuda", type=int, default=-1, help="GPU ID [default: -1 (CPU)]")
parser.add_argument("-rdir", type=str, default="./Result/", help="result folder")

args = parser.parse_args()

# print a small table of all parsed arguments
print('---------------+---------')
print('%15s|%8s' % ('arguments', 'values'))
print('---------------+---------')
for arg in vars(args):
    print('%15s|%8s' % (arg, getattr(args, arg)))
print('---------------+---------')
print('\n')
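As a usage sketch (the flag values below are hypothetical): parsing happens at import time, so `from args import args` prints the table above as a side effect.

import sys
sys.argv = ["dTRG.py", "-model", "XY", "-D", "30", "-NBeta", "8"]  # hypothetical flags
from args import args  # parses sys.argv and prints the argument table
assert args.model == "XY" and args.D == 30 and args.NBeta == 8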

dTRG.py

Lines changed: 263 additions & 0 deletions
@@ -0,0 +1,263 @@
import os
import sys
import re
import torch
import time
import numpy as np
from utils import kronecker_product as kron
from torch.utils.checkpoint import checkpoint
from datetime import datetime
from expm import expm

class iXTRG(torch.nn.Module):
    def __init__(self, args, NBeta, dtype, device):
        super(iXTRG, self).__init__()

        # model
        self.NBeta = NBeta  # no. of isometries on different layers, starts from 1
        self.tau = torch.tensor([args.tau], dtype=dtype, device=device)
        self.model = args.model

        # algorithm
        self.D = args.D
        self.opti = args.opti
        self.Niter = args.Niter
        self.Nsweep = args.Nsweep
        self.depth = args.depth

        # io
        self.dtype = dtype
        self.device = device
        self.rdir = args.rdir

        self.fName = self.rdir + 'Fe_' + self.model + '_tau' + str(self.tau.item()) + '_NBeta' + str(self.NBeta) + \
                     '_Dc' + str(self.D) + '_opti' + str(self.opti) + \
                     '_Niter' + str(self.Niter) + '_Nswp' + str(self.Nsweep) + \
                     '_depth' + str(self.depth) + '_device' + str(args.cuda) + '.pt'

        # create the result dir if it does not exist
        if not os.path.isdir(self.rdir): os.makedirs(self.rdir)

        # Module parameters, ordered as
        #   [Wl_1, Wl_2, ..., Wl_NBeta, Wr_1, Wr_2, ..., Wr_NBeta]
        # with bond dimensions sized for the 2nd-order Trotter-Suzuki initial MPO.
        # Wl and Wr are the module-level lists kept across iterations in __main__;
        # only the isometry pair for the newest layer is created here.
        nb = self.NBeta - 1
        Dl = min(4**(nb+2), self.D)
        Dr = min(4**(nb+2), self.D)
        D2 = min(4**(nb+3), self.D)
        Wl.append(torch.randn(Dl, Dr, D2, dtype=dtype, device='cpu'))

        Dl = min(4**(nb+1), self.D)
        Dr = min(4**(nb+1), self.D)
        D2 = min(4**(nb+2), self.D)
        Wr.append(torch.randn(Dl, Dr, D2, dtype=dtype, device='cpu'))

        self.params = torch.nn.ParameterList(
            [torch.nn.Parameter(_.to(device)) for _ in Wl + Wr]
        )
        self.params.append(torch.nn.Parameter(self.tau))

    def turn_on_grad(self, i):
        # enable the gradient for the i-th parameter only (one isometry at a time)
        for j, p in enumerate(self.parameters()):
            if j == i: p.requires_grad = True
            else: p.requires_grad = False

    def getHamilton(self):
        # spin operators; sy is stored as -i*Sy so that everything stays real,
        # hence kron(sy, sy) = -kron(Sy, Sy)
        sz = torch.tensor([[1, 0], [0, -1]], dtype=self.dtype, device=self.device)/2
        sx = torch.tensor([[0, 1], [1, 0]], dtype=self.dtype, device=self.device)/2
        sy = torch.tensor([[0, -1], [1, 0]], dtype=self.dtype, device=self.device)/2

        if self.model == "Ising":
            H = kron(sz, sz)
        elif self.model == "XY":
            H = kron(sx, sx) - kron(sy, sy)  # = SxSx + SySy in the real representation
        return H

    def getisometry(self, step, sizeT):
        # slice the stored parameter down to the bond dimensions actually in use
        D = min(self.D, sizeT)
        D_new = min(self.D, D**2)
        return self.params[step][:D, :D, :D_new]

    def getMaxEigBiLayer(self, Ta, Tb):
        # dominant eigenvalue of the (Ta, Tb) bilayer transfer operator
        # initial boundary 'vector'
        Vl = torch.einsum('ijkl,mjkl->im', Ta, Ta)
        normFactor = torch.norm(Vl)
        Vl = Vl/normFactor

        # power method
        matchCnt = 0
        for it in range(400):
            Vl = torch.einsum('im,ijkl,mnkl->jn', Vl, Ta, Ta)
            Vl = torch.einsum('im,ijkl,mnkl->jn', Vl, Tb, Tb)
            if (torch.norm(Vl) - normFactor)/normFactor < 1e-8: matchCnt += 1
            if matchCnt == 5: break
            normFactor = torch.norm(Vl)
            Vl = Vl/normFactor
        if it == 399: print('Eig not well converged!', end=' ')
        return normFactor

    def initRho(self, trotter_order=2):
        # generate the initial rho(tau) via Trotter-Suzuki decomposition
        tau, device, dtype, model = self.params[-1], self.device, self.dtype, self.model
        if trotter_order == 1:
            # get Hamiltonian
            H = self.getHamilton()
            # local Trotter gate
            rho = expm(-tau*H).view(2, 2, 2, 2)
            rho = torch.einsum('ijkl->ikjl', rho).contiguous().view(4, 4)
            # SVD of the gate (vanishing singular values drop out below)
            U, S, V = torch.svd(rho)
            # Trotter gate as a contraction of two three-leg tensors
            hl = (U@torch.diag(torch.sqrt(S))).view(2, 2, 4)
            hr = (V@torch.diag(torch.sqrt(S))).view(2, 2, 4)
            # local tensor of the initial MPO, index order: [l, r, d, u]
            Ta = torch.tensordot(hr, hl, ([0], [1])).permute(1, 3, 2, 0).contiguous()
            Tb = torch.tensordot(hr, hl, ([1], [0])).permute(1, 3, 0, 2).contiguous()
        elif trotter_order == 2:
            # get Hamiltonian
            H = self.getHamilton()
            # local Trotter gate
            rho = expm(-tau*H).view(2, 2, 2, 2)
            rho = torch.einsum('ijkl->ikjl', rho).contiguous().view(4, 4)
            # half-step local Trotter gate
            rho_half = expm(-tau*H/2).view(2, 2, 2, 2)
            rho_half = torch.einsum('ijkl->ikjl', rho_half).contiguous().view(4, 4)
            # SVD of the full and half gates
            U, S, V = torch.svd(rho)
            U2, S2, V2 = torch.svd(rho_half)
            # Trotter gates as contractions of two three-leg tensors
            hl = (U@torch.diag(torch.sqrt(S))).view(2, 2, 4)
            hr = (V@torch.diag(torch.sqrt(S))).view(2, 2, 4)
            hl2 = (U2@torch.diag(torch.sqrt(S2))).view(2, 2, 4)
            hr2 = (V2@torch.diag(torch.sqrt(S2))).view(2, 2, 4)
            # local tensor of the initial MPO, index order: [l, r, d, u]
            Ta = torch.einsum('ijk,jlm,lno->okmin', hr2, hl, hr2).contiguous().view(16, 4, 2, 2)
            Tb = torch.einsum('ijk,jlm,lno->mokin', hl2, hr, hl2).contiguous().view(4, 16, 2, 2)
        else:
            raise Exception('only 1st- and 2nd-order Trotter are available!')
        return Ta, Tb

    def forward(self, nlayer):
        # contract the network starting from layer `nlayer`, reusing cached tensors
        if nlayer == 0:
            [Ta, Tb] = self.initRho()
            lnZ = 0.0
        else:
            Ta = Tas[nlayer-1].to(self.device)
            Tb = Tbs[nlayer-1].to(self.device)
            lnZ = lnZs[nlayer-1].to(self.device)

        for nbeta in range(nlayer, self.NBeta):
            Sizea = list(Ta.size())
            Sizeb = list(Tb.size())

            # obtain the isometry tensors for this layer
            Wa = self.getisometry(nbeta, Sizea[0])
            Wb = self.getisometry(nbeta+self.NBeta, Sizeb[0])

            # evolve Ta & Tb by one doubling step, beta -> 2*beta;
            # lnZ accumulates as 2*lnZ + log||Ta|| + log||Tb||, and the norms
            # are divided out to keep the tensors O(1)
            Ta = checkpoint(self.rgtens, Wa, Ta, Wb)
            Tb = checkpoint(self.rgtens, Wb, Tb, Wa)
            lnZ = 2*lnZ + torch.log(torch.norm(Ta)) \
                + torch.log(torch.norm(Tb))
            Ta = Ta/torch.norm(Ta)
            Tb = Tb/torch.norm(Tb)

            # cache the normalized tensors and lnZ on the CPU for later layers
            if len(Tas) < nbeta+1: Tas.append(Ta.detach().to('cpu'))
            else: Tas[nbeta] = Ta.detach().to('cpu')
            if len(Tbs) < nbeta+1: Tbs.append(Tb.detach().to('cpu'))
            else: Tbs[nbeta] = Tb.detach().to('cpu')
            if len(lnZs) < nbeta+1: lnZs.append(lnZ.detach().to('cpu'))
            else: lnZs[nbeta] = lnZ.detach().to('cpu')

        # free energy: close the trace with the dominant eigenvalue of the bilayer
        ee = self.getMaxEigBiLayer(Ta, Tb)
        lnZ = 1/2*(2*lnZ + torch.log(ee))
        return lnZ

    def rgtens(self, Wa, T, Wb):
        # coarse-grain two stacked MPO tensors with the isometries Wa and Wb
        T = torch.einsum('ijk,ilmn,jopm,loq->kqpn', Wa, T, T, Wb)
        return T

    def update_single_layer(self, nlayer):
        loss_old = 0
        for niter in range(self.Niter):
            for ii in range(2):
                print('\t\tW(%02d-%02d),' % (nlayer, ii), end=' ')
                self.turn_on_grad(nlayer+self.NBeta*ii)

                self.zero_grad()
                loss = self.forward(nlayer)
                loss.backward()

                with torch.no_grad():
                    E = self.params[nlayer+self.NBeta*ii].grad
                    D, D, D_new = E.shape
                    E = E.view(D**2, D_new)
                    # MERA-style update: replace the isometry by the isometry
                    # closest to its environment (the gradient), via SVD
                    U, S, V = torch.svd(E)
                    self.params[nlayer+self.NBeta*ii].data = (U@V.t()).view(D, D, D_new)
                # current free-energy estimate, -lnZ/(2**(NBeta+1)*tau)
                print('%+.15f' % (-loss.item()/2**(self.NBeta+1)/self.tau), end=' ')
            if abs((loss_old - loss.item())/loss.item()) < 1e-8: break
            else: loss_old = loss.item()
            print(' ')
        return loss.item()


"""
===================================================================================
Main entry of the program
===================================================================================
"""
if __name__ == "__main__":
    from args import args
    device = torch.device("cpu" if args.cuda < 0 else "cuda:"+str(args.cuda))
    dtype = torch.float32 if args.use_float32 else torch.float64
    FeS = []  # init thermal quantities

    Wl = []; Wr = []
    Tas = []; Tbs = []; lnZs = []
    for nbeta in range(args.NBeta):  # main loop, cooling the system down
        beta = 2**(nbeta+1)*args.tau
        model = iXTRG(args, nbeta+1, dtype, device)
        print('\n ' + '-'*40)
        print('Optimizing lnZ(%.5f)' % (2*beta))
        print('\nInitialization:')
        loss = model.update_single_layer(nbeta)

        opti_flag = 0  # no sweep by default
        if nbeta+1 >= model.opti:
            opti_flag = 1

        depth = min(model.depth, nbeta)
        # call sweep optimization
        if opti_flag:
            print('\nOptimization:')
            for nswp in range(args.Nsweep):
                print('#sweep {}:'.format(nswp))
                # nlayer indexes the layer to be optimized, from nbeta-depth up to nbeta
                for nlayer in range(nbeta-depth, nbeta+1):
                    loss = model.update_single_layer(nlayer)
                print(' ')

        # keep the isometric tensors for the next iteration
        Wl = [m.detach().to('cpu') for m in model.params[:nbeta+1]]
        Wr = [m.detach().to('cpu') for m in model.params[nbeta+1:-1]]

        # remove redundant isometries (to save memory)
        if nbeta-depth >= 1:
            Wl[nbeta-depth-1] = torch.tensor([0.])
            Wr[nbeta-depth-1] = torch.tensor([0.])

        del model.params
        torch.cuda.empty_cache()

        # save data
        torch.save(FeS, model.fName)
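The SVD step in update_single_layer is the standard isometric (MERA-like) update: the new W is the isometry closest to its environment tensor E. A minimal self-contained sketch of just that projection, with toy shapes not taken from the run above:

import torch

D, D_new = 4, 6
E = torch.randn(D, D, D_new, dtype=torch.float64)  # stand-in for params[...].grad
U, S, V = torch.svd(E.view(D*D, D_new))            # thin SVD of the matricized environment
W = (U @ V.t()).view(D, D, D_new)                  # nearest isometry, maximizing tr(E^T W)
# isometric property: contracting the two in-legs of W gives the identity
eye = torch.einsum('ijk,ijl->kl', W, W)
assert torch.allclose(eye, torch.eye(D_new, dtype=torch.float64), atol=1e-10)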

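Similarly, getMaxEigBiLayer is a power iteration on the bilayer transfer operator; here is the same idea sketched on a plain symmetric matrix (sizes and tolerances are illustrative only):

import torch

torch.manual_seed(0)
M = torch.randn(8, 8, dtype=torch.float64)
M = M @ M.t()                       # symmetric PSD, so power iteration converges
v = torch.randn(8, dtype=torch.float64)
v = v / torch.norm(v)
eig = 0.0
for _ in range(400):
    v = M @ v
    eig_new = torch.norm(v).item()  # estimate of the dominant eigenvalue
    v = v / eig_new
    if eig > 0 and abs(eig_new - eig)/eig_new < 1e-12:
        break
    eig = eig_new
# cross-check against a dense eigensolver
assert abs(eig_new - torch.linalg.eigvalsh(M)[-1].item()) < 1e-6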
expm.py

Lines changed: 80 additions & 0 deletions
@@ -0,0 +1,80 @@
# copied from https://github.com/geoopt/geoopt/blob/master/geoopt/linalg/_expm.py

# author: @nsde
# maintainer: @ferrine


import torch.jit


@torch.jit.script
def torch_pade13(A):  # pragma: no cover
    # avoid torch select operation and unpack coefs
    (b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13) = (
        64764752532480000.0,
        32382376266240000.0,
        7771770303897600.0,
        1187353796428800.0,
        129060195264000.0,
        10559470521600.0,
        670442572800.0,
        33522128640.0,
        1323241920.0,
        40840800.0,
        960960.0,
        16380.0,
        182.0,
        1.0,
    )

    ident = torch.eye(A.shape[1], dtype=A.dtype, device=A.device)
    A2 = torch.matmul(A, A)
    A4 = torch.matmul(A2, A2)
    A6 = torch.matmul(A4, A2)
    U = torch.matmul(
        A,
        torch.matmul(A6, b13 * A6 + b11 * A4 + b9 * A2)
        + b7 * A6
        + b5 * A4
        + b3 * A2
        + b1 * ident,
    )
    V = (
        torch.matmul(A6, b12 * A6 + b10 * A4 + b8 * A2)
        + b6 * A6
        + b4 * A4
        + b2 * A2
        + b0 * ident
    )
    return U, V


@torch.jit.script
def matrix_2_power(x, p):  # pragma: no cover
    for _ in range(int(p)):
        x = x @ x
    return x


@torch.jit.script
def expm(A):  # pragma: no cover
    # no checks, this is a private implementation; A should be a square matrix
    A_fro = torch.norm(A)

    # scaling step: pick n_squarings so that ||A/2^n||_F <= theta_13 ~ 5.372
    # (0.6931471805599453 is ln 2)
    n_squarings = torch.clamp(
        torch.ceil(torch.log(A_fro / 5.371920351148152).div(0.6931471805599453)), min=0
    )
    scaling = 2.0 ** n_squarings
    Ascaled = A / scaling

    # Pade-13 approximation
    U, V = torch_pade13(Ascaled)
    P = U + V
    Q = -U + V

    R, _ = torch.solve(P, Q)  # solve Q @ R = P
    expmA = matrix_2_power(R, n_squarings)
    return expmA
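As a quick sanity check, this routine can be compared against PyTorch's built-in matrix exponential. Note that torch.solve is the old pre-torch.linalg API; recent PyTorch versions drop it, and R = torch.linalg.solve(Q, P) is the drop-in replacement, so the snippet below assumes a version that still ships torch.solve:

import torch
from expm import expm

A = torch.randn(6, 6, dtype=torch.float64)
# torch.matrix_exp has been available since PyTorch 1.7
assert torch.allclose(expm(A), torch.matrix_exp(A), atol=1e-10)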
