 import mpmath
 from mpmath import polylog, gamma, findroot
 from funcs_sigma import *
+from funcs_sigmadk import *
 import pandas as pd
 import os
 import re

-root_dir = os.path.join(os.path.dirname(__file__), "funcs_sigma/")
 num_loops = [2, 6, 15, 39, 111, 448]

@@ -31,7 +31,7 @@ def load_leaf_info(root_dir, name, key_str):
     df = pd.read_csv(os.path.join(root_dir, f"leafinfo_{name}_{key_str}.csv"))
     with torch.no_grad():
         leaftypes = torch.tensor(df.iloc[:, 1].to_numpy())
-        leaforders = torch.tensor([_StringtoIntVector(x) for x in df.iloc[:, 2]]).T
+        leaforders = torch.tensor([_StringtoIntVector(x)[:3] for x in df.iloc[:, 2]]).T
         inTau_idx = torch.tensor(df.iloc[:, 3].to_numpy() - 1)
         outTau_idx = torch.tensor(df.iloc[:, 4].to_numpy() - 1)
         loop_idx = torch.tensor(df.iloc[:, 5].to_numpy() - 1)
@@ -41,11 +41,27 @@ def load_leaf_info(root_dir, name, key_str):

 class FeynmanIntegrand(nn.Module):
     @torch.no_grad()
-    def __init__(self, order, beta, loopBasis, leafstates, leafvalues, batchsize):
+    def __init__(
+        self,
+        order,
+        rs,
+        beta,
+        loopBasis,
+        leafstates,
+        leafvalues,
+        batchsize,
+        is_real=True,
+        has_dk=False,
+    ):
         super().__init__()
         # super().__init__(prop_scale=torch.tensor(1.0), prop_shift=torch.tensor(0.0))

-        print("beta:", beta, "order:", order, "batchsize:", batchsize)
+        print("rs:", rs, "beta:", beta, "order:", order, "batchsize:", batchsize)
+
+        if is_real:
+            self.is_real = True
+        else:
+            self.is_real = False

         # Unpack leafstates for clarity
         lftype, lforders, leaf_tau_i, leaf_tau_o, leafMomIdx = leafstates
@@ -100,6 +116,7 @@ def __init__(self, order, beta, loopBasis, leafstates, leafvalues, batchsize):
             "leafvalues",
             torch.broadcast_to(leafvalues, (self.batchsize, leafvalues.shape[0])),
         )
+        # print("lvalue,", self.leafvalues.shape)
         self.register_buffer(
             "p", torch.zeros([self.batchsize, self.dim, self.innerLoopNum + 1])
         )
@@ -127,20 +144,33 @@ def __init__(self, order, beta, loopBasis, leafstates, leafvalues, batchsize):
         self.p[:, 0, 0] += self.kF
         self.extk = self.kF
         self.extn = 0
-        self.targetval = 4.0
+        # self.targetval = 4.0

         if order == 1:
-            self.eval_graph = torch.jit.script(eval_graph100)
+            if has_dk:
+                self.eval_graph = torch.jit.script(eval_graph1001)
+            else:
+                self.eval_graph = torch.jit.script(eval_graph100)
         elif order == 2:
-            self.eval_graph = torch.jit.script(eval_graph200)
+            if has_dk:
+                self.eval_graph = torch.jit.script(eval_graph2001)
+            else:
+                self.eval_graph = torch.jit.script(eval_graph200)
         elif order == 3:
-            self.eval_graph = torch.jit.script(eval_graph300)
+            if has_dk:
+                self.eval_graph = torch.jit.script(eval_graph3001)
+            else:
+                self.eval_graph = torch.jit.script(eval_graph300)
         elif order == 4:
-            self.eval_graph = torch.jit.script(eval_graph400)
+            if has_dk:
+                self.eval_graph = torch.jit.script(eval_graph4001)
+            else:
+                self.eval_graph = torch.jit.script(eval_graph400)
         elif order == 5:
-            self.eval_graph = torch.jit.script(eval_graph500)
-        elif order == 6:
-            self.eval_graph = torch.jit.script(eval_graph600)
+            if has_dk:
+                self.eval_graph = torch.jit.script(eval_graph5001)
+            else:
+                self.eval_graph = torch.jit.script(eval_graph500)
         else:
             raise ValueError("Invalid order")

@@ -215,11 +245,24 @@ def _evalleaf(self, var):
         self.kq2[:] = torch.sum(kq * kq, dim=1)
         self.dispersion[:] = self.kq2 / (2 * self.me) - self.mu
         self.kernelFermiT()
+
+        # print("kq", kq.shape)
+        ad_factor = kq[:, 0, :] * self.loopBasis[0, self.leafMomIdx]
+
+        self.leaf_fermi *= ad_factor / self.me * (self.lforders[2] == 1) + torch.ones(
+            self.batchsize, len(self.leafMomIdx), device=self.tau.device
+        ) * (self.lforders[2] != 1)
+
         # Calculate bosonic leaves
         self.invK[:] = 1.0 / (self.kq2 + self.mass2)
         self.leaf_bose[:] = ((self.e0**2 / self.eps0) * self.invK) * (
             self.mass2 * self.invK
         ) ** self.lforders[1]
+        self.leaf_bose *= ad_factor * self.invK * -2 * (self.lforders[1] + 1) * (
+            self.lforders[2] == 1
+        ) + torch.ones(self.batchsize, len(self.leafMomIdx), device=self.tau.device) * (
+            self.lforders[2] != 1
+        )
         # self.leafvalues[self.isfermi] = self.leaf_fermi[self.isfermi]
         # self.leafvalues[self.isbose] = self.leaf_bose[self.isbose]
         self.leafvalues = torch.where(self.isfermi, self.leaf_fermi, self.leafvalues)
@@ -237,13 +280,58 @@ def __call__(self, var, root):
             / (2 * np.pi) ** (self.dim * self.innerLoopNum)
         ).unsqueeze(1)

+        # phase = torch.ones((self.batchsize, 1), device=root.device)
+        if self.is_real:
+            phase = torch.ones((self.batchsize, 1), device=root.device)
+        else:
+            phase = torch.zeros((self.batchsize, 1), device=root.device)
+
+        if self.totalTauNum > 1:
+            if self.is_real:
+                phase = torch.hstack(
+                    [
+                        phase,
+                        torch.cos(
+                            (2 * self.extn + 1)
+                            * np.pi
+                            / self.beta
+                            * var[:, : self.totalTauNum - 1]
+                        ),
+                    ]
+                )
+            else:
+                phase = torch.hstack(
+                    [
+                        phase,
+                        torch.sin(
+                            (2 * self.extn + 1)
+                            * np.pi
+                            / self.beta
+                            * var[:, : self.totalTauNum - 1]
+                        ),
+                    ]
+                )
+
+        # print(self.totalTauNum, root.shape, phase.shape)
+
+        root *= phase
+
         return root.sum(dim=1)


-def init_feynfunc(order, beta, batch_size):
-    partition = [(order, 0, 0)]
-    name = "sigma"
-    df = pd.read_csv(os.path.join(root_dir, f"loopBasis_{name}_maxOrder6.csv"))
+def init_feynfunc(order, rs, beta, batch_size, is_real=True, has_dk=False):
+    if has_dk:
+        name = "sigmadk"
+        root_dir = os.path.join(os.path.dirname(__file__), "funcs_sigmadk/")
+        f_loopbasis = f"loopBasis_{name}_maxOrder5.csv"
+        partition = [(order, 0, 0, 1)]
+    else:
+        name = "sigma"
+        root_dir = os.path.join(os.path.dirname(__file__), "funcs_sigma/")
+        f_loopbasis = f"loopBasis_{name}_maxOrder6.csv"
+        partition = [(order, 0, 0)]
+
+    df = pd.read_csv(os.path.join(root_dir, f_loopbasis))
     with torch.no_grad():
         loopBasis = torch.Tensor(
             df.iloc[: order + 1, : num_loops[order - 1]].to_numpy()
@@ -258,7 +346,15 @@ def init_feynfunc(order, beta, batch_size):
         leafvalues.append(values)

     feynfunc = FeynmanIntegrand(
-        order, beta, loopBasis, leafstates[0], leafvalues[0], batch_size
+        order,
+        rs,
+        beta,
+        loopBasis,
+        leafstates[0],
+        leafvalues[0],
+        batch_size,
+        is_real=is_real,
+        has_dk=has_dk,
     )

     return feynfunc
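
For orientation, a minimal usage sketch of the updated entry point after this change, assuming init_feynfunc from this file is already in scope; the numeric values below are placeholders, not taken from the commit. is_real picks the cosine (real-part) or sine (imaginary-part) Matsubara phase applied in __call__, and has_dk switches to the funcs_sigmadk graphs and leaf tables.

# Hypothetical call of the new signature; argument values are illustrative only.
feynfunc = init_feynfunc(
    order=2,        # diagram order; orders 1 through 5 are wired up after this change
    rs=1.0,         # newly added parameter, passed through to FeynmanIntegrand
    beta=25.0,      # inverse temperature
    batch_size=10000,
    is_real=True,   # True -> cos phase (real part); False -> sin phase (imaginary part)
    has_dk=True,    # True -> load the "sigmadk" eval graphs and leaf info instead of "sigma"
)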