
Commit cf96029

add effective mass calculation example
1 parent fb6c0a4 commit cf96029


58 files changed (+21248 -148096 lines)

examples/example_effective_mass.py

Lines changed: 98 additions & 0 deletions

@@ -0,0 +1,98 @@
+from feynmandiag import init_feynfunc
+import torch
+from MCintegration import (
+    MonteCarlo,
+    MarkovChainMonteCarlo,
+    Vegas,
+    set_seed,
+    get_device,
+)
+import time
+import numpy as np
+
+maxOrder = 3
+beta = 10.0
+rs = 1.0
+
+device = get_device()
+batch_size = 5000
+n_eval = 1000000
+n_therm = 10
+
+num_roots = [1, 2, 3, 4, 5, 6]
+
+
+def zfactor_inv(maxOrder):
+    sig_imag = [1.0]
+
+    for order in range(1, maxOrder + 1):
+        feynfunc = init_feynfunc(order, rs, beta, batch_size, is_real=False)
+        feynfunc.to(device)
+        f_dim = num_roots[order - 1]
+
+        vegas_map = Vegas(feynfunc.ndims, ninc=1000, device=device)
+        bounds = [[0, 1]] * feynfunc.ndims
+        vegasmcmc_integrator = MarkovChainMonteCarlo(
+            bounds,
+            feynfunc,
+            f_dim=f_dim,
+            batch_size=batch_size,
+            nburnin=n_therm,
+            maps=vegas_map,
+            device=device,
+        )
+        res = vegasmcmc_integrator(neval=n_eval, mix_rate=0.5)
+
+        print(res)
+        w0_inv = feynfunc.beta.item() / np.pi
+        if order == 1:
+            sig_imag.append(-res * w0_inv)
+        else:
+            sig_imag.append(-sum([x * w0_inv for x in res]))
+        print("inverse Z:", sig_imag)
+
+    return sig_imag
+
+
+def meff_inv(maxOrder):
+    sig_real = [1.0]
+
+    for order in range(1, maxOrder + 1):
+        feynfunc = init_feynfunc(order, rs, beta, batch_size, is_real=True, has_dk=True)
+        feynfunc.to(device)
+        f_dim = num_roots[order - 1]
+
+        vegas_map = Vegas(feynfunc.ndims, ninc=1000, device=device)
+        bounds = [[0, 1]] * feynfunc.ndims
+        vegasmcmc_integrator = MarkovChainMonteCarlo(
+            bounds,
+            feynfunc,
+            f_dim=f_dim,
+            batch_size=batch_size,
+            nburnin=n_therm,
+            maps=vegas_map,
+            device=device,
+        )
+        res = vegasmcmc_integrator(neval=n_eval, mix_rate=0.5)
+
+        print(res)
+        prefactor = feynfunc.me / feynfunc.kF
+        if order == 1:
+            sig_real.append(res * prefactor)
+        else:
+            sig_real.append(sum([x * prefactor for x in res]))
+        print("inverse meff:", sig_real)
+
+    return sig_real
+
+
+print("Calculating inverse Z for each order...")
+z_factor_inv = zfactor_inv(maxOrder)
+
+print("\nCalculating inverse meff (without Z) for each order...")
+m_eff_inv = meff_inv(maxOrder)
+
+meff = sum(z_factor_inv) / sum(m_eff_inv)
+
+print(f"\nEffective mass (up to order {maxOrder}):")
+print(meff)
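
For reference, a single order can be exercised without the full loop. The sketch below mirrors the order-1 branch of zfactor_inv above, reusing only calls that appear in this commit (init_feynfunc, Vegas, MarkovChainMonteCarlo); it assumes the repository's funcs_sigma / funcs_sigmadk data files are available and is illustrative, not part of the commit.

# Minimal sketch (not part of the commit): run the order-1, is_real=False
# integrand on its own, mirroring the loop body of zfactor_inv above.
import numpy as np
from feynmandiag import init_feynfunc
from MCintegration import MarkovChainMonteCarlo, Vegas, get_device

device = get_device()
order, rs, beta, batch_size = 1, 1.0, 10.0, 5000

feynfunc = init_feynfunc(order, rs, beta, batch_size, is_real=False)
feynfunc.to(device)

vegas_map = Vegas(feynfunc.ndims, ninc=1000, device=device)
integrator = MarkovChainMonteCarlo(
    [[0, 1]] * feynfunc.ndims,  # unit hypercube, as in the example script
    feynfunc,
    f_dim=1,                    # num_roots[order - 1] for order == 1
    batch_size=batch_size,
    nburnin=10,
    maps=vegas_map,
    device=device,
)
res = integrator(neval=1000000, mix_rate=0.5)

# Order-1 contribution to the inverse Z factor, assembled as in zfactor_inv.
print("inverse-Z contribution at order 1:", -res * feynfunc.beta.item() / np.pi)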

examples/feynman_test.py

Lines changed: 1 addition & 1 deletion

@@ -19,7 +19,7 @@
 beta = 10.0
 feynfunc = init_feynfunc(order, beta, batch_size)
 feynfunc.to(device)
-f_dim = num_roots[order - 1]
+f_dim = num_roots[order]


 vegas_map = Vegas(feynfunc.ndims, ninc=1000, device=device)

examples/feynmandiag.py

Lines changed: 113 additions & 17 deletions

@@ -4,11 +4,11 @@
 import mpmath
 from mpmath import polylog, gamma, findroot
 from funcs_sigma import *
+from funcs_sigmadk import *
 import pandas as pd
 import os
 import re

-root_dir = os.path.join(os.path.dirname(__file__), "funcs_sigma/")
 num_loops = [2, 6, 15, 39, 111, 448]


@@ -31,7 +31,7 @@ def load_leaf_info(root_dir, name, key_str):
     df = pd.read_csv(os.path.join(root_dir, f"leafinfo_{name}_{key_str}.csv"))
     with torch.no_grad():
         leaftypes = torch.tensor(df.iloc[:, 1].to_numpy())
-        leaforders = torch.tensor([_StringtoIntVector(x) for x in df.iloc[:, 2]]).T
+        leaforders = torch.tensor([_StringtoIntVector(x)[:3] for x in df.iloc[:, 2]]).T
         inTau_idx = torch.tensor(df.iloc[:, 3].to_numpy() - 1)
         outTau_idx = torch.tensor(df.iloc[:, 4].to_numpy() - 1)
         loop_idx = torch.tensor(df.iloc[:, 5].to_numpy() - 1)

@@ -41,11 +41,27 @@ def load_leaf_info(root_dir, name, key_str):

 class FeynmanIntegrand(nn.Module):
     @torch.no_grad()
-    def __init__(self, order, beta, loopBasis, leafstates, leafvalues, batchsize):
+    def __init__(
+        self,
+        order,
+        rs,
+        beta,
+        loopBasis,
+        leafstates,
+        leafvalues,
+        batchsize,
+        is_real=True,
+        has_dk=False,
+    ):
         super().__init__()
         # super().__init__(prop_scale=torch.tensor(1.0), prop_shift=torch.tensor(0.0))

-        print("beta:", beta, "order:", order, "batchsize:", batchsize)
+        print("rs:", rs, "beta:", beta, "order:", order, "batchsize:", batchsize)
+
+        if is_real:
+            self.is_real = True
+        else:
+            self.is_real = False

         # Unpack leafstates for clarity
         lftype, lforders, leaf_tau_i, leaf_tau_o, leafMomIdx = leafstates

@@ -100,6 +116,7 @@ def __init__(self, order, beta, loopBasis, leafstates, leafvalues, batchsize):
             "leafvalues",
             torch.broadcast_to(leafvalues, (self.batchsize, leafvalues.shape[0])),
         )
+        # print("lvalue,", self.leafvalues.shape)
         self.register_buffer(
             "p", torch.zeros([self.batchsize, self.dim, self.innerLoopNum + 1])
         )

@@ -127,20 +144,33 @@ def __init__(self, order, beta, loopBasis, leafstates, leafvalues, batchsize):
         self.p[:, 0, 0] += self.kF
         self.extk = self.kF
         self.extn = 0
-        self.targetval = 4.0
+        # self.targetval = 4.0

         if order == 1:
-            self.eval_graph = torch.jit.script(eval_graph100)
+            if has_dk:
+                self.eval_graph = torch.jit.script(eval_graph1001)
+            else:
+                self.eval_graph = torch.jit.script(eval_graph100)
         elif order == 2:
-            self.eval_graph = torch.jit.script(eval_graph200)
+            if has_dk:
+                self.eval_graph = torch.jit.script(eval_graph2001)
+            else:
+                self.eval_graph = torch.jit.script(eval_graph200)
         elif order == 3:
-            self.eval_graph = torch.jit.script(eval_graph300)
+            if has_dk:
+                self.eval_graph = torch.jit.script(eval_graph3001)
+            else:
+                self.eval_graph = torch.jit.script(eval_graph300)
         elif order == 4:
-            self.eval_graph = torch.jit.script(eval_graph400)
+            if has_dk:
+                self.eval_graph = torch.jit.script(eval_graph4001)
+            else:
+                self.eval_graph = torch.jit.script(eval_graph400)
         elif order == 5:
-            self.eval_graph = torch.jit.script(eval_graph500)
-        elif order == 6:
-            self.eval_graph = torch.jit.script(eval_graph600)
+            if has_dk:
+                self.eval_graph = torch.jit.script(eval_graph5001)
+            else:
+                self.eval_graph = torch.jit.script(eval_graph500)
         else:
             raise ValueError("Invalid order")


@@ -215,11 +245,24 @@ def _evalleaf(self, var):
         self.kq2[:] = torch.sum(kq * kq, dim=1)
         self.dispersion[:] = self.kq2 / (2 * self.me) - self.mu
         self.kernelFermiT()
+
+        # print("kq", kq.shape)
+        ad_factor = kq[:, 0, :] * self.loopBasis[0, self.leafMomIdx]
+
+        self.leaf_fermi *= ad_factor / self.me * (self.lforders[2] == 1) + torch.ones(
+            self.batchsize, len(self.leafMomIdx), device=self.tau.device
+        ) * (self.lforders[2] != 1)
+
         # Calculate bosonic leaves
         self.invK[:] = 1.0 / (self.kq2 + self.mass2)
         self.leaf_bose[:] = ((self.e0**2 / self.eps0) * self.invK) * (
             self.mass2 * self.invK
         ) ** self.lforders[1]
+        self.leaf_bose *= ad_factor * self.invK * -2 * (self.lforders[1] + 1) * (
+            self.lforders[2] == 1
+        ) + torch.ones(self.batchsize, len(self.leafMomIdx), device=self.tau.device) * (
+            self.lforders[2] != 1
+        )
         # self.leafvalues[self.isfermi] = self.leaf_fermi[self.isfermi]
         # self.leafvalues[self.isbose] = self.leaf_bose[self.isbose]
         self.leafvalues = torch.where(self.isfermi, self.leaf_fermi, self.leafvalues)

@@ -237,13 +280,58 @@ def __call__(self, var, root):
             / (2 * np.pi) ** (self.dim * self.innerLoopNum)
         ).unsqueeze(1)

+        # phase = torch.ones((self.batchsize, 1), device=root.device)
+        if self.is_real:
+            phase = torch.ones((self.batchsize, 1), device=root.device)
+        else:
+            phase = torch.zeros((self.batchsize, 1), device=root.device)
+
+        if self.totalTauNum > 1:
+            if self.is_real:
+                phase = torch.hstack(
+                    [
+                        phase,
+                        torch.cos(
+                            (2 * self.extn + 1)
+                            * np.pi
+                            / self.beta
+                            * var[:, : self.totalTauNum - 1]
+                        ),
+                    ]
+                )
+            else:
+                phase = torch.hstack(
+                    [
+                        phase,
+                        torch.sin(
+                            (2 * self.extn + 1)
+                            * np.pi
+                            / self.beta
+                            * var[:, : self.totalTauNum - 1]
+                        ),
+                    ]
+                )
+
+        # print(self.totalTauNum, root.shape, phase.shape)
+
+        root *= phase
+
         return root.sum(dim=1)


-def init_feynfunc(order, beta, batch_size):
-    partition = [(order, 0, 0)]
-    name = "sigma"
-    df = pd.read_csv(os.path.join(root_dir, f"loopBasis_{name}_maxOrder6.csv"))
+def init_feynfunc(order, rs, beta, batch_size, is_real=True, has_dk=False):
+    if has_dk:
+        name = "sigmadk"
+        root_dir = os.path.join(os.path.dirname(__file__), "funcs_sigmadk/")
+        f_loopbasis = f"loopBasis_{name}_maxOrder5.csv"
+        partition = [(order, 0, 0, 1)]
+    else:
+        name = "sigma"
+        root_dir = os.path.join(os.path.dirname(__file__), "funcs_sigma/")
+        f_loopbasis = f"loopBasis_{name}_maxOrder6.csv"
+        partition = [(order, 0, 0)]
+
+    df = pd.read_csv(os.path.join(root_dir, f_loopbasis))
     with torch.no_grad():
         loopBasis = torch.Tensor(
             df.iloc[: order + 1, : num_loops[order - 1]].to_numpy()

@@ -258,7 +346,15 @@ def init_feynfunc(order, beta, batch_size):
         leafvalues.append(values)

     feynfunc = FeynmanIntegrand(
-        order, beta, loopBasis, leafstates[0], leafvalues[0], batch_size
+        order,
+        rs,
+        beta,
+        loopBasis,
+        leafstates[0],
+        leafvalues[0],
+        batch_size,
+        is_real=is_real,
+        has_dk=has_dk,
     )

     return feynfunc
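
The phase factor introduced in FeynmanIntegrand.__call__ weights the integrand with cos((2 * extn + 1) * pi / beta * tau) when is_real is set and with sin(...) otherwise, which appears to select the real or imaginary part at the external Matsubara index extn (the example script uses is_real=False for the inverse Z factor and is_real=True, has_dk=True for the inverse effective mass). Below is a minimal, self-contained sketch of just that construction; the values of extn, beta, batchsize, and totalTauNum are illustrative and not taken from the commit.

# Self-contained illustration (not part of the commit) of the phase factor
# built in FeynmanIntegrand.__call__: a constant first column (1 for the real
# part, 0 for the imaginary part) followed by cos/sin weights at the odd
# Matsubara frequency (2 * extn + 1) * pi / beta.
import numpy as np
import torch

batchsize, totalTauNum = 4, 3
extn, beta = 0, 10.0
var = torch.rand(batchsize, totalTauNum - 1)  # stands in for var[:, : totalTauNum - 1]

freq = (2 * extn + 1) * np.pi / beta
phase_real = torch.hstack([torch.ones(batchsize, 1), torch.cos(freq * var)])
phase_imag = torch.hstack([torch.zeros(batchsize, 1), torch.sin(freq * var)])

print(phase_real.shape, phase_imag.shape)  # both torch.Size([4, 3])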
