-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtrain.sh
62 lines (53 loc) · 1.51 KB
/
train.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
#!/usr/bin/env bash
# train.sh — launch Train.py with the hyper-parameter set defined below.
# Usage: bash train.sh   (edit the variables in place to configure a run)
#
# -e/-o pipefail: abort instead of launching training in a half-configured
# state (e.g. if `conda activate` fails). -u is deliberately omitted because
# conda's activation scripts reference unset variables.
set -eo pipefail

## Parameters of GPU
gpu=0       # CUDA device index passed as --gpu
threads=16  # worker threads passed as -t

# Parameters of training
L=12                     # lattice linear size
data=Ins_12_d0.2         # dataset name (also used to build model paths)
adj=None
Net=Simple_
input_size=$(( L * L ))  # flattened L x L lattice
embedding_size=100
hidden_size=64
output_size=2
z=1e-3j
restr=False        # False: fc, 1: 1D NN, 2: 2D NN, 3: 1D NNN, 4: 2D NNN
diago=False        # True, False, 1: 1D NN, 2: 2D NN, 3: 1D NNN, 4: 2D NNN
hermi=True         # True, False, 0: naive hermi
bound=1            # initial bound
entanglement=False # False, int or float
delta=0
tc=None
gradsnorm=False
loss=CE            # NLL, CE, BCE, BCEWL
opt=Adam
lr=1e-3
wd=0
betas=0.9,0.999
sch=StepLR
gamma=0.5
ss=20
drop=0
disor=0
epochs=21
workers=8
batchsize=256
print_freq=20
save_freq=10
seed=0
category=STRUCTURE
preNet=Naive_h_4-
checkpointID=checkpoint_0100

# paths
# NOTE(review): pretrained/resume are computed but never passed to Train.py
# below — confirm whether --pretrained/--resume flags were intended.
pretrained="models/${data}/${category}/${preNet}/model_best.pth.tar"
resume="models/${data}/${category}/${preNet}/${checkpointID}.pth.tar"

# Environment: make the `conda` shell function available, then activate.
source activate
#source /opt/anaconda3/etc/profile.d/conda.sh
conda activate pytorch

# Launch training. All expansions quoted (SC2086) so values survive intact
# even if a parameter is ever set to something containing whitespace.
python Train.py \
    -t "$threads" -j "$workers" -b "$batchsize" -p "$print_freq" -s "$save_freq" --epochs "$epochs" --data "$data" --adj "$adj" \
    --gpu "$gpu" --loss "$loss" --opt "$opt" --lr "$lr" --wd "$wd" --betas "$betas" --sch "$sch" --gamma "$gamma" --ss "$ss" \
    --Net "$Net" --z "$z" --entanglement "$entanglement" --delta "$delta" --tc "$tc" --gradsnorm "$gradsnorm" --seed "$seed" \
    --input_size "$input_size" --embedding_size "$embedding_size" --hidden_size "$hidden_size" --output_size "$output_size" \
    --drop "$drop" --disor "$disor" --init_bound "$bound" --restr "$restr" --hermi "$hermi" --diago "$diago" \
    --scale --lars