
Commit 44121ae

Upload files for running our model

File tree

5 files changed: +717 −0 lines

Dataset.zip

4.42 MB (binary file not shown)

architecture_cnn_periodic.py

+247
@@ -0,0 +1,247 @@
import tensorflow as tf
import tensorflow.keras.backend as K
import numpy as np

from tensorflow.keras.layers import Add
# Alias the backend sum so it does not shadow Python's builtin sum.
from tensorflow.keras.backend import sum as k_sum
from tensorflow.python.client import device_lib
# List available devices (e.g. to confirm GPU visibility).
print(device_lib.list_local_devices())


def tf_r2(y_true, y_pred):
    """Coefficient of determination (R^2) as a Keras metric."""
    SS_res = K.sum(K.square(y_true - y_pred))
    SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
    return 1 - SS_res / (SS_tot + K.epsilon())


def periodic_padding_flexible(tensor, axis, padding=1):
    """
    Add periodic padding to a tensor along the specified axes.

    tensor: input tensor
    axis: one or multiple axes to pad along, int or tuple
    padding: number of cells to pad, int or tuple

    return: padded tensor
    """
    if isinstance(axis, int):
        axis = (axis,)
    if isinstance(padding, int):
        padding = (padding,)

    ndim = len(tensor.shape)
    for ax, p in zip(axis, padding):
        # Build index tuples that select everything on all axes except the
        # padded one: the last p cells wrap around to the left edge and the
        # first p cells wrap around to the right edge.
        ind_right = tuple(slice(-p, None) if i == ax else slice(None) for i in range(ndim))
        ind_left = tuple(slice(0, p) if i == ax else slice(None) for i in range(ndim))
        right = tensor[ind_right]
        left = tensor[ind_left]
        middle = tensor
        tensor = tf.concat([right, middle, left], axis=ax)

    return tensor
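
# A quick sketch of the wrap-around behavior (shapes are illustrative):
# a 4x4 single-channel image padded by 1 along both spatial axes becomes
# 6x6, with each edge continued by the opposite edge.
#
#   x = tf.reshape(tf.range(16, dtype=tf.float32), (1, 4, 4, 1))
#   y = periodic_padding_flexible(x, axis=(1, 2), padding=(1, 1))
#   print(y.shape)  # (1, 6, 6, 1)
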
class LogLearningRateScheduler(tf.keras.callbacks.LearningRateScheduler):
    """
    Learning rate schedule that decays log-linearly from lr_start to lr_stop.

    Args:
        lr_start (float, optional): Learning rate to start with. The default is 1e-3.
        lr_stop (float, optional): Final learning rate at the end of training. The default is 1e-5.
        epochs (int, optional): Total number of epochs to reduce the learning rate towards. The default is 100.
        epomin (int, optional): Number of epochs at the beginning during which the learning rate is kept constant. The default is 10.

    Example:
        model.fit(callbacks=[LogLearningRateScheduler()])
    """
    def __init__(self, lr_start=1e-3, lr_stop=1e-5, epochs=100, epomin=10, verbose=0):
        self.lr_start = lr_start
        self.lr_stop = lr_stop
        self.epochs = epochs
        self.epomin = epomin
        super(LogLearningRateScheduler, self).__init__(schedule=self.schedule_epoch_lr, verbose=verbose)

    def schedule_epoch_lr(self, epoch, lr):
        # Hold the learning rate constant for the first epomin epochs, then
        # interpolate linearly in log space between lr_start and lr_stop.
        if epoch < self.epomin:
            out = self.lr_start
        else:
            out = np.exp(
                float(
                    np.log(self.lr_start) - (np.log(self.lr_start) - np.log(self.lr_stop)) /
                    (self.epochs - self.epomin) * (epoch - self.epomin)
                )
            )
        print('lr scheduler', epoch, out)
        return float(out)

    def get_config(self):
        config = super(LogLearningRateScheduler, self).get_config()
        config.update({"lr_start": self.lr_start, "lr_stop": self.lr_stop, "epochs": self.epochs, "epomin": self.epomin})
        return config
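
# Schedule sketch with the defaults (lr_start=1e-3, lr_stop=1e-5,
# epochs=100, epomin=10):
#   epochs 0-9 : 1e-3 (held constant)
#   epoch 55   : exp(ln(1e-3) - 0.5 * (ln(1e-3) - ln(1e-5))) = 1e-4
#   epoch 100  : 1e-5
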
class Conv(tf.keras.layers.Layer):
    """Stack of (Conv2D -> ReLU -> MaxPool2D) blocks with 'same' padding."""

    def __init__(self, input_shape, pool_size=2, strides=2, **kwargs):
        super(Conv, self).__init__()

        self.layer_conv = []
        self.layer_conv += [
            tf.keras.layers.Conv2D(
                filters=kwargs['n_filters'],
                kernel_size=(kwargs['kernel_size'], kwargs['kernel_size']),
                padding='same',
                input_shape=input_shape,
                name="Conv2D_%i" % 0),
            tf.keras.layers.ReLU(),
            tf.keras.layers.MaxPool2D(pool_size,
                                      strides,
                                      padding='same')]
        for i in range(kwargs['n_conv_steps'] - 1):
            self.layer_conv += [
                tf.keras.layers.Conv2D(
                    filters=kwargs['n_filters'],
                    kernel_size=(kwargs['kernel_size'], kwargs['kernel_size']),
                    padding='same',
                    name="Conv2D_%i" % (i + 1)),
                tf.keras.layers.ReLU(),
                tf.keras.layers.MaxPool2D(pool_size,
                                          strides,
                                          padding='same')]

    def call(self, x):
        for i, l in enumerate(self.layer_conv):
            x = l(x)
            print(f'cnn layer: {l.name} {x.shape}')
        print("Final layer")
        return x
class ConvPeriodicPadding(tf.keras.layers.Layer):
    """Stack of (Conv2D -> ReLU -> MaxPool2D) blocks that applies periodic
    padding before each 'valid' convolution and pooling step."""

    def __init__(self, input_shape, pool_size=2, strides=2, **kwargs):
        super(ConvPeriodicPadding, self).__init__()

        self.layer_conv = []
        self.layer_conv += [
            tf.keras.layers.Conv2D(
                filters=kwargs['n_filters'],
                kernel_size=(kwargs['kernel_size'], kwargs['kernel_size']),
                padding='valid',
                input_shape=input_shape,
                name="Conv2D_%i" % 0),
            tf.keras.layers.ReLU(),
            tf.keras.layers.MaxPool2D(pool_size=pool_size,
                                      strides=strides,
                                      padding='valid')
        ]

        for i in range(kwargs['n_conv_steps'] - 1):
            self.layer_conv += [
                tf.keras.layers.Conv2D(
                    filters=kwargs['n_filters'],
                    kernel_size=(kwargs['kernel_size'], kwargs['kernel_size']),
                    padding='valid',
                    name="Conv2D_%i" % (i + 1)),
                tf.keras.layers.ReLU(),
                tf.keras.layers.MaxPool2D(pool_size=pool_size,
                                          strides=strides,
                                          padding='valid')
            ]

    def call(self, x):
        print("\n\n ### Start NN")
        intermediate_sum = []
        for i, l in enumerate(self.layer_conv):
            if i % 3 == 0:  # conv layer
                print(" ### Conv layer")
                # Padding of 3 on each side keeps the spatial size unchanged
                # for a 'valid' convolution with kernel_size 7.
                x = periodic_padding_flexible(x, axis=(1, 2), padding=(3, 3))
                print(f'cnn layer: periodic padding for conv {x.shape}, axis: (1,2), padding: (3,3)')
                x = l(x)
                print(f'cnn layer: {l.name} {x.shape}')
            elif i % 3 == 1:  # activation layer
                print(" ### Act layer")
                x = l(x)
                print(f'cnn layer: {l.name} {x.shape}')
            elif i % 3 == 2:  # pooling layer
                print(" ### Pool layer")
                x = periodic_padding_flexible(x, axis=1, padding=1)
                print(f'cnn layer: periodic padding for pool {x.shape}, axis: (1), padding: (1)')
                x = l(x)
                print(f'cnn layer: {l.name} {x.shape}')
                intermediate_sum.append(x)

        return x, intermediate_sum
class CustomSum(tf.keras.layers.Layer):
    """Conv1D applied to feature maps that were summed along one spatial axis."""

    def __init__(self, **kwargs):
        super(CustomSum, self).__init__()
        # The input shape is inferred on the first call, so it does not need
        # to be passed to the Conv1D layer here.
        self.customsum = tf.keras.layers.Conv1D(
            kernel_size=kwargs['kernel_size_customsum'],
            filters=kwargs['n_filters_customsum'])

    def call(self, x):
        x = self.customsum(x)
        return x
class FC(tf.keras.layers.Layer):
    """Fully connected head: flatten -> dense -> dropout -> linear output of size 2."""

    def __init__(self, **kwargs):
        super(FC, self).__init__()
        self.flatten = tf.keras.layers.Flatten()
        self.fc1 = tf.keras.layers.Dense(units=kwargs['dense_size'], activation='relu')
        self.dropout = tf.keras.layers.Dropout(rate=kwargs['dropout'])
        self.fc2 = tf.keras.layers.Dense(2, activation='linear')

    def call(self, x):
        x = self.flatten(x)
        x = self.fc1(x)
        x = self.dropout(x)
        x = self.fc2(x)
        return x
class Cnnmodel_Shift_Flip(tf.keras.Model):
    """Periodic-padding CNN that combines features from the input and its
    up-down flip by addition before the fully connected head."""

    def __init__(self, input_shape, model_kwargs):
        super(Cnnmodel_Shift_Flip, self).__init__()
        self.conv = ConvPeriodicPadding(input_shape=input_shape, pool_size=2, strides=2, **model_kwargs)
        self.customsum = CustomSum(**model_kwargs)
        self.fc = FC(**model_kwargs)

    def call(self, x):
        x2, intermediate_states = self.conv(x)
        x2_flip, intermediate_states_flip = self.conv(tf.image.flip_up_down(x))
        print("shape before summation: ", x2.shape)
        x2 = self.customsum(k_sum(x2, axis=-2))
        print("shape after summation and Conv1D: ", x2.shape)
        x2_flip = self.customsum(k_sum(x2_flip, axis=-2))

        summed = []
        summed_flip = []
        for s in intermediate_states:
            print("shape of intermediate state:", s.shape)
            print("shape of state after sum:", k_sum(s, axis=-2).shape)
            summed.append(self.customsum(k_sum(s, axis=-2)))
        print("length of list after summation along y-axis: ", len(summed))
        for f in intermediate_states_flip:
            print("shape of flip's intermediate state:", f.shape)
            print("shape of flip's state after sum:", k_sum(f, axis=-2).shape)
            summed_flip.append(self.customsum(k_sum(f, axis=-2)))

        x3 = tf.concat(summed, axis=-2)
        x3_flip = tf.concat(summed_flip, axis=-2)
        print("shape after concatenate / flip and non-flip: ", x3.shape, x3_flip.shape)
        x3_add = Add()([x3, x3_flip])
        print("shape after Add:", x3_add.shape)
        outputs = self.fc(x3_add)
        print(" ### End NN\n\n")
        print(f'shape of outputs:{outputs.shape}')
        print(f'shape of x2:{x2.shape}')
        print(f'shape of x3:{x3.shape}')
        return outputs
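
A minimal sketch of how this model could be instantiated and trained; the hyperparameter values, input shape, and dummy data below are illustrative assumptions, not taken from the commit:

import numpy as np
import tensorflow as tf

model_kwargs = {
    'n_filters': 32,             # illustrative value
    'kernel_size': 7,            # matches the hardcoded periodic padding of 3
    'n_conv_steps': 2,
    'kernel_size_customsum': 3,
    'n_filters_customsum': 16,
    'dense_size': 64,
    'dropout': 0.2,
}

# Dummy periodic 2D fields with two regression targets per sample.
x_train = np.random.rand(64, 32, 32, 1).astype('float32')
y_train = np.random.rand(64, 2).astype('float32')

model = Cnnmodel_Shift_Flip(input_shape=(32, 32, 1), model_kwargs=model_kwargs)
model.compile(optimizer='adam', loss='mse', metrics=[tf_r2])
model.fit(x_train, y_train, batch_size=16, epochs=20,
          callbacks=[LogLearningRateScheduler(lr_start=1e-3, lr_stop=1e-5, epochs=20, epomin=5)])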

bwuni.yml

+116
@@ -0,0 +1,116 @@
name: tf_keras
channels:
  - defaults
dependencies:
  - _libgcc_mutex=0.1=main
  - _openmp_mutex=5.1=1_gnu
  - bzip2=1.0.8=h7b6447c_0
  - ca-certificates=2023.12.12=h06a4308_0
  - ld_impl_linux-64=2.38=h1181459_1
  - libffi=3.4.4=h6a678d5_0
  - libgcc-ng=11.2.0=h1234567_1
  - libgomp=11.2.0=h1234567_1
  - libstdcxx-ng=11.2.0=h1234567_1
  - libuuid=1.41.5=h5eee18b_0
  - ncurses=6.4=h6a678d5_0
  - openssl=3.0.13=h7f8727e_0
  - pip=23.3.1=py310h06a4308_0
  - python=3.10.13=h955ad1f_0
  - readline=8.2=h5eee18b_0
  - sqlite=3.41.2=h5eee18b_0
  - tk=8.6.12=h1ccaba5_0
  - xz=5.4.5=h5eee18b_0
  - zlib=1.2.13=h5eee18b_0
  - pip:
      - absl-py==2.1.0
      - astunparse==1.6.3
      - botorch==0.10.0
      - cachetools==5.3.3
      - certifi==2024.2.2
      - charset-normalizer==3.3.2
      - filelock==3.9.0
      - flatbuffers==23.5.26
      - fsspec==2023.4.0
      - gast==0.5.4
      - google-auth==2.28.1
      - google-auth-oauthlib==1.0.0
      - google-pasta==0.2.0
      - gpytorch==1.11
      - grpcio==1.62.0
      - h5py==3.10.0
      - idna==3.6
      - jaxtyping==0.2.25
      - jinja2==3.1.2
      - joblib==1.3.2
      - keras==2.14.0
      - libclang==16.0.6
      - linear-operator==0.5.1
      - markdown==3.5.2
      - markupsafe==2.1.5
      - ml-dtypes==0.2.0
      - mpmath==1.3.0
      - multipledispatch==1.0.0
      - networkx==3.2.1
      - numpy==1.26.4
      - nvidia-cublas-cu11==11.11.3.6
      - nvidia-cublas-cu12==12.1.3.1
      - nvidia-cuda-cupti-cu11==11.8.87
      - nvidia-cuda-cupti-cu12==12.1.105
      - nvidia-cuda-nvrtc-cu11==11.8.89
      - nvidia-cuda-nvrtc-cu12==12.1.105
      - nvidia-cuda-runtime-cu11==11.8.89
      - nvidia-cuda-runtime-cu12==12.1.105
      - nvidia-cudnn-cu11==8.7.0.84
      - nvidia-cudnn-cu12==8.9.2.26
      - nvidia-cufft-cu11==10.9.0.58
      - nvidia-cufft-cu12==11.0.2.54
      - nvidia-curand-cu11==10.3.0.86
      - nvidia-curand-cu12==10.3.2.106
      - nvidia-cusolver-cu11==11.4.1.48
      - nvidia-cusolver-cu12==11.4.5.107
      - nvidia-cusparse-cu11==11.7.5.86
      - nvidia-cusparse-cu12==12.1.0.106
      - nvidia-nccl-cu11==2.19.3
      - nvidia-nccl-cu12==2.19.3
      - nvidia-nvjitlink-cu12==12.3.101
      - nvidia-nvtx-cu11==11.8.86
      - nvidia-nvtx-cu12==12.1.105
      - oauthlib==3.2.2
      - opt-einsum==3.3.0
      - packaging==23.2
      - pandas==2.2.1
      - pillow==10.2.0
      - pip==23.3.1
      - protobuf==4.25.3
      - pyasn1==0.5.1
      - pyasn1-modules==0.3.0
      - pyro-api==0.1.2
      - pyro-ppl==1.9.0
      - python-dateutil==2.8.2
      - pytz==2024.1
      - requests==2.31.0
      - requests-oauthlib==1.3.1
      - rsa==4.9
      - scikit-learn==1.4.1.post1
      - scipy==1.12.0
      - setuptools==68.2.2
      - six==1.16.0
      - sympy==1.12
      - tensorboard==2.14.1
      - tensorboard-data-server==0.7.2
      - tensorflow==2.14.0
      - tensorflow-estimator==2.14.0
      - tensorflow-io-gcs-filesystem==0.36.0
      - termcolor==2.4.0
      - threadpoolctl==3.3.0
      - torch==2.2.1
      - tqdm==4.66.2
      - triton==2.2.0
      - typeguard==2.13.3
      - typing-extensions==4.10.0
      - tzdata==2024.1
      - urllib3==2.2.1
      - werkzeug==3.0.1
      - wheel==0.41.2
      - wrapt==1.14.1
prefix: /home/kit/iti/om6872/.conda/envs/tf_keras
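
The environment can be recreated with conda env create -f bwuni.yml and activated with conda activate tf_keras; the prefix line records the original install location and can be removed when building the environment elsewhere.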
