Commit 59c0fa8

Author: SAAS R7 User1
Commit message: clean variable names
1 parent 97a7b55, commit 59c0fa8

15 files changed: +116 -448 lines

README.md (+1 -1)

@@ -108,7 +108,7 @@ model = ExNN(meta_info=meta_info,
              tuning_epochs=100,
              l1_proj=0.001,
              l1_subnet=0.01,
-             smooth_lambda=10**(-6),
+             l2_smooth=10**(-6),
              verbose=True,
              val_ratio=0.2,
              early_stop_thres=500)
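
Because the keyword argument itself is renamed, downstream code that still passes smooth_lambda will fail with a TypeError. A minimal migration sketch, assuming a prepared meta_info dict as elsewhere in the README; the subnet_num value is hypothetical, chosen only for illustration:

    # Migration sketch, not the full README example: only the renamed
    # keyword changes. `meta_info` is assumed to be prepared as in the
    # README; `subnet_num=10` is a hypothetical value.
    from exnn import ExNN

    model = ExNN(meta_info=meta_info,
                 subnet_num=10,
                 l1_proj=0.001,
                 l1_subnet=0.01,
                 l2_smooth=10**(-6),   # was: smooth_lambda=10**(-6)
                 verbose=True)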
Binary file changed (178 Bytes), not shown.

docs/build/doctrees/examples.doctree
Binary file changed (-8 Bytes), not shown.

docs/build/doctrees/modules.doctree
Binary file changed (783 Bytes), not shown.
docs/build/html/_modules/exnn/exnn.html

+8-8
Original file line numberDiff line numberDiff line change
@@ -210,8 +210,8 @@
     :type l1_subnet: float
     :param l1_subnet: optional, default=0.001, the strength of L1 penalty for scaling layer.

-    :type smooth_lambda: float
-    :param smooth_lambda: optional, default=0.000001, the strength of roughness penalty for subnetworks.
+    :type l2_smooth: float
+    :param l2_smooth: optional, default=0.000001, the strength of roughness penalty for subnetworks.

     :type verbose: bool
     :param verbose: optional, default=False. If True, detailed messages will be printed.

@@ -233,7 +233,7 @@

     def __init__(self, meta_info, subnet_num, subnet_arch=[10, 6], task_type="Regression",
                  activation_func=tf.tanh, batch_size=1000, training_epochs=10000, lr_bp=0.001, lr_cl=0.1,
-                 beta_threshold=0.05, tuning_epochs=500, l1_proj=0.001, l1_subnet=0.001, smooth_lambda=0.000001,
+                 beta_threshold=0.05, tuning_epochs=500, l1_proj=0.001, l1_subnet=0.001, l2_smooth=0.000001,
                  verbose=False, val_ratio=0.2, early_stop_thres=1000, random_state=0):

         super(ExNN, self).__init__(meta_info=meta_info,

@@ -246,7 +246,7 @@
                                    lr_bp=lr_bp,
                                    l1_proj=l1_proj,
                                    l1_subnet=l1_subnet,
-                                   smooth_lambda=smooth_lambda,
+                                   l2_smooth=l2_smooth,
                                    batch_size=batch_size,
                                    training_epochs=training_epochs,
                                    tuning_epochs=tuning_epochs,

@@ -261,12 +261,12 @@
     def train_step_init(self, inputs, labels):
         with tf.GradientTape() as tape_cl:
             with tf.GradientTape() as tape_bp:
-                pred = self.apply(inputs, training=True)
+                pred = self.__call__(inputs, training=True)
                 pred_loss = self.loss_fn(labels, pred)
                 regularization_loss = tf.math.add_n(self.proj_layer.losses + self.output_layer.losses)
                 cl_loss = pred_loss + regularization_loss
                 bp_loss = pred_loss + regularization_loss
-                if self.smooth_lambda > 0:
+                if self.l2_smooth > 0:
                     smoothness_loss = self.subnet_blocks.smooth_loss
                     bp_loss += smoothness_loss
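The nested tapes in train_step_init record the same forward pass twice, so the cl and bp losses can later drive different variable groups, and the renamed l2_smooth acts as a guard that only adds the roughness term when its strength is positive. A self-contained toy sketch of that nested-tape pattern, using a hypothetical scalar stand-in rather than the ExNN internals:

    import tensorflow as tf

    # Toy sketch of the nested-tape pattern above: both tapes record the
    # same forward pass, so each can produce gradients for its own
    # variable group. The scalar model is a hypothetical stand-in.
    w_cl = tf.Variable(1.0)   # stands in for the projection-layer weights
    w_bp = tf.Variable(2.0)   # stands in for the remaining weights
    x, y = tf.constant(3.0), tf.constant(1.0)
    l2_smooth = 1e-6          # renamed penalty strength, as in this commit

    with tf.GradientTape() as tape_cl:
        with tf.GradientTape() as tape_bp:
            pred = w_cl * x + w_bp
            pred_loss = tf.square(pred - y)
            bp_loss = pred_loss
            if l2_smooth > 0:   # guard: skip the term entirely when zero
                bp_loss += l2_smooth * tf.square(w_bp)  # stand-in roughness term

    grads_cl = tape_cl.gradient(pred_loss, [w_cl])
    grads_bp = tape_bp.gradient(bp_loss, [w_bp])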
@@ -291,10 +291,10 @@
     def train_step_finetune(self, inputs, labels):

         with tf.GradientTape() as tape:
-            pred = self.apply(inputs, training=True)
+            pred = self.__call__(inputs, training=True)
             pred_loss = self.loss_fn(labels, pred)
             total_loss = pred_loss
-            if self.smooth_lambda > 0:
+            if self.l2_smooth > 0:
                 smoothness_loss = self.subnet_blocks.smooth_loss
                 total_loss += smoothness_loss

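The self.apply(...) to self.__call__(...) change is consistent with TF 2.x, where Layer.apply is deprecated in favor of calling the model directly; __call__ routes through the Keras call machinery and forwards the training flag. A minimal sketch of that calling convention, using a stand-in Sequential model rather than the ExNN architecture:

    import tensorflow as tf

    # Sketch of the calling convention this commit adopts: invoke the model
    # via __call__ (i.e. model(inputs, ...)) instead of deprecated Layer.apply.
    # The Sequential model is a stand-in, not the ExNN architecture.
    model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
    x = tf.random.normal((8, 4))
    y = tf.random.normal((8, 1))

    with tf.GradientTape() as tape:
        pred = model(x, training=True)  # same as model.__call__(x, training=True)
        loss = tf.reduce_mean(tf.square(y - pred))

    grads = tape.gradient(loss, model.trainable_variables)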