
Commit 85b5340

make format
1 parent 995a902 commit 85b5340

85 files changed: +816, -790 lines
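
The commit message alone does not say which tool was used, but the changes below (wrapped imports joined onto one line, long print calls re-indented, stray blank lines and trailing whitespace removed) are consistent with running an autoformatter such as yapf over the example scripts. A minimal sketch of what such a "make format" step might invoke is shown below; the tool choice, flags, and path are assumptions, not taken from this commit:

# Hypothetical sketch of a "make format" step. Assumption: yapf applied to the
# example scripts; the actual Makefile target and formatter are not shown in this commit.
import subprocess

# Rewrite every Python file under examples/ in place, picking up the project's
# yapf configuration if one is present.
subprocess.run(["yapf", "--in-place", "--recursive", "examples/"], check=True)

With a wide column limit, such a formatter joins previously wrapped parenthesized imports onto a single line, which would match the repeated pattern in the diffs below.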


examples/basic_tutorials/tutorial_cifar10_cnn_static.py

Lines changed: 1 addition & 2 deletions
@@ -8,8 +8,7 @@
 import tensorflow as tf
 
 import tensorlayer as tl
-from tensorlayer.layers import (BatchNorm, Conv2d, Dense, Flatten, Input,
-                                LocalResponseNorm, MaxPool2d)
+from tensorlayer.layers import (BatchNorm, Conv2d, Dense, Flatten, Input, LocalResponseNorm, MaxPool2d)
 from tensorlayer.models import Model
 
 # enable debug logging

examples/data_process/tutorial_fast_affine_transform.py

Lines changed: 1 addition & 1 deletion
@@ -8,10 +8,10 @@
 import multiprocessing
 import time
 
+import cv2
 import numpy as np
 import tensorflow as tf
 
-import cv2
 import tensorlayer as tl
 
 # tl.logging.set_verbosity(tl.logging.DEBUG)

examples/data_process/tutorial_tfrecord3.py

Lines changed: 2 additions & 2 deletions
@@ -231,8 +231,8 @@ def distort_image(image, thread_id):
 
 
 def prefetch_input_data(
-        reader, file_pattern, is_training, batch_size, values_per_shard, input_queue_capacity_factor=16,
-        num_reader_threads=1, shard_queue_name="filename_queue", value_queue_name="input_queue"
+    reader, file_pattern, is_training, batch_size, values_per_shard, input_queue_capacity_factor=16,
+    num_reader_threads=1, shard_queue_name="filename_queue", value_queue_name="input_queue"
 ):
     """Prefetches string values from disk into an input queue.
 

examples/deprecated_tutorials/tutorial_imagenet_inceptionV3_distributed.py

Lines changed: 1 addition & 2 deletions
@@ -21,8 +21,7 @@
 import numpy as np
 import tensorflow as tf
 from tensorflow.contrib import slim
-from tensorflow.contrib.slim.python.slim.nets.inception_v3 import (inception_v3,
-                                                                    inception_v3_arg_scope)
+from tensorflow.contrib.slim.python.slim.nets.inception_v3 import (inception_v3, inception_v3_arg_scope)
 from tensorflow.python.framework.errors_impl import OutOfRangeError
 from tensorflow.python.training import session_run_hook
 from tensorflow.python.training.basic_session_run_hooks import StopAtStepHook

examples/distributed_training/tutorial_cifar10_distributed_trainer.py

Lines changed: 1 addition & 2 deletions
@@ -18,8 +18,7 @@
 import tensorflow as tf
 
 import tensorlayer as tl
-from tensorlayer.layers import (BatchNormLayer, Conv2d, DenseLayer,
-                                FlattenLayer, InputLayer, MaxPool2d)
+from tensorlayer.layers import (BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, MaxPool2d)
 
 tf.logging.set_verbosity(tf.logging.DEBUG)
 tl.logging.set_verbosity(tl.logging.DEBUG)

examples/reinforcement_learning/tutorial_A3C.py

Lines changed: 5 additions & 4 deletions
@@ -62,7 +62,6 @@
 
 tl.logging.set_verbosity(tl.logging.DEBUG)
 
-
 # add arguments in command --train/test
 parser = argparse.ArgumentParser(description='Train or test neural net motor controller.')
 parser.add_argument('--train', dest='train', action='store_true', default=False)
@@ -177,7 +176,7 @@ def save(self):  # save trained weights
             os.makedirs(path)
         tl.files.save_npz(self.actor.trainable_weights, name=os.path.join(path, 'model_actor.npz'))
         tl.files.save_npz(self.critic.trainable_weights, name=os.path.join(path, 'model_critic.npz'))
-        
+
     def load(self):  # load trained weights
         path = os.path.join('model', '_'.join([ALG_NAME, ENV_ID]))
         tl.files.load_and_assign_npz(name=os.path.join(path, 'model_actor.npz'), network=self.actor)
@@ -296,7 +295,7 @@ def work(self, globalAC):
         COORD.join(worker_threads)
 
         GLOBAL_AC.save()
-        
+
         plt.plot(GLOBAL_RUNNING_R)
         if not os.path.exists('image'):
             os.makedirs('image')
@@ -319,4 +318,6 @@ def work(self, globalAC):
             print(
                 'Testing | Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
                     episode + 1, TEST_EPISODES, episode_reward,
-                    time.time() - T0))
+                    time.time() - T0
+                )
+            )

examples/reinforcement_learning/tutorial_AC.py

Lines changed: 2 additions & 4 deletions
@@ -78,8 +78,6 @@
 LR_A = 0.001  # learning rate for actor
 LR_C = 0.01  # learning rate for critic
 
-
-
 ###############################  Actor-Critic  ####################################
 
 
@@ -205,7 +203,7 @@ def load(self):  # load trained weights
             state_new, reward, done, info = env.step(action)
             state_new = state_new.astype(np.float32)
 
-            if done: reward = -20 # reward shaping trick
+            if done: reward = -20  # reward shaping trick
             # these may helpful in some tasks
             # if abs(s_new[0]) >= env.observation_space.high[0]:
             #     # cart moves more than 2.4 units from the center
@@ -240,7 +238,7 @@ def load(self):  # load trained weights
 
             # Early Stopping for quick check
             if step >= MAX_STEPS:
-                print("Early Stopping") # Hao Dong: it is important for this task
+                print("Early Stopping")  # Hao Dong: it is important for this task
                 break
     actor.save()
     critic.save()

examples/reinforcement_learning/tutorial_C51.py

Lines changed: 4 additions & 2 deletions
@@ -309,7 +309,8 @@ def _train_func(self, b_o, b_index, b_m):
             nepisode += 1
             print(
                 'Training | Episode: {} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
-                    nepisode, episode_reward, time.time() - t0
+                    nepisode, episode_reward,
+                    time.time() - t0
                 )
             )  # episode num starts from 1 in print
 
@@ -336,6 +337,7 @@ def _train_func(self, b_o, b_index, b_m):
             nepisode += 1
             print(
                 'Testing | Episode: {} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
-                    nepisode, episode_reward, time.time() - t0
+                    nepisode, episode_reward,
+                    time.time() - t0
                 )
             )

examples/reinforcement_learning/tutorial_DDPG.py

Lines changed: 7 additions & 6 deletions
@@ -29,7 +29,6 @@
 
 import argparse
 import os
-import threading
 import time
 
 import gym
@@ -159,9 +158,9 @@ def get_action(self, s, greedy=False):
         a = self.actor(np.array([s], dtype=np.float32))[0]
         if greedy:
             return a
-        return np.clip(np.random.normal(a, self.var),
-                       -self.action_range,
-                       self.action_range)  # add randomness to action selection for exploration
+        return np.clip(
+            np.random.normal(a, self.var), -self.action_range, self.action_range
+        )  # add randomness to action selection for exploration
 
     def learn(self):
         """
@@ -276,7 +275,7 @@ def load(self):
                 all_episode_reward.append(all_episode_reward[-1] * 0.9 + episode_reward * 0.1)
             print(
                 'Training | Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
-                    episode+1, TRAIN_EPISODES, episode_reward,
+                    episode + 1, TRAIN_EPISODES, episode_reward,
                     time.time() - t0
                 )
             )
@@ -301,4 +300,6 @@ def load(self):
             print(
                 'Testing | Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
                     episode + 1, TEST_EPISODES, episode_reward,
-                    time.time() - t0))
+                    time.time() - t0
+                )
+            )

examples/reinforcement_learning/tutorial_DPPO.py

Lines changed: 5 additions & 3 deletions
@@ -73,7 +73,6 @@
 # ppo-clip parameters
 EPSILON = 0.2
 
-
 ###############################  DPPO  ####################################
 
 
@@ -305,7 +304,8 @@ def work(self):
 
             print(
                 'Training | Episode: {}/{} | Worker: {} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
-                    GLOBAL_EP + 1, TRAIN_EPISODES, self.wid, ep_r, time.time() - T0
+                    GLOBAL_EP + 1, TRAIN_EPISODES, self.wid, ep_r,
+                    time.time() - T0
                 )
             )
             # record reward changes, plot later
@@ -372,4 +372,6 @@ def work(self):
         print(
             'Testing | Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
                 episode + 1, TEST_EPISODES, episode_reward,
-                time.time() - T0))
+                time.time() - T0
+            )
+        )
