
Commit 88bf131 (2 parents: 04452ec + b114c9d)

Merge pull request #4 from keon/tests
add sanity tests for chapter 3 and 4

28 files changed: +282 -297 lines

.travis.yml (+1 -1)

@@ -19,7 +19,7 @@ install:
 # - flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
 script:
 - python compile_notebooks.py
-- pytest .
+- travis_wait 30 pytest .
 # Check python install package
 # - pip install -e .
 # - tox -e $TOX_ENV
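
For context on this change: travis_wait is a helper available in Travis CI build environments. Prefixing a command with travis_wait 30 lets it run for up to 30 minutes without writing any log output before the job is killed, instead of the default 10; a pytest run that executes whole notebooks can easily stay silent for that long.

The sanity tests this PR adds for chapters 3 and 4 are not among the diffs shown in this excerpt, so the following is only a rough sketch of what such a test could look like. Everything in it (the glob pattern, the use of nbformat and nbconvert, the timeout) is my assumption, not the repository's actual test code:

    import glob

    import nbformat
    import pytest
    from nbconvert.preprocessors import ExecutePreprocessor

    # Hypothetical: collect the chapter 3 and 4 notebooks by directory prefix.
    NOTEBOOKS = glob.glob('03-*/*.ipynb') + glob.glob('04-*/*.ipynb')

    @pytest.mark.parametrize('path', NOTEBOOKS)
    def test_notebook_executes(path):
        # "Sanity" means only: the notebook runs top to bottom without raising.
        nb = nbformat.read(path, as_version=4)
        ep = ExecutePreprocessor(timeout=600)
        ep.preprocess(nb, {'metadata': {'path': '.'}})

Executing every notebook end to end is slow, which fits the travis_wait 30 wrapper above.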

03-파이토치로_구현하는_신경망_ANN/autograd_basic.py (+3)

@@ -3,10 +3,13 @@
 
 import torch
 
+
 w = torch.tensor(1.0, requires_grad=True)
 
+
 a = w*3
 l = a**2
 l.backward()
 print(w.grad)
 print('l을 w로 미분한 값은 {}'.format(w.grad))
+
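
The Korean print string in this file reads "the value of l differentiated by w is {}". The additions are only blank lines, but the snippet doubles as a sanity check on autograd itself: with a = 3w and l = a^2, the chain rule gives dl/dw = 2a * 3 = 18w, which is 18 at w = 1.0, so the script should print tensor(18.). A minimal cross-check, my addition rather than part of the commit:

    import torch

    w = torch.tensor(1.0, requires_grad=True)
    l = (w * 3) ** 2
    l.backward()

    # Analytic gradient: d/dw (3w)^2 = 18w, i.e. 18 at w = 1.0.
    assert torch.isclose(w.grad, torch.tensor(18.0))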

03-파이토치로_구현하는_신경망_ANN/basic_neural_network.py (+12)

@@ -10,10 +10,12 @@
 import matplotlib.pyplot as plt
 import torch.nn.functional as F
 
+
 n_dim = 2
 x_train, y_train = make_blobs(n_samples=80, n_features=n_dim, centers=[[1,1],[-1,-1],[1,-1],[-1,1]], shuffle=True, cluster_std=0.3)
 x_test, y_test = make_blobs(n_samples=20, n_features=n_dim, centers=[[1,1],[-1,-1],[1,-1],[-1,1]], shuffle=True, cluster_std=0.3)
 
+
 def label_map(y_, from_, to_):
     y = numpy.copy(y_)
     for f in from_:
@@ -25,6 +27,7 @@ def label_map(y_, from_, to_):
 y_test = label_map(y_test, [0, 1], 0)
 y_test = label_map(y_test, [2, 3], 1)
 
+
 def vis_data(x,y = None, c = 'r'):
     if y is None:
         y = [None] * len(x)
@@ -38,12 +41,14 @@ def vis_data(x,y = None, c = 'r'):
 vis_data(x_train, y_train, c='r')
 plt.show()
 
+
 x_train = torch.FloatTensor(x_train)
 print(x_train.shape)
 x_test = torch.FloatTensor(x_test)
 y_train = torch.FloatTensor(y_train)
 y_test = torch.FloatTensor(y_test)
 
+
 class NeuralNet(torch.nn.Module):
     def __init__(self, input_size, hidden_size):
         super(NeuralNet, self).__init__()
@@ -61,16 +66,19 @@ def forward(self, input_tensor):
         output = self.sigmoid(linear2)
         return output
 
+
 model = NeuralNet(2, 5)
 learning_rate = 0.03
 criterion = torch.nn.BCELoss()
 epochs = 2000
 optimizer = torch.optim.SGD(model.parameters(), lr = learning_rate)
 
+
 model.eval()
 test_loss_before = criterion(model(x_test).squeeze(), y_test)
 print('Before Training, test loss is {}'.format(test_loss_before.item()))
 
+
 # The loss came out to about 0.73. It is fair to say a model with this much error has essentially no ability to classify.
 # Now let's finally train the neural network and improve its performance.
 
@@ -84,20 +92,24 @@ def forward(self, input_tensor):
     train_loss.backward()
     optimizer.step()
 
+
 model.eval()
 test_loss = criterion(model(x_test).squeeze(), y_test)
 print('After Training, test loss is {}'.format(test_loss.item()))
 
+
 # You can see the loss is dramatically lower than it was before training.
 # So far, we have implemented and trained a neural network.
 # Now let's save the trained model to a .pt file.
 
 torch.save(model.state_dict(), './model.pt')
 print('state_dict format of the model: {}'.format(model.state_dict()))
 
+
 # After running `save()`, a file named model.pt is created that holds the trained network's weights. As in the code below, the weights in model.pt can also be loaded into a new network object.
 
 new_model = NeuralNet(2, 5)
 new_model.load_state_dict(torch.load('./model.pt'))
 new_model.eval()
 print('벡터 [-1, 1]이 레이블 1을 가질 확률은 {}'.format(new_model(torch.FloatTensor([-1,1])).item()))
+
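
The final Korean print string reads "the probability that the vector [-1, 1] has label 1 is {}". One detail worth pausing on is the .squeeze() before BCELoss: the network ends in a single sigmoid output, so model(x_test) plausibly has shape [20, 1] while y_test has shape [20], and BCELoss wants the two shapes to match. A self-contained illustration of that shape fix (my sketch with stand-in tensors, not code from the commit):

    import torch

    output = torch.rand(20, 1)                   # stand-in for model(x_test)
    target = torch.randint(0, 2, (20,)).float()  # stand-in for y_test
    criterion = torch.nn.BCELoss()
    loss = criterion(output.squeeze(), target)   # squeeze: [20, 1] -> [20]
    print(loss.item())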

03-파이토치로_구현하는_신경망_ANN/image_recovery.py (+8)

@@ -11,11 +11,14 @@
 import pickle
 import matplotlib.pyplot as plt
 
+
 shp_original_img = (100, 100)
 broken_image = torch.FloatTensor( pickle.load(open('./broken_image_t.p', 'rb'),encoding='latin1' ) )
 
+
 plt.imshow(broken_image.view(100,100))
 
+
 def weird_function(x, n_iter=5):
     h = x
     filt = torch.tensor([-1./3, 1./3, -1./3])
@@ -28,11 +31,14 @@ def weird_function(x, n_iter=5):
     h = torch.cat( (h[h.shape[0]//2:],h[:h.shape[0]//2]), 0 )
     return h
 
+
 def distance_loss(hypothesis, broken_image):
     return torch.dist(hypothesis, broken_image)
 
+
 random_tensor = torch.randn(10000, dtype = torch.float)
 
+
 lr = 0.8
 for i in range(0,20000):
     random_tensor.requires_grad_(True)
@@ -44,4 +50,6 @@ def distance_loss(hypothesis, broken_image):
     if i % 1000 == 0:
         print('Loss at {} = {}'.format(i, loss.item()))
 
+
 plt.imshow(random_tensor.view(100,100).data)
+
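
This script is gradient descent written out by hand: each iteration re-marks the free tensor with requires_grad_(True), backpropagates the distance between weird_function(random_tensor) and the broken image, and then updates the tensor from its own .grad outside autograd. The same pattern in isolation, as a sketch with a toy target and a learning rate of my choosing:

    import torch

    x = torch.randn(3)
    target = torch.tensor([1.0, 2.0, 3.0])
    lr = 0.1
    for _ in range(200):
        x.requires_grad_(True)
        loss = torch.dist(x, target)   # Euclidean distance, like distance_loss
        loss.backward()
        with torch.no_grad():
            x = x - lr * x.grad        # manual update; x becomes a fresh tensor
    print(x)                           # ends up close to the target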

03-파이토치로_구현하는_신경망_ANN/tensor_basic.py (+5)

@@ -12,28 +12,33 @@
 print("Shape:", x.shape)
 print("랭크(차원):", x.ndimension())
 
+
 # Increasing the rank
 x = torch.unsqueeze(x, 0)
 print(x)
 print("Size:", x.size())
 print("Shape:", x.shape)
 print("랭크(차원):", x.ndimension())
 
+
 # Decreasing the rank
 x = torch.squeeze(x)
 print(x)
 print("Size:", x.size())
 print("Shape:", x.shape)  # [3, 3]: a tensor with 2 dimensions of 3 elements each
 print("랭크(차원):", x.ndimension())
 
+
 # Changing the shape
 x = x.view(9)
 print(x)
 print("Size:", x.size())
 print("Shape:", x.shape)
 print("랭크(차원):", x.ndimension())
 
+
 try:
     x = x.view(2,4)
 except Exception as e:
     print(e)  # print the error
+
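
(In the print strings kept verbatim above, "랭크(차원)" means "rank (dimension)"; the translated # comments are mine.) The try/except at the end demonstrates the one rule view() enforces: it never copies or drops data, so the target shape must have exactly as many elements as the source. A 9-element tensor reshapes to [3, 3] or [9], but not to [2, 4], which is 8 elements, and that is what the caught exception reports. A small sketch of the rule:

    import torch

    x = torch.arange(9)
    print(x.view(3, 3).shape)   # torch.Size([3, 3]) -- 9 elements preserved
    print(x.view(-1, 3).shape)  # torch.Size([3, 3]) -- -1 infers the dimension
    # x.view(2, 4)              # RuntimeError: 2 * 4 = 8 != 9 elements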

03-파이토치로_구현하는_신경망_ANN/tensor_operation.py (+4)

@@ -13,14 +13,18 @@
 print("w:", w)
 print("x:", x)
 
+
 b = torch.randn(5,2, dtype=torch.float)
 print("b:", b.size())
 print("b:", b)
 
+
 wx = torch.mm(w,x)  # w has 5 rows and x has 2 columns, so the shape is [5, 2].
 print("wx size:", wx.size())
 print("wx:", wx)
 
+
 result = wx + b
 print("result size:", result.size())
 print("result:", result)
+
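
The comment on torch.mm (translated above) states the matrix product shape rule: an [n, k] matrix times a [k, m] matrix gives [n, m], with the inner dimension k required to match. In this file, w has 5 rows and x has 2 columns, so wx is [5, 2] and adds elementwise with b of the same shape. The lines defining w and x fall outside the hunk, so the shapes in this sketch are an assumption:

    import torch

    w = torch.randn(5, 3)       # assumed: 5 rows, inner dimension 3
    x = torch.randn(3, 2)       # assumed: inner dimension 3, 2 columns
    wx = torch.mm(w, x)
    print(wx.size())            # torch.Size([5, 2]); the inner 3 contracts away
    b = torch.randn(5, 2)
    print((wx + b).size())      # elementwise add requires matching shapes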

04-딥러닝으로_패션_아이템_구분하기_DNN/fashion_mnist.ipynb (+5 -6)

@@ -16,7 +16,6 @@
     "from torchvision import datasets, transforms, utils\n",
     "from torch.utils import data\n",
     "\n",
-    "%matplotlib inline\n",
     "import matplotlib.pyplot as plt\n",
     "import numpy as np"
    ]
@@ -231,21 +230,21 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3",
+   "display_name": "Python 2",
    "language": "python",
-   "name": "python3"
+   "name": "python2"
   },
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 3
+    "version": 2
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
    "name": "python",
    "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.7.0"
+   "pygments_lexer": "ipython2",
+   "version": "2.7.16"
   }
  },
  "nbformat": 4,

04-딥러닝으로_패션_아이템_구분하기_DNN/fashion_mnist.py (+9 -1)

@@ -6,16 +6,17 @@
 from torchvision import datasets, transforms, utils
 from torch.utils import data
 
-%matplotlib inline
 import matplotlib.pyplot as plt
 import numpy as np
 
+
 # ## The Fashion MNIST dataset
 
 transform = transforms.Compose([
     transforms.ToTensor()
 ])
 
+
 trainset = datasets.FashionMNIST(
     root = './.data/',
     train = True,
@@ -29,6 +30,7 @@
     transform = transform
 )
 
+
 batch_size = 16
 
 train_loader = data.DataLoader(
@@ -40,18 +42,22 @@
     batch_size = batch_size
 )
 
+
 dataiter = iter(train_loader)
 images, labels = next(dataiter)
 
+
 # ## Looking at the data from a distance
 img = utils.make_grid(images, padding=0)
 npimg = img.numpy()
 plt.figure(figsize=(10, 7))
 plt.imshow(np.transpose(npimg, (1,2,0)))
 plt.show()
 
+
 print(labels)
 
+
 CLASSES = {
     0: 'T-shirt/top',
     1: 'Trouser',
@@ -70,6 +76,7 @@
     index = label.item()
     print(CLASSES[index])
 
+
 # ## Looking at the data up close
 idx = 1
 
@@ -79,3 +86,4 @@
 print(item_npimg.shape)
 plt.imshow(item_npimg, cmap='gray')
 plt.show()
+
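
Removing %matplotlib inline from this .py file is a real fix rather than cosmetics: the % line is IPython magic, not Python syntax, so running the file with a plain interpreter (which is what a pytest sanity check does) fails with a SyntaxError before anything executes. A related, purely hypothetical sketch, not part of this commit: on a headless CI machine, scripts that plot usually switch matplotlib to a non-interactive backend and save to disk instead of calling plt.show().

    import matplotlib
    matplotlib.use('Agg')        # file-only backend; set before importing pyplot
    import matplotlib.pyplot as plt

    fig = plt.figure(figsize=(10, 7))
    plt.plot([0, 1], [0, 1])     # toy plot standing in for the image grid
    fig.savefig('preview.png')   # write to disk instead of plt.show()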

04-딥러닝으로_패션_아이템_구분하기_DNN/neural-network.ipynb renamed to 04-딥러닝으로_패션_아이템_구분하기_DNN/neural_network.ipynb (+5 -5)

@@ -281,21 +281,21 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3",
+   "display_name": "Python 2",
    "language": "python",
-   "name": "python3"
+   "name": "python2"
   },
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 3
+    "version": 2
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
    "name": "python",
    "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.7.0"
+   "pygments_lexer": "ipython2",
+   "version": "2.7.16"
   }
  },
  "nbformat": 4,

04-딥러닝으로_패션_아이템_구분하기_DNN/neural-network.py renamed to 04-딥러닝으로_패션_아이템_구분하기_DNN/neural_network.py (+10)

@@ -10,18 +10,22 @@
 import torch.nn.functional as F
 from torchvision import transforms, datasets
 
+
 USE_CUDA = torch.cuda.is_available()
 DEVICE = torch.device("cuda" if USE_CUDA else "cpu")
 
+
 EPOCHS = 30
 BATCH_SIZE = 64
 
+
 # ## Loading the dataset
 
 transform = transforms.Compose([
     transforms.ToTensor()
 ])
 
+
 trainset = datasets.FashionMNIST(
     root = './.data/',
     train = True,
@@ -46,6 +50,7 @@
     shuffle = True,
 )
 
+
 # ## Training Fashion MNIST with a neural net
 # The input `x` consists of `[batch size, color, height, width]`.
 # If you call `x.size()`, you will see it printed as `[64, 1, 28, 28]`.
@@ -67,6 +72,7 @@ def forward(self, x):
         x = self.fc3(x)
         return x
 
+
 # ## Preparing the model
 # The `to()` function sends the model's parameters to the specified device.
 # It is usually not needed when using only a single CPU,
@@ -77,6 +83,7 @@ def forward(self, x):
 model = Net().to(DEVICE)
 optimizer = optim.SGD(model.parameters(), lr=0.01)
 
+
 # ## Training
 
 def train(model, train_loader, optimizer):
@@ -90,6 +97,7 @@ def train(model, train_loader, optimizer):
     loss.backward()
     optimizer.step()
 
+
 # ## Testing
 
 def evaluate(model, test_loader):
@@ -114,6 +122,7 @@ def evaluate(model, test_loader):
     test_accuracy = 100. * correct / len(test_loader.dataset)
     return test_loss, test_accuracy
 
+
 # ## Running the code
 # Everything is ready now. Let's run the code and see whether it actually trains!
 
@@ -123,3 +132,4 @@ def evaluate(model, test_loader):
 
     print('[{}] Test Loss: {:.4f}, Accuracy: {:.2f}%'.format(
         epoch, test_loss, test_accuracy))
+
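
The hunks above show only fragments of train() and evaluate(), but the visible lines pin down the standard pattern: per-batch loss with backward/step during training, and summed loss plus argmax accuracy, normalized by the dataset size, during evaluation. Below is a sketch of an evaluate() consistent with those fragments; the full body is not in this diff, so the details (cross-entropy, the no_grad block) are my assumptions:

    import torch
    import torch.nn.functional as F

    def evaluate(model, test_loader, device):
        model.eval()
        test_loss, correct = 0.0, 0
        with torch.no_grad():
            for data, target in test_loader:
                data, target = data.to(device), target.to(device)
                output = model(data)
                # Sum (rather than average) per batch so the final division by
                # the dataset size gives a correctly weighted mean loss.
                test_loss += F.cross_entropy(output, target, reduction='sum').item()
                pred = output.argmax(dim=1)            # most likely class per sample
                correct += pred.eq(target).sum().item()
        n = len(test_loader.dataset)
        return test_loss / n, 100.0 * correct / n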
