
Commit 148a38c

corrected tree height
1 parent a3565bc commit 148a38c

File tree: graph.ipynb, utils/viz.py

2 files changed: +11 -13 lines changed


graph.ipynb

+1 -1
@@ -1 +1 @@
-
{"cells":[{"cell_type":"code","execution_count":1,"metadata":{},"outputs":[],"source":"import re\nimport json\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport utils.viz as torchviz\ntorchviz.init()"},{"cell_type":"code","execution_count":2,"metadata":{},"outputs":[],"source":"x = torch.tensor([[255., 232, 132, 12, 343, 21, 0, 32, 12]]) / 255\ny = torch.tensor([1., 0, 0])"},{"cell_type":"code","execution_count":3,"metadata":{},"outputs":[],"source":"w = torch.rand(9, 3, requires_grad=True)\nb = torch.rand(1, 3, requires_grad=True)"},{"cell_type":"code","execution_count":4,"metadata":{},"outputs":[],"source":"h = x.mm(w) + b\nloss = ((h - y)**2).mean()\n\n# here\n"},{"cell_type":"code","execution_count":5,"metadata":{},"outputs":[],"source":"c = torchviz.draw(loss)\nc"},{"cell_type":"code","execution_count":9,"metadata":{},"outputs":[],"source":"loss.backward()"},{"cell_type":"code","execution_count":10,"metadata":{},"outputs":[],"source":"print(w.grad, '\\n', b.grad)"},{"cell_type":"markdown","metadata":{},"outputs":[],"source":["# Linear"]},{"cell_type":"code","execution_count":0,"metadata":{},"outputs":[],"source":"class LinearModel(nn.Module):\n def __init__(self, x, y):\n super(LinearModel, self).__init__()\n self.layer1 = nn.Linear(x, y)\n\n def forward(self, x):\n x = self.layer1(x)\n return F.softmax(x, dim=1)"},{"cell_type":"code","execution_count":0,"metadata":{},"outputs":[],"source":"model = LinearModel(9, 3)\ncost = nn.MSELoss()\npred = model(x)\nloss = cost(pred, y)\ntorchviz.draw(loss)"},{"cell_type":"markdown","metadata":{},"outputs":[],"source":["# Neural Networks"]},{"cell_type":"code","execution_count":0,"metadata":{},"outputs":[],"source":"class NeuralNework(nn.Module):\n def __init__(self, x, y):\n super(NeuralNework, self).__init__()\n hidden = int(x/2)\n self.layer1 = nn.Linear(x, hidden)\n self.layer2 = nn.Linear(hidden, hidden)\n self.output = nn.Linear(hidden, y)\n\n def forward(self, x):\n x = F.relu(self.layer1(x))\n x = F.relu(self.layer2(x))\n x = self.output(x)\n return F.softmax(x, dim=1)"},{"cell_type":"code","execution_count":0,"metadata":{},"outputs":[],"source":"model = NeuralNework(9, 3)\ncost = nn.MSELoss()\npred = model(x)\nloss = cost(pred, y)\ntorchviz.draw(loss)"},{"cell_type":"markdown","metadata":{},"outputs":[],"source":["# Convolutional Neural Network"]},{"cell_type":"code","execution_count":0,"metadata":{},"outputs":[],"source":"from torch.utils.data import DataLoader\nfrom torch.utils.data.dataset import Dataset\nfrom torchvision import datasets, transforms\n\ndigits = datasets.MNIST('data', train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Lambda(lambda x: x.view(28*28))\n ]),\n target_transform=transforms.Compose([\n transforms.Lambda(lambda y: \n torch.zeros(10, dtype=torch.float).scatter_(0, torch.tensor(y), value=1))\n ])\n )"},{"cell_type":"code","execution_count":0,"metadata":{},"outputs":[],"source":"class CNN(nn.Module):\n def __init__(self):\n super(CNN, self).__init__()\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(320, 50)\n self.fc2 = nn.Linear(50, 10)\n\n def forward(self, x):\n x = x.view(-1, 1, 28, 28)\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = x.view(-1, 320)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.softmax(x, 
dim=1)"},{"cell_type":"code","execution_count":0,"metadata":{},"outputs":[],"source":"model = CNN()\ncost = torch.nn.BCELoss()\npred = model(digits[0][0])\nloss = cost(pred, digits[0][1].view(1, -1))\ntorchviz.draw(loss)"}],"nbformat":4,"nbformat_minor":2,"metadata":{"language_info":{"name":"python","codemirror_mode":{"name":"ipython","version":3}},"orig_nbformat":2,"file_extension":".py","mimetype":"text/x-python","name":"python","npconvert_exporter":"python","pygments_lexer":"ipython3","version":3}}
+
{"cells":[{"cell_type":"code","execution_count":1,"metadata":{},"outputs":[],"source":"import re\nimport json\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport utils.viz as torchviz\ntorchviz.init()"},{"cell_type":"code","execution_count":2,"metadata":{},"outputs":[],"source":"x = torch.tensor([[255., 232, 132, 12, 343, 21, 0, 32, 12]]) / 255\ny = torch.tensor([1., 0, 0])"},{"cell_type":"code","execution_count":3,"metadata":{},"outputs":[],"source":"w = torch.rand(9, 3, requires_grad=True)\nb = torch.rand(1, 3, requires_grad=True)"},{"cell_type":"code","execution_count":4,"metadata":{},"outputs":[],"source":"h = x.mm(w) + b\nloss = ((h - y)**2).mean()\n\n# here\n"},{"cell_type":"code","execution_count":5,"metadata":{},"outputs":[],"source":"c = torchviz.draw(loss)\nc"},{"cell_type":"code","execution_count":6,"metadata":{},"outputs":[],"source":"loss.backward()"},{"cell_type":"code","execution_count":7,"metadata":{},"outputs":[],"source":"print(w.grad, '\\n', b.grad)"},{"cell_type":"markdown","metadata":{},"outputs":[],"source":"# Linear"},{"cell_type":"code","execution_count":8,"metadata":{},"outputs":[],"source":"class LinearModel(nn.Module):\n def __init__(self, x, y):\n super(LinearModel, self).__init__()\n self.layer1 = nn.Linear(x, y)\n\n def forward(self, x):\n x = self.layer1(x)\n return F.softmax(x, dim=1)"},{"cell_type":"code","execution_count":9,"metadata":{},"outputs":[],"source":"model = LinearModel(9, 3)\ncost = nn.MSELoss()\npred = model(x)\nloss = cost(pred, y.view(1, -1))\ntorchviz.draw(loss)"},{"cell_type":"markdown","metadata":{},"outputs":[],"source":"# Neural Networks"},{"cell_type":"code","execution_count":10,"metadata":{},"outputs":[],"source":"class NeuralNework(nn.Module):\n def __init__(self, x, y):\n super(NeuralNework, self).__init__()\n hidden = int(x/2)\n self.layer1 = nn.Linear(x, hidden)\n self.layer2 = nn.Linear(hidden, hidden)\n self.output = nn.Linear(hidden, y)\n\n def forward(self, x):\n x = F.relu(self.layer1(x))\n x = F.relu(self.layer2(x))\n x = self.output(x)\n return F.softmax(x, dim=1)"},{"cell_type":"code","execution_count":11,"metadata":{},"outputs":[],"source":"model = NeuralNework(9, 3)\ncost = nn.MSELoss()\npred = model(x)\nloss = cost(pred, y.view(1, -1))\ntorchviz.draw(loss)"},{"cell_type":"markdown","metadata":{},"outputs":[],"source":"# Convolutional Neural Network"},{"cell_type":"code","execution_count":12,"metadata":{},"outputs":[],"source":"from torch.utils.data import DataLoader\nfrom torch.utils.data.dataset import Dataset\nfrom torchvision import datasets, transforms\n\ndigits = datasets.MNIST('data', train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Lambda(lambda x: x.view(28*28))\n ]),\n target_transform=transforms.Compose([\n transforms.Lambda(lambda y: \n torch.zeros(10, dtype=torch.float).scatter_(0, torch.tensor(y), value=1))\n ])\n )"},{"cell_type":"code","execution_count":13,"metadata":{},"outputs":[],"source":"class CNN(nn.Module):\n def __init__(self):\n super(CNN, self).__init__()\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(320, 50)\n self.fc2 = nn.Linear(50, 10)\n\n def forward(self, x):\n x = x.view(-1, 1, 28, 28)\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = x.view(-1, 320)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return 
F.softmax(x, dim=1)"},{"cell_type":"code","execution_count":14,"metadata":{},"outputs":[],"source":"model = CNN()\ncost = torch.nn.BCELoss()\npred = model(digits[0][0])\nloss = cost(pred, digits[0][1].view(1, -1))\ntorchviz.draw(loss)"},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":""}],"nbformat":4,"nbformat_minor":2,"metadata":{"language_info":{"name":"python","codemirror_mode":{"name":"ipython","version":3}},"orig_nbformat":2,"file_extension":".py","mimetype":"text/x-python","name":"python","npconvert_exporter":"python","pygments_lexer":"ipython3","version":3}}

utils/viz.py

+10 -12
@@ -8,19 +8,19 @@
 def init():
     return HTML('<script src="https://cdn.jsdelivr.net/npm/vega@5"></script><span>torchviz initialized!</span>')

-def get_dict(g, width=650, height=300):
+def get_dict(g, depth):
     d = {
         "$schema": "https://vega.github.io/schema/vega/v5.json",
-        "width": width,
-        "height": height,
+        "width": 650,
+        "height": depth*30,
         "padding": 5,
         "signals": [
             {
                 "name": "labels", "value": True,
                 "bind": {"input": "checkbox"}
             },
             {
-                "name": "method", "value": "cluster",
+                "name": "method", "value": "tidy",
                 "bind": {"input": "select", "options": ["tidy", "cluster"]}
             },
             {
@@ -110,7 +110,7 @@ def get_dict(g, width=650, height=300):
     d['data'][0]['values'] = g
     return d

-def build_graph(g, elements=[], parentId=-1):
+def build_graph(g, elements=[], parentId=-1, depth=0):
     elm = { 'id': len(elements), 'parentId': None if parentId==-1 else parentId}
     if g == None:
         elm['name'] = 'Const'
@@ -127,10 +127,11 @@ def build_graph(g, elements=[], parentId=-1):
     elements.append(elm)

     if g != None and g.next_functions != None:
+        depth = depth+1
         for subg in g.next_functions:
-            build_graph(subg[0], elements, elm['id'])
+            elements, depth = build_graph(subg[0], elements, elm['id'], depth)

-    return elements
+    return elements, depth

 def draw_graph(graph):
     return Javascript("""
@@ -145,9 +146,6 @@ def draw_graph(graph):
     """ % json.dumps(graph))

 def draw(g):
-    graph = build_graph(g.grad_fn, elements=[])
-    j = get_dict(graph)
-    #print(json.dumps(j, indent=2))
-    #return alt.Chart.from_dict(j)
-    #return json.dumps(j)
+    graph, depth = build_graph(g.grad_fn, elements=[])
+    j = get_dict(graph, depth)
     return draw_graph(j)
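For context on the commit message: `build_graph` now threads a `depth` counter through its recursion over `grad_fn.next_functions`, and `get_dict` sizes the Vega tree as `depth*30` pixels instead of a fixed height of 300. A rough standalone sketch of the same idea, not the repo's exact traversal (`graph_depth` is a hypothetical helper; only the 30-pixels-per-level scaling comes from this commit):

```python
import torch

def graph_depth(fn, depth=0):
    # Recurse over the autograd graph the same way build_graph walks
    # grad_fn.next_functions, returning how deep the tree goes.
    # Leaf nodes (None or empty next_functions) terminate a branch.
    if fn is None or not getattr(fn, "next_functions", None):
        return depth
    return max(graph_depth(child, depth + 1) for child, _ in fn.next_functions)

# Tiny graph built from the notebook's first cells: a matmul, an add,
# a square, and a mean.
x = torch.rand(1, 9)
w = torch.rand(9, 3, requires_grad=True)
b = torch.rand(1, 3, requires_grad=True)
loss = ((x.mm(w) + b) ** 2).mean()

depth = graph_depth(loss.grad_fn)
print(depth, depth * 30)  # depth * 30 is the chart height the commit now uses
```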
