
Commit fd9b790

Update solutions and exercises to be equivalent
1 parent 9ddd3ac commit fd9b790

2 files changed: 64 additions & 29 deletions


exercises/01_penguin_classification.ipynb

Lines changed: 18 additions & 9 deletions
@@ -20,7 +20,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"### Task 1: Look at the data\n",
+"### Task 1 -- Part (a): Look at the data\n",
 "In the following code block, we import the ``load_penguins`` function from the ``palmerpenguins`` package.\n",
 "\n",
 "- Call this function, which returns a single object, and assign it to the variable ``data``.\n",
@@ -62,7 +62,18 @@
 "outputs": [],
 "source": [
 "# import seaborn as sns\n",
-"# sns.pairplot(data.drop(\"year\", axis=1), hue='species')"
+"\n",
+"# species_palette = {\n",
+"#     \"Adelie\": sns.color_palette()[0],  # Blue\n",
+"#     \"Chinstrap\": sns.color_palette()[1],  # Orange\n",
+"#     \"Gentoo\": sns.color_palette()[2],  # Green\n",
+"# }\n",
+"\n",
+"# sns.pairplot(\n",
+"#     data.drop(\"year\", axis=1),\n",
+"#     hue=\"species\",\n",
+"#     palette=species_palette,\n",
+"#     hue_order=[\"Adelie\", \"Chinstrap\", \"Gentoo\"], "
 ]
 },
 {
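Note: the final added line in this hunk ends with hue_order=[...], and no closing ")" is visible, so the call is presumably closed on an unchanged line outside the hunk. For reference, a minimal sketch of what the cell does once uncommented, using the palette from the diff; the dropna() call is an extra assumption here, not part of the commit:

    # Sketch of the uncommented pairplot cell (assumes palmerpenguins and seaborn are installed).
    import seaborn as sns
    from palmerpenguins import load_penguins

    data = load_penguins().dropna()  # dropping missing rows is an assumption, for tidier plotting

    # Fix each species to a colour from the default seaborn palette.
    species_palette = {
        "Adelie": sns.color_palette()[0],     # blue
        "Chinstrap": sns.color_palette()[1],  # orange
        "Gentoo": sns.color_palette()[2],     # green
    }

    sns.pairplot(
        data.drop("year", axis=1),  # drop the year column before plotting
        hue="species",
        palette=species_palette,
        hue_order=["Adelie", "Chinstrap", "Gentoo"],
    )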
@@ -105,14 +116,12 @@
 "reducer = umap.UMAP(random_state=42)\n",
 "embedding = reducer.fit_transform(scaled_penguin_data)\n",
 "\n",
-"colors = sns.color_palette()\n",
-"\n",
 "for i, (species, group) in enumerate(data.groupby(\"species\")):\n",
 "    plt.scatter(\n",
 "        embedding[data.species == species, 0],\n",
 "        embedding[data.species == species, 1],\n",
 "        label=species,\n",
-"        color=colors[i],\n",
+"        color=species_palette[species],\n",
 "    )\n",
 "\n",
 "plt.gca().set_aspect(\"equal\", \"datalim\")\n",
@@ -357,7 +366,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"# Complete __len__() and __getitem__() functions\n",
+"# Complete the __getitem__() function\n",
 "# See Task 4 ``TODOs`` in PenguinDataset class.\n",
 "\n",
 "# Create train_set\n",
@@ -540,7 +549,7 @@
 "# Print the model architecture.\n",
 "# print(res_model)\n",
 "\n",
-"list(res_model.parameters())"
+"# list(res_model.parameters())"
 ]
 },
 {
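Dumping list(res_model.parameters()) floods the notebook output, which is presumably why it is now commented out alongside the print above it. If a summary is still wanted, a common alternative (an assumption, not part of this commit) is to count the trainable parameters:

    # Count trainable parameters instead of printing every tensor
    # (assumes res_model is the torch.nn.Module built earlier in the notebook).
    n_params = sum(p.numel() for p in res_model.parameters() if p.requires_grad)
    print(f"Trainable parameters: {n_params}")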
@@ -778,14 +787,14 @@
 "\n",
 "    # zero the gradients (otherwise gradients accumulate)\n",
 "\n",
-"    # run forward model and compute proxy probabilities over dimension 1 (columns of tensor).\n",
+"    # run forward model to make predictions\n",
 "\n",
 "    # compute loss\n",
 "    # e.g. pred : Tensor([3]) and target : int\n",
 "\n",
 "    # compute gradients\n",
 "\n",
-"    # nudge parameters in direction of steepest descent c\n",
+"    # nudge parameters in direction of steepest descent\n",
 "\n",
 "    # append metrics\n",
 "\n",

worked-solutions/01_penguin_classification_solutions.ipynb

Lines changed: 46 additions & 20 deletions
Large diffs are not rendered by default.

0 commit comments