|
20 | 20 | "cell_type": "markdown",
|
21 | 21 | "metadata": {},
|
22 | 22 | "source": [
|
23 | | - "### Task 1: Look at the data\n",
| 23 | + "### Task 1 -- Part (a): Look at the data\n",
24 | 24 | "In the following code block, we import the ``load_penguins`` function from the ``palmerpenguins`` package.\n",
|
25 | 25 | "\n",
|
26 | 26 | "- Call this function, which returns a single object, and assign it to the variable ``data``.\n",
|
|
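For reference, the task described in the markdown cell above boils down to a one-line call; a minimal sketch (per the palmerpenguins documentation, ``load_penguins()`` returns a single pandas DataFrame):

```python
# Minimal sketch of the task above: load the Palmer Penguins data
# and bind the returned DataFrame to the variable ``data``.
from palmerpenguins import load_penguins

data = load_penguins()
print(data.head())  # quick sanity check of the first few rows
```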
62 | 62 | "outputs": [],
|
63 | 63 | "source": [
|
64 | 64 | "# import seaborn as sns\n",
|
65 | | - "# sns.pairplot(data.drop(\"year\", axis=1), hue='species')"
| 65 | + "\n",
| 66 | + "# species_palette = {\n",
| 67 | + "# \"Adelie\": sns.color_palette()[0], # Blue\n",
| 68 | + "# \"Chinstrap\": sns.color_palette()[1], # Orange\n",
| 69 | + "# \"Gentoo\": sns.color_palette()[2], # Green\n",
| 70 | + "# }\n",
| 71 | + "\n",
| 72 | + "# sns.pairplot(\n",
| 73 | + "# data.drop(\"year\", axis=1),\n",
| 74 | + "# hue=\"species\",\n",
| 75 | + "# palette=species_palette,\n",
| 76 | + "# hue_order=[\"Adelie\", \"Chinstrap\", \"Gentoo\"])"
66 | 77 | ]
|
67 | 78 | },
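A note on the change above: seaborn assigns colours by the order in which hue levels appear, so two plots can end up colouring the same species differently. Pinning an explicit species-to-colour dict (plus a fixed ``hue_order``) keeps every figure consistent; the same dict is reused in the UMAP scatter below via ``species_palette[species]``.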
|
68 | 79 | {
|
|
105 | 116 | "reducer = umap.UMAP(random_state=42)\n",
|
106 | 117 | "embedding = reducer.fit_transform(scaled_penguin_data)\n",
|
107 | 118 | "\n",
|
108 | | - "colors = sns.color_palette()\n",
109 | | - "\n",
110 | 119 | "for i, (species, group) in enumerate(data.groupby(\"species\")):\n",
|
111 | 120 | " plt.scatter(\n",
|
112 | 121 | " embedding[data.species == species, 0],\n",
|
113 | 122 | " embedding[data.species == species, 1],\n",
|
114 | 123 | " label=species,\n",
|
115 | | - " color=colors[i],\n",
| 124 | + " color=species_palette[species],\n",
116 | 125 | " )\n",
|
117 | 126 | "\n",
|
118 | 127 | "plt.gca().set_aspect(\"equal\", \"datalim\")\n",
|
|
357 | 366 | "metadata": {},
|
358 | 367 | "outputs": [],
|
359 | 368 | "source": [
|
360 | | - "# Complete __len__() and __getitem__() functions\n",
| 369 | + "# Complete the __getitem__() function\n",
361 | 370 | "# See Task 4 ``TODOs`` in PenguinDataset class.\n",
|
362 | 371 | "\n",
|
363 | 372 | "# Create train_set\n",
|
|
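For orientation, the ``__getitem__`` being completed here usually follows the standard ``torch.utils.data.Dataset`` pattern; a hypothetical sketch (the real ``PenguinDataset``, its attribute names, and how ``train_set`` is built live elsewhere in the exercise):

```python
# Hypothetical sketch only: the actual PenguinDataset class is defined
# elsewhere in the exercise, so the attribute names here are placeholders.
from torch.utils.data import Dataset

class PenguinDataset(Dataset):
    def __init__(self, features, labels):
        self.features = features  # tensor of input features, one row per bird
        self.labels = labels      # tensor of integer class targets

    def __len__(self):
        # number of samples in the split
        return len(self.features)

    def __getitem__(self, idx):
        # return a single (input, target) pair for the given index
        return self.features[idx], self.labels[idx]
```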
540 | 549 | "# Print the model architecture.\n",
|
541 | 550 | "# print(res_model)\n",
|
542 | 551 | "\n",
|
543 | | - "list(res_model.parameters())"
| 552 | + "# list(res_model.parameters())"
544 | 553 | ]
|
545 | 554 | },
|
546 | 555 | {
|
|
778 | 787 | "\n",
|
779 | 788 | " # zero the gradients (otherwise gradients accumulate)\n",
|
780 | 789 | "\n",
|
781 | | - " # run forward model and compute proxy probabilities over dimension 1 (columns of tensor).\n",
| 790 | + " # run forward model to make predictions\n",
782 | 791 | "\n",
|
783 | 792 | " # compute loss\n",
|
784 | 793 | " # e.g. pred : Tensor([3]) and target : int\n",
|
785 | 794 | "\n",
|
786 | 795 | " # compute gradients\n",
|
787 | 796 | "\n",
|
788 | | - " # nudge parameters in direction of steepest descent c\n",
| 797 | + " # nudge parameters in direction of steepest descent\n",
789 | 798 | "\n",
|
790 | 799 | " # append metrics\n",
|
791 | 800 | "\n",
|
|
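The comment skeleton above maps one-to-one onto a standard PyTorch training step. A minimal sketch, assuming ``model``, ``train_loader``, ``loss_func`` and ``optimiser`` are names defined elsewhere in the exercise:

```python
# Minimal sketch of the loop the comments above describe; ``model``,
# ``train_loader``, ``loss_func`` and ``optimiser`` are assumed names
# defined elsewhere in the exercise.
losses = []
for batch, targets in train_loader:
    # zero the gradients (otherwise gradients accumulate)
    optimiser.zero_grad()

    # run forward model to make predictions
    preds = model(batch)

    # compute loss, e.g. preds : Tensor([batch, 3]) and targets : Tensor([batch])
    loss = loss_func(preds, targets)

    # compute gradients
    loss.backward()

    # nudge parameters in direction of steepest descent
    optimiser.step()

    # append metrics
    losses.append(loss.item())
```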