This repository was archived by the owner on Aug 18, 2021. It is now read-only.

Commit 4e14f39

dont use tuple for indexing, addresses #32
1 parent 9d7ab1a commit 4e14f39

File tree

1 file changed: +8 -5 lines changed


reinforce-gridworld/reinforce-gridworld.ipynb

+8-5
@@ -147,7 +147,8 @@
 " E = self.grid_size + VISIBLE_RADIUS - 1\n",
 " gps = [(E, E), (S, E), (E, S), (S, S)]\n",
 " gp = gps[random.randint(0, len(gps)-1)]\n",
-" self.grid[gp] = GOAL_VALUE\n",
+" gy, gx = gp\n",
+" self.grid[gy, gx] = GOAL_VALUE\n",
 " \n",
 " def visible(self, pos):\n",
 " y, x = pos\n",
@@ -237,7 +238,8 @@
 " def record_step(self):\n",
 " \"\"\"Add the current state to history for display later\"\"\"\n",
 " grid = np.array(self.grid.grid)\n",
-" grid[self.agent.pos] = self.agent.health * 0.5 # Agent marker faded by health\n",
+" y, x = self.agent.pos\n",
+" grid[y, x] = self.agent.health * 0.5 # Agent marker faded by health\n",
 " visible = np.array(self.grid.visible(self.agent.pos))\n",
 " self.history.append((grid, visible, self.agent.health))\n",
 " \n",
@@ -256,8 +258,9 @@
 " self.agent.act(action)\n",
 " \n",
 " # Get reward from where agent landed, add to agent health\n",
-" value = self.grid.grid[self.agent.pos]\n",
-" self.grid.grid[self.agent.pos] = 0\n",
+" y, x = self.agent.pos\n",
+" value = self.grid.grid[y, x]\n",
+" self.grid.grid[y, x] = 0\n",
 " self.agent.health += value\n",
 " \n",
 " # Check if agent won (reached the goal) or lost (health reached 0)\n",
@@ -707,7 +710,7 @@
 "metadata": {
 "collapsed": false
 },
-"outputs": [ ],
+"outputs": [],
 "source": [
 "hidden_size = 50\n",
 "learning_rate = 1e-4\n",
