# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from __future__ import annotations

import torch
from tensordict import TensorDict, TensorDictBase

from torchrl.data.map import MCTSForest, Tree
from torchrl.envs import EnvBase

# UCB1 exploration constant; sqrt(2) is the classical choice.
C = 2.0**0.5


# TODO: Allow user to specify different priority functions with PR #2358
def _traversal_priority_UCB1(tree):
    """Returns the UCB1 priority of each child of ``tree``."""
    subtree = tree.subtree
    visits = subtree.visits
    reward_sum = subtree.wins

    # If it's black's turn, flip the reward, since black wants to optimize
    # for the lowest reward, not the highest.
    # TODO: Need a more generic way to do this, since not all use cases of
    # MCTS will be two-player turn-based games.
    if not subtree.rollout[0, 0]["turn"]:
        reward_sum = -reward_sum

    parent_visits = tree.visits
    reward_sum = reward_sum.squeeze(-1)
    # UCB1: average reward plus an exploration bonus that shrinks as a child
    # is visited more often relative to its parent.
    priority = reward_sum / visits + C * torch.sqrt(
        torch.log(parent_visits) / visits
    )
    # Unvisited children are always explored first.
    priority[visits == 0] = float("inf")
    return priority


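# A quick worked example of this priority (illustrative numbers, not from
# the script): a child with wins = 3.0 over visits = 5 under a parent with
# 10 visits scores 3.0 / 5 + sqrt(2) * sqrt(ln(10) / 5) ≈ 0.60 + 0.96 = 1.56,
# while a sibling with wins = 1.0 over visits = 2 scores
# 1.0 / 2 + sqrt(2) * sqrt(ln(10) / 2) ≈ 0.50 + 1.52 = 2.02, so the
# less-visited sibling is explored first despite its lower average reward.

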
def _traverse_MCTS_one_step(forest, tree, env, max_rollout_steps):
    """Runs one MCTS iteration: selection, expansion, simulation, backpropagation."""
    done = False
    trees_visited = [tree]

    while not done:
        if tree.subtree is None:
            td_tree = tree.rollout[-1]["next"].clone()

            if (tree.visits > 0 or tree.parent is None) and not td_tree["done"]:
                # Expansion: create a child node for each legal action.
                actions = env.all_actions(td_tree)
                subtrees = []

                for action in actions:
                    td = env.step(env.reset(td_tree).update(action))
                    new_node = Tree(
                        rollout=td.unsqueeze(0),
                        node_data=td["next"].select(*forest.node_map.in_keys),
                        count=torch.tensor(0),
                        wins=torch.zeros_like(td["next", env.reward_key]),
                    )
                    subtrees.append(new_node)

                # NOTE: This whole script runs about 2x faster with a lazy
                # stack than with an eager stack.
                tree.subtree = TensorDict.lazy_stack(subtrees)
                chosen_idx = torch.randint(0, len(subtrees), ()).item()
                rollout_state = subtrees[chosen_idx].rollout[-1]["next"]

            else:
                rollout_state = td_tree

            # Simulation: play out the environment to estimate this node's value.
            if rollout_state["done"]:
                rollout_reward = rollout_state[env.reward_key]
            else:
                rollout = env.rollout(
                    max_steps=max_rollout_steps,
                    tensordict=rollout_state,
                )
                rollout_reward = rollout[-1]["next", env.reward_key]
            done = True

        else:
            # Selection: descend into the child with the highest UCB1 priority.
            priorities = _traversal_priority_UCB1(tree)
            chosen_idx = torch.argmax(priorities).item()
            tree = tree.subtree[chosen_idx]
            trees_visited.append(tree)

    # Backpropagation: update every node visited along the path.
    for tree in trees_visited:
        tree.visits += 1
        tree.wins += rollout_reward


def MCTS(
    forest: MCTSForest,
    root: TensorDictBase,
    env: EnvBase,
    num_steps: int,
    max_rollout_steps: int | None = None,
) -> Tree:
    """Performs Monte-Carlo tree search in an environment.

    Args:
        forest (MCTSForest): Forest of the tree to update. If the tree does not
            exist yet, it is added.
        root (TensorDictBase): The root step of the tree to update.
        env (EnvBase): Environment to perform actions in.
        num_steps (int): Number of iterations to traverse.
        max_rollout_steps (int, optional): Maximum number of steps for each
            rollout.
    """
    # Seed the forest with one step per legal action from the root.
    for action in env.all_actions(root):
        td = env.step(env.reset(root.clone()).update(action))
        forest.extend(td.unsqueeze(0))

    tree = forest.get_tree(root)

    # Initialize the win totals of the root and its children to zero.
    tree.wins = torch.zeros_like(td["next", env.reward_key])
    for subtree in tree.subtree:
        subtree.wins = torch.zeros_like(td["next", env.reward_key])

    for _ in range(num_steps):
        _traverse_MCTS_one_step(forest, tree, env, max_rollout_steps)

    return tree
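

if __name__ == "__main__":
    # A minimal usage sketch. Assumptions: ``TicTacToeEnv`` from
    # ``torchrl.envs`` (a two-player env that supports ``all_actions``) and a
    # default-constructed ``MCTSForest`` suffice here; both choices are
    # illustrative, not part of the functions above.
    from torchrl.envs import TicTacToeEnv

    env = TicTacToeEnv()
    forest = MCTSForest()
    root = env.reset()
    tree = MCTS(forest, root, env, num_steps=100, max_rollout_steps=10)
    # The most-visited child of the root is the recommended first move.
    best = tree.subtree[tree.subtree.visits.argmax().item()]
    print(best.rollout[0]["action"])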