diff --git a/axelrod/__init__.py b/axelrod/__init__.py index bdc39f202..8ec6e5ba4 100644 --- a/axelrod/__init__.py +++ b/axelrod/__init__.py @@ -7,18 +7,23 @@ from axelrod.action import Action from axelrod.random_ import random_choice, random_flip, seed, Pdf from axelrod.plot import Plot -from axelrod.game import DefaultGame, Game +from axelrod.base_game import BaseGame +from axelrod.game import DefaultGame, IpdGame from axelrod.history import History, LimitedHistory -from axelrod.player import Player +from axelrod.base_player import BasePlayer +from axelrod.player import IpdPlayer from axelrod.classifier import Classifiers from axelrod.evolvable_player import EvolvablePlayer from axelrod.mock_player import MockPlayer -from axelrod.match import Match +from axelrod.base_match import BaseMatch +from axelrod.match import IpdMatch from axelrod.moran import MoranProcess, ApproximateMoranProcess from axelrod.strategies import * from axelrod.deterministic_cache import DeterministicCache from axelrod.match_generator import * -from axelrod.tournament import Tournament +from axelrod.base_tournament import BaseTournament +from axelrod.tournament import IpdTournament from axelrod.result_set import ResultSet from axelrod.ecosystem import Ecosystem from axelrod.fingerprint import AshlockFingerprint, TransitiveFingerprint +from axelrod.ipd_adapter import Player, Game, Match, Tournament diff --git a/axelrod/_strategy_utils.py b/axelrod/_strategy_utils.py index d028a072b..13db18d16 100644 --- a/axelrod/_strategy_utils.py +++ b/axelrod/_strategy_utils.py @@ -53,9 +53,9 @@ def inspect_strategy(inspector, opponent): Parameters ---------- - inspector: Player + inspector: IpdPlayer The player doing the inspecting - opponent: Player + opponent: IpdPlayer The player being inspected Returns @@ -82,9 +82,9 @@ def _limited_simulate_play(player_1, player_2, h1): Parameters ---------- - player_1: Player + player_1: IpdPlayer The player whose move is already known. 
- player_2: Player + player_2: IpdPlayer The player the we want to inspect. h1: Action The next action for first player. @@ -99,9 +99,9 @@ def simulate_match(player_1, player_2, strategy, rounds=10): Parameters ---------- - player_1: Player + player_1: IpdPlayer The player that will have a constant strategy. - player_2: Player + player_2: IpdPlayer The player we want to simulate. strategy: Action The constant strategy to use for first player. @@ -117,12 +117,12 @@ def _calculate_scores(p1, p2, game): Parameters ---------- - p1: Player + p1: IpdPlayer The first player. - p2: Player + p2: IpdPlayer The second player. - game: Game - Game object used to score rounds in the players' histories. + game: IpdGame + IpdGame object used to score rounds in the players' histories. Returns ------- @@ -142,12 +142,12 @@ def look_ahead(player_1, player_2, game, rounds=10): Parameters ---------- - player_1: Player + player_1: IpdPlayer The player that will look ahead. - player_2: Player + player_2: IpdPlayer The opponent that will be inspected. - game: Game - The Game object used to score rounds. + game: IpdGame + The IpdGame object used to score rounds. rounds: int The number of rounds to look ahead. diff --git a/axelrod/base_game.py b/axelrod/base_game.py new file mode 100644 index 000000000..2099e5b14 --- /dev/null +++ b/axelrod/base_game.py @@ -0,0 +1,28 @@ +from typing import Tuple, Union + +import axelrod as axl + +Score = Union[int, float] + + +class BaseGame(object): + """Container for the scoring logic.""" + + def __init__(self) -> None: + """Create a new game object.""" + pass + + def score(self, pair: Tuple[axl.Action, axl.Action]) -> Tuple[Score, Score]: + """Returns the appropriate score for a decision pair. + + Parameters + ---------- + pair: tuple(Action, Action) + A pair actions for two players, for example (C, C). + + Returns + ------- + tuple of int or float + Scores for two player resulting from their actions. 
+ """ + raise NotImplementedError() diff --git a/axelrod/base_match.py b/axelrod/base_match.py new file mode 100644 index 000000000..ef34c6b22 --- /dev/null +++ b/axelrod/base_match.py @@ -0,0 +1,73 @@ +from typing import Dict, List, Tuple, Union + +import axelrod as axl +from axelrod.base_game import BaseGame +from axelrod.base_player import BasePlayer + +Score = Union[int, float] + + +class BaseMatch(object): + """The BaseMatch class conducts matches between two players.""" + + def __init__( + self, + players: Tuple[BasePlayer], + turns: int = None, + prob_end: float = None, + game: BaseGame = None, + noise: float = 0, + match_attributes: Dict = None, + reset: bool = True, + ): + """ + Needs to be overwritten in derived class. + + Parameters + ---------- + players : tuple + A pair of axelrodPlayer objects + turns : integer + The number of turns per match + prob_end : float + The probability of a given turn ending a match + game : axelrod.BaseGame + The game object used to score the match + noise : float + The probability that a player's intended action should be flipped + match_attributes : dict + Mapping attribute names to values which should be passed to players. + The default is to use the correct values for turns, game and noise + but these can be overridden if desired. 
+ reset : bool + Whether to reset players or not + """ + pass + + @property + def players(self) -> Tuple[BasePlayer]: + raise NotImplementedError() + + def play(self) -> List[Tuple[axl.Action]]: + """The resulting list of actions from a match between two players.""" + raise NotImplementedError() + + def scores(self) -> List[Score]: + """Returns the scores of the previous BaseMatch plays.""" + raise NotImplementedError() + + def final_score(self) -> Score: + """Returns the final score for a BaseMatch.""" + raise NotImplementedError() + + def final_score_per_turn(self) -> Score: + """Returns the mean score per round for a BaseMatch.""" + raise NotImplementedError() + + def winner(self) -> BasePlayer: + """Returns the winner of the IpdMatch.""" + raise NotImplementedError() + + def __len__(self) -> int: + """Number of turns in the match""" + raise NotImplementedError() diff --git a/axelrod/base_player.py b/axelrod/base_player.py new file mode 100644 index 000000000..1fce21b63 --- /dev/null +++ b/axelrod/base_player.py @@ -0,0 +1,39 @@ +import inspect +from typing import Optional, Tuple + +import axelrod as axl + + +class BasePlayer(object): + + def __init__(self): + pass + + def strategy(self, opponent: "BasePlayer") -> axl.Action: + """Calculates the action of this player against the provided + opponent.""" + raise NotImplementedError() + + def play( + self, + opponent: "BasePlayer", + noise: float = 0, + strategy_holder: Optional["BasePlayer"] = None, + ) -> Tuple[axl.Action, axl.Action]: + """This pits two players against each other, using the passed strategy + holder, if provided.""" + raise NotImplementedError() + + def clone(self) -> "BasePlayer": + """Clones the player without history, reapplying configuration + parameters as necessary.""" + raise NotImplementedError() + + def reset(self): + """Resets a player to its initial state + + This method is called at the beginning of each match (between a pair + of players) to reset a player's state to its initial 
starting point. + It ensures that no 'memory' of previous matches is carried forward. + """ + raise NotImplementedError() diff --git a/axelrod/base_tournament.py b/axelrod/base_tournament.py new file mode 100644 index 000000000..9b2852498 --- /dev/null +++ b/axelrod/base_tournament.py @@ -0,0 +1,79 @@ +from typing import List, Tuple + +import axelrod as axl +from axelrod.base_player import BasePlayer +from axelrod.base_game import BaseGame + + +class BaseTournament(object): + def __init__( + self, + players: List[BasePlayer], + name: str = "axelrod", + game: BaseGame = None, + turns: int = None, + prob_end: float = None, + repetitions: int = 10, + noise: float = 0, + edges: List[Tuple] = None, + match_attributes: dict = None, + ) -> None: + """ + Parameters + ---------- + players : list + A list of axelrodPlayer objects + name : string + A name for the tournament + game : axelrod.IpdGame + The game object used to score the tournament + turns : integer + The number of turns per match + prob_end : float + The probability of a given turn ending a match + repetitions : integer + The number of times the round robin should be repeated + noise : float + The probability that a player's intended action should be flipped + prob_end : float + The probability of a given turn ending a match + edges : list + A list of edges between players + match_attributes : dict + Mapping attribute names to values which should be passed to players. + The default is to use the correct values for turns, game and noise + but these can be overridden if desired. + """ + pass + + def setup_output(self, filename=None): + """assign/create `filename` to `self`. If file should be deleted once + `play` is finished, assign a file descriptor. 
""" + raise NotImplementedError() + + def play( + self, + build_results: bool = True, + filename: str = None, + processes: int = None, + progress_bar: bool = True, + ) -> 'ResultSet': + """ + Plays the tournament and passes the results to the ResultSet class + + Parameters + ---------- + build_results : bool + whether or not to build a results set + filename : string + name of output file + processes : integer + The number of processes to be used for parallel processing + progress_bar : bool + Whether or not to create a progress bar which will be updated + + Returns + ------- + axelrod.ResultSet + """ + raise NotImplementedError() diff --git a/axelrod/classifier.py b/axelrod/classifier.py index a88dbe39e..c68073ddd 100644 --- a/axelrod/classifier.py +++ b/axelrod/classifier.py @@ -14,7 +14,7 @@ import warnings import yaml -from axelrod.player import Player +from axelrod.player import IpdPlayer ALL_CLASSIFIERS_PATH = "data/all_classifiers.yml" @@ -22,10 +22,10 @@ class Classifier(Generic[T]): - """Describes a Player (strategy). + """Describes a IpdPlayer (strategy). User sets a name and function, f, at initialization. Through - classify_player, looks for the classifier to be set in the passed Player + classify_player, looks for the classifier to be set in the passed IpdPlayer class. If not set, then passes to f for calculation. f must operate on the class, and not an instance. If necessary, f may @@ -38,18 +38,18 @@ class Classifier(Generic[T]): Attributes ---------- name: An identifier for the classifier, used as a dict key in storage and in - 'classifier' dicts of Player classes. - player_class_classifier: A function that takes in a Player class (not an + 'classifier' dicts of IpdPlayer classes. + player_class_classifier: A function that takes in a IpdPlayer class (not an instance) and returns a value. 
""" def __init__( - self, name: Text, player_class_classifier: Callable[[Type[Player]], T] + self, name: Text, player_class_classifier: Callable[[Type[IpdPlayer]], T] ): self.name = name self.player_class_classifier = player_class_classifier - def classify_player(self, player: Type[Player]) -> T: + def classify_player(self, player: Type[IpdPlayer]) -> T: """Look for this classifier in the passed player's 'classifier' dict, otherwise pass to the player to f.""" try: @@ -80,7 +80,7 @@ def classify_player(self, player: Type[Player]) -> T: def rebuild_classifier_table( classifiers: List[Classifier], - players: List[Type[Player]], + players: List[Type[IpdPlayer]], path: Text = ALL_CLASSIFIERS_PATH, ) -> None: """Builds the classifier table in data. @@ -142,7 +142,7 @@ def known_classifier(cls, classifier_name: Text) -> bool: @classmethod def __getitem__( cls, key: Union[Classifier, Text] - ) -> Callable[[Union[Player, Type[Player]]], Any]: + ) -> Callable[[Union[IpdPlayer, Type[IpdPlayer]]], Any]: """Looks up the classifier for the player. Given a passed classifier key, return a function that: @@ -152,7 +152,7 @@ def __getitem__( player in the all_player_dicts. Returns None if the classifier is not found in either of those. - The returned function expects Player instances, but if a Player class is + The returned function expects IpdPlayer instances, but if a IpdPlayer class is passed, then it will create an instance by calling an argument-less initializer. If no such initializer exists on the class, then an error will result. @@ -164,7 +164,7 @@ def __getitem__( Returns ------- - A function that will map Player (or Player instances) to their value for + A function that will map IpdPlayer (or IpdPlayer instances) to their value for this classification. """ # Key may be the name or an instance. Convert to name. 
@@ -175,7 +175,7 @@ def __getitem__( raise KeyError("Unknown classifier") def classify_player_for_this_classifier( - player: Union[Player, Type[Player]] + player: Union[IpdPlayer, Type[IpdPlayer]] ) -> Any: def try_lookup() -> Any: try: @@ -187,7 +187,7 @@ def try_lookup() -> Any: # If the passed player is not an instance, then try to initialize an # instance without arguments. - if not isinstance(player, Player): + if not isinstance(player, IpdPlayer): try: player = player() warnings.warn( @@ -214,7 +214,7 @@ def try_lookup() -> Any: return classify_player_for_this_classifier @classmethod - def is_basic(cls, s: Union[Player, Type[Player]]): + def is_basic(cls, s: Union[IpdPlayer, Type[IpdPlayer]]): """ Defines criteria for a strategy to be considered 'basic' """ @@ -232,7 +232,7 @@ def is_basic(cls, s: Union[Player, Type[Player]]): ) @classmethod - def obey_axelrod(cls, s: Union[Player, Type[Player]]): + def obey_axelrod(cls, s: Union[IpdPlayer, Type[IpdPlayer]]): """ A function to check if a strategy obeys Axelrod's original tournament rules. diff --git a/axelrod/deterministic_cache.py b/axelrod/deterministic_cache.py index 24fc53b7d..b792986eb 100644 --- a/axelrod/deterministic_cache.py +++ b/axelrod/deterministic_cache.py @@ -18,9 +18,9 @@ from axelrod import Classifiers from .action import Action -from .player import Player +from .player import IpdPlayer -CachePlayerKey = Tuple[Player, Player] +CachePlayerKey = Tuple[IpdPlayer, IpdPlayer] CacheKey = Tuple[str, str] @@ -38,7 +38,7 @@ def _key_transform(key: CachePlayerKey) -> CacheKey: def _is_valid_key(key: CachePlayerKey) -> bool: """Validate a deterministic cache player key. - The key should always be a 2-tuple, with a pair of axelrod.Player + The key should always be a 2-tuple, with a pair of axelrodPlayer instances and one integer. Both players should be deterministic. 
Parameters @@ -52,7 +52,7 @@ def _is_valid_key(key: CachePlayerKey) -> bool: if not isinstance(key, tuple) or len(key) != 2: return False - if not (isinstance(key[0], Player) and isinstance(key[1], Player)): + if not (isinstance(key[0], IpdPlayer) and isinstance(key[1], IpdPlayer)): return False if Classifiers["stochastic"](key[0]) or Classifiers["stochastic"](key[1]): @@ -89,8 +89,8 @@ class DeterministicCache(UserDict): By also storing those cached results in a file, we can re-use the cache between multiple tournaments if necessary. - The cache is a dictionary mapping pairs of Player classes to a list of - resulting interactions. e.g. for a 3 turn Match between Cooperator and + The cache is a dictionary mapping pairs of IpdPlayer classes to a list of + resulting interactions. e.g. for a 3 turn IpdMatch between Cooperator and Alternator, the dictionary entry would be: (axelrod.Cooperator, axelrod.Alternator): [(C, C), (C, D), (C, C)] @@ -132,7 +132,7 @@ def __setitem__(self, key: CachePlayerKey, value): if not _is_valid_key(key): raise ValueError( - "Key must be a tuple of 2 deterministic axelrod Player classes" + "Key must be a tuple of 2 deterministic axelrod IpdPlayer classes" ) if not _is_valid_value(value): diff --git a/axelrod/ecosystem.py b/axelrod/ecosystem.py index 4c3bfb907..1dadfff8f 100644 --- a/axelrod/ecosystem.py +++ b/axelrod/ecosystem.py @@ -5,7 +5,7 @@ tournament needs to happen before it is created. For example: players = [axelrod.Cooperator(), axlerod.Defector()] -tournament = axelrod.Tournament(players=players) +tournament = axelrod.IpdTournament(players=players) results = tournament.play() ecosystem = axelrod.Ecosystem(results) ecosystem.reproduce(100) @@ -33,7 +33,7 @@ def __init__( population: List[int] = None, ) -> None: """Create a new ecosystem. - + Parameters ---------- results: ResultSet @@ -83,7 +83,7 @@ def __init__( def reproduce(self, turns: int): """Reproduce populations according to the payoff matrix. 
- + Parameters ---------- turns: int diff --git a/axelrod/evolvable_player.py b/axelrod/evolvable_player.py index e80da1c69..68681b250 100644 --- a/axelrod/evolvable_player.py +++ b/axelrod/evolvable_player.py @@ -2,23 +2,23 @@ from pickle import dumps, loads from random import randrange from typing import Dict, List -from .player import Player +from .player import IpdPlayer class InsufficientParametersError(Exception): - """Error indicating that insufficient parameters were specified to initialize an Evolvable Player.""" + """Error indicating that insufficient parameters were specified to initialize an Evolvable IpdPlayer.""" def __init__(self, *args): super().__init__(*args) -class EvolvablePlayer(Player): +class EvolvablePlayer(IpdPlayer): """A class for a player that can evolve, for use in the Moran process or with reinforcement learning algorithms. This is an abstract base class, not intended to be used directly. """ name = "EvolvablePlayer" - parent_class = Player + parent_class = IpdPlayer parent_kwargs = [] # type: List[str] def overwrite_init_kwargs(self, **kwargs): @@ -43,25 +43,25 @@ def serialize_parameters(self): @classmethod def deserialize_parameters(cls, serialized): - """Deserialize parameters to a Player instance.""" + """Deserialize parameters to a IpdPlayer instance.""" init_kwargs = loads(base64.b64decode(serialized)) return cls(**init_kwargs) # Optional methods for evolutionary algorithms and Moran processes. def mutate(self): - """Optional method to allow Player to produce a variant (not in place).""" + """Optional method to allow IpdPlayer to produce a variant (not in place).""" pass # pragma: no cover def crossover(self, other): - """Optional method to allow Player to produce variants in combination with another player. Returns a new - Player.""" + """Optional method to allow IpdPlayer to produce variants in combination with another player. 
Returns a new + IpdPlayer.""" pass # pragma: no cover # Optional methods for particle swarm algorithm. def receive_vector(self, vector): - """Receive a vector of params and overwrite the Player.""" + """Receive a vector of params and overwrite the IpdPlayer.""" pass # pragma: no cover def create_vector_bounds(self): diff --git a/axelrod/fingerprint.py b/axelrod/fingerprint.py index 20390abfd..cd14ea414 100644 --- a/axelrod/fingerprint.py +++ b/axelrod/fingerprint.py @@ -10,7 +10,7 @@ from mpl_toolkits.axes_grid1 import make_axes_locatable import axelrod as axl -from axelrod import Player +from axelrod import IpdPlayer from axelrod.interaction_utils import ( compute_final_score_per_turn, read_interactions_from_file, @@ -59,7 +59,7 @@ def _create_points(step: float, progress_bar: bool = True) -> List[Point]: return points -def _create_jossann(point: Point, probe: Any) -> Player: +def _create_jossann(point: Point, probe: Any) -> IpdPlayer: """Creates a JossAnn probe player that matches the Point. If the coordinates of point sums to more than 1 the parameters are @@ -70,8 +70,8 @@ def _create_jossann(point: Point, probe: Any) -> Player: ---------- point : Point probe : class or instance - A class that must be descended from axelrod.Player or an instance of - axelrod.Player. + A class that must be descended from axelrodPlayer or an instance of + axelrodPlayer. Returns ---------- @@ -80,7 +80,7 @@ def _create_jossann(point: Point, probe: Any) -> Player: """ x, y = point - if isinstance(probe, axl.Player): + if isinstance(probe, axl.IpdPlayer): init_kwargs = probe.init_kwargs probe = probe.__class__ else: @@ -96,8 +96,8 @@ def _create_jossann(point: Point, probe: Any) -> Player: def _create_probes( - probe: Union[type, Player], points: list, progress_bar: bool = True -) -> List[Player]: + probe: Union[type, IpdPlayer], points: list, progress_bar: bool = True +) -> List[IpdPlayer]: """Creates a set of probe strategies over the unit square. 
Constructs probe strategies that correspond to points with coordinates @@ -106,8 +106,8 @@ def _create_probes( Parameters ---------- probe : class or instance - A class that must be descended from axelrod.Player or an instance of - axelrod.Player. + A class that must be descended from axelrodPlayer or an instance of + axelrodPlayer. points : list of Point objects with coordinates (x, y) progress_bar : bool @@ -216,17 +216,17 @@ def _reshape_data(data: dict, points: list, size: int) -> np.ndarray: class AshlockFingerprint(object): def __init__( - self, strategy: Union[type, Player], probe: Union[type, Player] = axl.TitForTat + self, strategy: Union[type, IpdPlayer], probe: Union[type, IpdPlayer] = axl.TitForTat ) -> None: """ Parameters ---------- strategy : class or instance - A class that must be descended from axelrod.Player or an instance of - axelrod.Player. + A class that must be descended from axelrodPlayer or an instance of + axelrodPlayer. probe : class or instance - A class that must be descended from axelrod.Player or an instance of - axelrod.Player. + A class that must be descended from axelrodPlayer or an instance of + axelrodPlayer. Default: Tit For Tat """ self.strategy = strategy @@ -254,7 +254,7 @@ def _construct_tournament_elements( corresponding probe (+1 to allow for including the Strategy). tournament_players : list - A list containing instances of axelrod.Player. The first item is the + A list containing instances of axelrodPlayer. The first item is the original player, the rest are the probes. 
""" @@ -264,7 +264,7 @@ def _construct_tournament_elements( self.probe, self.points, progress_bar=progress_bar ) - if isinstance(self.strategy, axl.Player): + if isinstance(self.strategy, axl.IpdPlayer): tournament_players = [self.strategy] + probe_players else: tournament_players = [self.strategy()] + probe_players @@ -321,7 +321,7 @@ def fingerprint( ) self.step = step - self.spatial_tournament = axl.Tournament( + self.spatial_tournament = axl.IpdTournament( tourn_players, turns=turns, repetitions=repetitions, edges=edges ) self.spatial_tournament.play( @@ -404,8 +404,8 @@ def __init__(self, strategy, opponents=None, number_of_opponents=50): Parameters ---------- strategy : class or instance - A class that must be descended from axelrod.Player or an instance of - axelrod.Player. + A class that must be descended from axelrodPlayer or an instance of + axelrodPlayer. opponents : list of instances A list that contains a list of opponents Default: A spectrum of Random players @@ -460,7 +460,7 @@ def fingerprint( and the jth column the jth turn. 
""" - if isinstance(self.strategy, axl.Player): + if isinstance(self.strategy, axl.IpdPlayer): players = [self.strategy] + self.opponents else: players = [self.strategy()] + self.opponents @@ -470,7 +470,7 @@ def fingerprint( temp_file_descriptor, filename = mkstemp() # type: ignore edges = [(0, k + 1) for k in range(len(self.opponents))] - tournament = axl.Tournament( + tournament = axl.IpdTournament( players=players, edges=edges, turns=turns, diff --git a/axelrod/game.py b/axelrod/game.py index 1c3278275..c6487e783 100644 --- a/axelrod/game.py +++ b/axelrod/game.py @@ -1,13 +1,18 @@ from typing import Tuple, Union -from axelrod import Action +from axelrod import Action, BaseGame C, D = Action.C, Action.D Score = Union[int, float] -class Game(object): +class Game(): # pragma: no cover + def __init__(self): + raise DeprecationWarning("Please change path from axelrod.game.Game to axelrod.Game") + + +class IpdGame(BaseGame): """Container for the game matrix and scoring logic. Attributes @@ -18,7 +23,7 @@ class Game(object): def __init__(self, r: Score = 3, s: Score = 0, t: Score = 5, p: Score = 1) -> None: """Create a new game object. - + Parameters ---------- r: int or float @@ -31,6 +36,7 @@ def __init__(self, r: Score = 3, s: Score = 0, t: Score = 5, p: Score = 1) -> No Score obtained by both player for mutual defection. 
""" self.scores = {(C, C): (r, r), (D, D): (p, p), (C, D): (s, t), (D, C): (t, s)} + super().__init__() def RPST(self) -> Tuple[Score, Score, Score, Score]: """Returns game matrix values in Press and Dyson notation.""" @@ -59,9 +65,9 @@ def __repr__(self) -> str: return "Axelrod game: (R,P,S,T) = {}".format(self.RPST()) def __eq__(self, other): - if not isinstance(other, Game): + if not isinstance(other, IpdGame): return False return self.RPST() == other.RPST() -DefaultGame = Game() +DefaultGame = IpdGame() diff --git a/axelrod/interaction_utils.py b/axelrod/interaction_utils.py index 82159f94c..f6416be2c 100644 --- a/axelrod/interaction_utils.py +++ b/axelrod/interaction_utils.py @@ -4,7 +4,7 @@ [(C, D), (D, C),...] -This is used by both the Match class and the ResultSet class which analyse +This is used by both the IpdMatch class and the ResultSet class which analyse interactions. """ from collections import Counter, defaultdict @@ -13,7 +13,7 @@ import tqdm from axelrod.action import Action, str_to_actions -from .game import Game +from .game import IpdGame C, D = Action.C, Action.D @@ -21,7 +21,7 @@ def compute_scores(interactions, game=None): """Returns the scores of a given set of interactions.""" if not game: - game = Game() + game = IpdGame() return [game.score(plays) for plays in interactions] @@ -53,7 +53,7 @@ def compute_final_score_per_turn(interactions, game=None): def compute_winner_index(interactions, game=None): - """Returns the index of the winner of the Match""" + """Returns the index of the winner of the IpdMatch""" scores = compute_final_score(interactions, game) if scores is not None: diff --git a/axelrod/ipd_adapter.py b/axelrod/ipd_adapter.py new file mode 100644 index 000000000..2bce2883c --- /dev/null +++ b/axelrod/ipd_adapter.py @@ -0,0 +1,428 @@ +"""This is an adapter for historical API on Player, Game, Match, and Tournament + +For each of these classes, we keep a copy of the Ipd version of them as an +element, and translate the historical 
API to the current API on the Ipd version. +This keeps legacy code working as the internal API shifts to accommodate a more +general class of games. +""" + +import copy +import inspect +from typing import Dict, List, Tuple, Union + +import axelrod as axl + +Score = Union[int, float] + + +class Player(axl.IpdPlayer): + """Legacy players derive from this adapter.""" + + def __new__(cls, *args, **kwargs): + """Caches arguments for IpdPlayer cloning.""" + obj = super().__new__(cls) + obj.init_kwargs = cls.init_params(*args, **kwargs) + return obj + + @classmethod + def init_params(cls, *args, **kwargs): + """ + Return a dictionary containing the init parameters of a strategy + (without 'self'). + Use *args and *kwargs as value if specified + and complete the rest with the default values. + """ + sig = inspect.signature(cls.__init__) + # The 'self' parameter needs to be removed or the first *args will be + # assigned to it + self_param = sig.parameters.get("self") + new_params = list(sig.parameters.values()) + new_params.remove(self_param) + sig = sig.replace(parameters=new_params) + boundargs = sig.bind_partial(*args, **kwargs) + boundargs.apply_defaults() + return boundargs.arguments + + def __init__(self): + self._player = axl.IpdPlayer() + + def strategy(self, opponent: axl.IpdPlayer) -> axl.Action: + """We expect the derived class to set this behavior.""" + raise NotImplementedError() + + def play( + self, opponent: axl.IpdPlayer, noise: float = 0 + ) -> Tuple[axl.Action, axl.Action]: + # We have to provide _player.play a copy of this strategy, which will + # have an overwritten strategy, and possibly saved state and helper + # methods. 
+ return self._player.play(opponent, noise, strategy_holder=self) + + def clone(self) -> "Player": + """Clones the player without history, reapplying configuration + parameters as necessary.""" + + # You may be tempted to re-implement using the `copy` module + # Note that this would require a deepcopy in some cases and there may + # be significant changes required throughout the library. + # Consider overriding in special cases only if necessary + cls = self.__class__ + new_player = cls(**self.init_kwargs) + new_player._player.match_attributes = copy.copy(self.match_attributes) + return new_player + + def reset(self): + self._player.reset() + + def set_match_attributes( + self, length: int = -1, game: "Game" = None, noise: float = 0 + ) -> None: + self._player.set_match_attributes(length, game, noise) + + def update_history(self, play: axl.Action, coplay: axl.Action) -> None: + self._player.update_history(play, coplay) + + @property + def history(self): + return self._player.history + + @property + def match_attributes(self): + return self._player.match_attributes + + @match_attributes.setter + def match_attributes(self, match_attributes): + self._player.match_attributes = match_attributes + + @property + def cooperations(self): + return self._player.cooperations + + @property + def defections(self): + return self._player.defections + + @property + def name(self): + return self._player.name + + @name.setter + def name(self, name): + self._player.name = name + + @property + def classifier(self): + return self._player.classifier + + @classifier.setter + def classifier(self, classifier): + self._player.classifier = classifier + + @property + def state_distribution(self): + return self._player.state_distribution + + def __eq__(self, other: "Player") -> bool: + if not isinstance(other, Player): + return False + return self._player == other._player + + +class Game(object): + def __init__(self, r: Score = 3, s: Score = 0, t: Score = 5, p: Score = 1): + self._game = 
axl.IpdGame(r, s, t, p) + + def score(self, pair: Tuple[axl.Action, axl.Action]) -> Tuple[Score, Score]: + return self._game.score(pair) + + def RPST(self) -> Tuple[Score, Score, Score, Score]: + return self._game.RPST() + + @property + def scores(self): + return self._game.scores + + @scores.setter + def scores(self, scores): + self._game.scores = scores + + def __repr__(self) -> str: + return repr(self._game) + + def __eq__(self, other: "Game") -> bool: + if not isinstance(other, Game): + return False + return self._game == other._game + + +class Match(object): + def __init__( + self, + players: Tuple[axl.IpdPlayer], + turns: int = None, + prob_end: float = None, + game: axl.IpdGame = None, + deterministic_cache: axl.DeterministicCache = None, + noise: float = 0, + match_attributes: Dict = None, + reset: bool = True, + ): + self._match = axl.IpdMatch( + players, + turns, + prob_end, + game, + deterministic_cache, + noise, + match_attributes, + reset, + ) + + @property + def players(self) -> Tuple[axl.IpdPlayer]: + return self._match.players + + @players.setter + def players(self, players: Tuple[axl.IpdPlayer]): + self._match.players = players + + @property + def result(self): + return self._match.result + + @result.setter + def result(self, result): + self._match.result = result + + @property + def noise(self): + return self._match.noise + + @noise.setter + def noise(self, noise): + self._match.noise = noise + + @property + def game(self): + return self._match.game + + @game.setter + def game(self, game): + self._match.game = game + + @property + def _cache(self): + return self._match._cache + + @_cache.setter + def _cache(self, _cache): + self._match._cache = _cache + + @property + def _cache_update_required(self): + return self._match._cache_update_required + + @property + def _stochastic(self): + return self._match._stochastic + + @property + def prob_end(self): + return self._match.prob_end + + @prob_end.setter + def prob_end(self, prob_end): + 
self._match.prob_end = prob_end + + @property + def turns(self): + return self._match.turns + + @turns.setter + def turns(self, turns): + self._match.turns = turns + + @property + def reset(self): + return self._match.reset + + @reset.setter + def reset(self, reset): + self._match.reset = reset + + def play(self) -> List[Tuple[axl.Action]]: + return self._match.play() + + def scores(self) -> List[Score]: + return self._match.scores() + + def final_score(self) -> Score: + return self._match.final_score() + + def final_score_per_turn(self) -> Score: + return self._match.final_score_per_turn() + + def winner(self) -> axl.IpdPlayer: + return self._match.winner() + + def cooperation(self): + return self._match.cooperation() + + def normalised_cooperation(self): + return self._match.normalised_cooperation() + + def state_distribution(self): + return self._match.state_distribution() + + def normalised_state_distribution(self): + return self._match.normalised_state_distribution() + + def sparklines(self, c_symbol="█", d_symbol=" "): + return self._match.sparklines(c_symbol=c_symbol, d_symbol=d_symbol) + + def __len__(self): + return len(self._match) + + +class Tournament(object): + def __init__( + self, + players: List[axl.IpdPlayer], + name: str = "axelrod", + game: axl.IpdGame = None, + turns: int = None, + prob_end: float = None, + repetitions: int = 10, + noise: float = 0, + edges: List[Tuple] = None, + match_attributes: dict = None, + ) -> None: + self._tournament = axl.IpdTournament( + players, + name, + game, + turns, + prob_end, + repetitions, + noise, + edges, + match_attributes, + ) + + def setup_output(self, filename=None) -> None: + self._tournament.setup_output(filename) + + def play( + self, + build_results: bool = True, + filename: str = None, + processes: int = None, + progress_bar: bool = True, + ) -> axl.ResultSet: + return self._tournament.play( + build_results, filename, processes, progress_bar + ) + + @property + def players(self): + return 
self._tournament.players + + @players.setter + def players(self, players): + self._tournament.players = players + + @property + def game(self): + return self._tournament.game + + @game.setter + def game(self, game): + self._tournament.game = game + + @property + def turns(self): + return self._tournament.turns + + @turns.setter + def turns(self, turns): + self._tournament.turns = turns + + @property + def repetitions(self): + return self._tournament.repetitions + + @repetitions.setter + def repetitions(self, repetitions): + self._tournament.repetitions = repetitions + + @property + def name(self): + return self._tournament.name + + @name.setter + def name(self, name): + self._tournament.name = name + + @property + def _logger(self): + return self._tournament._logger + + @property + def noise(self): + return self._tournament.noise + + @noise.setter + def noise(self, noise): + self._tournament.noise = noise + + @property + def match_generator(self): + return self._tournament.match_generator + + @match_generator.setter + def match_generator(self, match_generator): + self._tournament.match_generator = match_generator + + @property + def _temp_file_descriptor(self): + return self._tournament._temp_file_descriptor + + @property + def num_interactions(self): + return self._tournament.num_interactions + + @num_interactions.setter + def num_interactions(self, num_interactions): + self._tournament.num_interactions = num_interactions + + @property + def use_progress_bar(self): + return self._tournament.use_progress_bar + + @use_progress_bar.setter + def use_progress_bar(self, use_progress_bar): + self._tournament.use_progress_bar = use_progress_bar + + @property + def filename(self): + return self._tournament.filename + + @filename.setter + def filename(self, filename): + self._tournament.filename = filename + + @property + def edges(self): + return self._tournament.edges + + @edges.setter + def edges(self, edges): + self._tournament.edges = edges + + @property + def 
prob_end(self): + return self._tournament.prob_end + + @prob_end.setter + def prob_end(self, prob_end): + self._tournament.prob_end = prob_end diff --git a/axelrod/match.py b/axelrod/match.py index e9dd29442..73ba23808 100644 --- a/axelrod/match.py +++ b/axelrod/match.py @@ -5,20 +5,26 @@ from axelrod import DEFAULT_TURNS from axelrod.action import Action from axelrod import Classifiers -from axelrod.game import Game +from axelrod.game import IpdGame +from axelrod.base_match import BaseMatch from .deterministic_cache import DeterministicCache C, D = Action.C, Action.D +class Match(): # pragma: no cover + def __init__(self): + raise DeprecationWarning("Please change path from axelrod.match.Match to axelrod.Match") + + def is_stochastic(players, noise): """Determines if a match is stochastic -- true if there is noise or if any of the players involved is stochastic.""" return noise or any(map(Classifiers["stochastic"], players)) -class Match(object): - """The Match class conducts matches between two players.""" +class IpdMatch(BaseMatch): + """The IpdMatch class conducts matches between two players.""" def __init__( self, @@ -35,12 +41,12 @@ def __init__( Parameters ---------- players : tuple - A pair of axelrod.Player objects + A pair of Player objects turns : integer The number of turns per match prob_end : float The probability of a given turn ending a match - game : axelrod.Game + game : axelrod.IpdGame The game object used to score the match deterministic_cache : axelrod.DeterministicCache A cache of resulting actions for deterministic matches @@ -66,7 +72,7 @@ def __init__( self.noise = noise if game is None: - self.game = Game() + self.game = IpdGame() else: self.game = game @@ -88,6 +94,10 @@ def __init__( self.players = list(players) self.reset = reset + super().__init__( + players, turns, prob_end, game, noise, match_attributes, reset + ) + @property def players(self): return self._players @@ -169,19 +179,19 @@ def play(self): return result def scores(self): 
- """Returns the scores of the previous Match plays.""" + """Returns the scores of the previous IpdMatch plays.""" return iu.compute_scores(self.result, self.game) def final_score(self): - """Returns the final score for a Match.""" + """Returns the final score for a IpdMatch.""" return iu.compute_final_score(self.result, self.game) def final_score_per_turn(self): - """Returns the mean score per round for a Match.""" + """Returns the mean score per round for a IpdMatch.""" return iu.compute_final_score_per_turn(self.result, self.game) def winner(self): - """Returns the winner of the Match.""" + """Returns the winner of the IpdMatch.""" winner_index = iu.compute_winner_index(self.result, self.game) if winner_index is False: # No winner return False @@ -243,7 +253,7 @@ def sample_length(prob_end): . Note that this corresponds to sampling at the end of every turn whether - or not the Match ends. + or not the IpdMatch ends. """ if prob_end == 0: return float("inf") diff --git a/axelrod/match_generator.py b/axelrod/match_generator.py index d74c143fd..d4dca9dc1 100644 --- a/axelrod/match_generator.py +++ b/axelrod/match_generator.py @@ -11,18 +11,18 @@ def __init__( match_attributes=None, ): """ - A class to generate matches. This is used by the Tournament class which + A class to generate matches. This is used by the IpdTournament class which is in charge of playing the matches and collecting the results. 
Parameters ---------- players : list - A list of axelrod.Player objects + A list of axelrodPlayer objects repetitions : int The number of repetitions of a given match turns : integer The number of turns per match - game : axelrod.Game + game : axelrod.IpdGame The game object used to score the match noise : float, 0 The probability that a player's intended action should be flipped diff --git a/axelrod/mock_player.py b/axelrod/mock_player.py index 41ee0de2a..b83a44bba 100644 --- a/axelrod/mock_player.py +++ b/axelrod/mock_player.py @@ -2,17 +2,17 @@ from typing import List from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer C, D = Action.C, Action.D -class MockPlayer(Player): +class MockPlayer(IpdPlayer): """Creates a mock player that plays a given sequence of actions. If no actions are given, plays like Cooperator. Used for testing. """ - name = "Mock Player" + name = "Mock IpdPlayer" def __init__(self, actions: List[Action] = None) -> None: super().__init__() @@ -20,7 +20,7 @@ def __init__(self, actions: List[Action] = None) -> None: actions = [] self.actions = cycle(actions) - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # Return the next saved action, if present. 
try: action = self.actions.__next__() diff --git a/axelrod/moran.py b/axelrod/moran.py index 1c0bdc487..1ea3c7e40 100644 --- a/axelrod/moran.py +++ b/axelrod/moran.py @@ -6,11 +6,11 @@ import matplotlib.pyplot as plt import numpy as np -from axelrod import EvolvablePlayer, DEFAULT_TURNS, Game, Player +from axelrod import EvolvablePlayer, DEFAULT_TURNS, IpdGame, IpdPlayer from .deterministic_cache import DeterministicCache from .graph import Graph, complete_graph -from .match import Match +from .match import IpdMatch from .random_ import randrange @@ -45,11 +45,11 @@ def fitness_proportionate_selection( class MoranProcess(object): def __init__( self, - players: List[Player], + players: List[IpdPlayer], turns: int = DEFAULT_TURNS, prob_end: float = None, noise: float = 0, - game: Game = None, + game: IpdGame = None, deterministic_cache: DeterministicCache = None, mutation_rate: float = 0.0, mode: str = "bd", @@ -61,7 +61,7 @@ def __init__( ) -> None: """ An agent based Moran process class. In each round, each player plays a - Match with each other player. Players are assigned a fitness score by + IpdMatch with each other player. Players are assigned a fitness score by their total score from all matches in the round. A player is chosen to reproduce proportionally to fitness, possibly mutated, and is cloned. The clone replaces a randomly chosen player. @@ -78,7 +78,7 @@ def __init__( It is possible to pass interaction graphs and reproduction graphs to the Moran process. In this case, in each round, each player plays a - Match with each neighboring player according to the interaction graph. + IpdMatch with each neighboring player according to the interaction graph. Players are assigned a fitness score by their total score from all matches in the round. A player is chosen to reproduce proportionally to fitness, possibly mutated, and is cloned. The clone replaces a randomly @@ -94,7 +94,7 @@ def __init__( noise: The background noise, if any. 
Randomly flips plays with probability `noise`. - game: axelrod.Game + game: axelrod.IpdGame The game object used to score matches. deterministic_cache: A optional prebuilt deterministic cache @@ -182,7 +182,7 @@ def set_players(self) -> None: self.players.append(player) self.populations = [self.population_distribution()] - def mutate(self, index: int) -> Player: + def mutate(self, index: int) -> IpdPlayer: """Mutate the player at index. Parameters @@ -356,7 +356,7 @@ def score_all(self) -> List: for i, j in self._matchup_indices(): player1 = self.players[i] player2 = self.players[j] - match = Match( + match = IpdMatch( (player1, player2), turns=self.turns, prob_end=self.prob_end, @@ -469,14 +469,14 @@ def populations_plot(self, ax=None): class ApproximateMoranProcess(MoranProcess): """ A class to approximate a Moran process based - on a distribution of potential Match outcomes. + on a distribution of potential IpdMatch outcomes. Instead of playing the matches, the result is sampled from a dictionary of player tuples to distribution of match outcomes """ def __init__( - self, players: List[Player], cached_outcomes: dict, mutation_rate: float = 0 + self, players: List[IpdPlayer], cached_outcomes: dict, mutation_rate: float = 0 ) -> None: """ Parameters diff --git a/axelrod/player.py b/axelrod/player.py index 61cc609af..13dc23f96 100644 --- a/axelrod/player.py +++ b/axelrod/player.py @@ -6,6 +6,7 @@ import numpy as np +from axelrod.base_player import BasePlayer from axelrod.action import Action from axelrod.game import DefaultGame from axelrod.history import History @@ -14,6 +15,11 @@ C, D = Action.C, Action.D +class Player(): # pragma: no cover + def __init__(self): + raise DeprecationWarning("Please change path from axelrod.player.Player to axelrod.Player") + + def simultaneous_play(player, coplayer, noise=0): """This pits two players against each other.""" s1, s2 = player.strategy(coplayer), coplayer.strategy(player) @@ -25,17 +31,16 @@ def 
simultaneous_play(player, coplayer, noise=0): return s1, s2 -class Player(object): +class IpdPlayer(BasePlayer): """A class for a player in the tournament. This is an abstract base class, not intended to be used directly. """ - name = "Player" + name = "IpdPlayer" classifier = {} # type: Dict[str, Any] - def __new__(cls, *args, **kwargs): - """Caches arguments for Player cloning.""" + """Caches arguments for IpdPlayer cloning.""" obj = super().__new__(cls) obj.init_kwargs = cls.init_params(*args, **kwargs) return obj @@ -64,6 +69,7 @@ def __init__(self): self._history = History() self.classifier = copy.deepcopy(self.classifier) self.set_match_attributes() + super().__init__() def __eq__(self, other): """ @@ -72,7 +78,9 @@ def __eq__(self, other): if self.__repr__() != other.__repr__(): return False - for attribute in set(list(self.__dict__.keys()) + list(other.__dict__.keys())): + for attribute in set( + list(self.__dict__.keys()) + list(other.__dict__.keys()) + ): value = getattr(self, attribute, None) other_value = getattr(other, attribute, None) @@ -86,14 +94,20 @@ def __eq__(self, other): ): # Split the original generator so it is not touched generator, original_value = itertools.tee(value) - other_generator, original_other_value = itertools.tee(other_value) + other_generator, original_other_value = itertools.tee( + other_value + ) if isinstance(value, types.GeneratorType): setattr(self, attribute, (ele for ele in original_value)) - setattr(other, attribute, (ele for ele in original_other_value)) + setattr( + other, attribute, (ele for ele in original_other_value) + ) else: setattr(self, attribute, itertools.cycle(original_value)) - setattr(other, attribute, itertools.cycle(original_other_value)) + setattr( + other, attribute, itertools.cycle(original_other_value) + ) for _ in range(200): try: @@ -128,10 +142,12 @@ def __repr__(self): Appends the `__init__` parameters to the strategy's name.""" name = self.name prefix = ": " - gen = (value for value in 
self.init_kwargs.values() if value is not None) + gen = ( + value for value in self.init_kwargs.values() if value is not None + ) for value in gen: try: - if issubclass(value, Player): + if issubclass(value, IpdPlayer): value = value.name except TypeError: pass @@ -140,16 +156,19 @@ def __repr__(self): return name def __getstate__(self): - """Used for pickling. Override if Player contains unpickleable attributes.""" + """Used for pickling. Override if IpdPlayer contains unpickleable attributes.""" return self.__dict__ def strategy(self, opponent): """This is a placeholder strategy.""" raise NotImplementedError() - def play(self, opponent, noise=0): - """This pits two players against each other.""" - return simultaneous_play(self, opponent, noise) + def play(self, opponent, noise=0, strategy_holder=None): + """This pits two players against each other, using the passed strategy + holder, if provided.""" + if strategy_holder is None: + strategy_holder = self + return simultaneous_play(strategy_holder, opponent, noise) def clone(self): """Clones the player without history, reapplying configuration diff --git a/axelrod/result_set.py b/axelrod/result_set.py index 36e33a4b7..4df530ef7 100644 --- a/axelrod/result_set.py +++ b/axelrod/result_set.py @@ -12,7 +12,7 @@ import dask as da import dask.dataframe as dd -from . 
import eigen +from axelrod import eigen C, D = Action.C, Action.D @@ -677,7 +677,7 @@ def summarise(self): median_wins = map(np.nanmedian, self.wins) self.player = namedtuple( - "Player", + "IpdPlayer", [ "Rank", "Name", diff --git a/axelrod/strategies/__init__.py b/axelrod/strategies/__init__.py index 1e8e1a4ff..1906a1950 100644 --- a/axelrod/strategies/__init__.py +++ b/axelrod/strategies/__init__.py @@ -1,4 +1,4 @@ -from ..classifier import Classifiers +from axelrod.classifier import Classifiers from ._strategies import * from ._filters import passes_filterset @@ -121,13 +121,13 @@ def filtered_strategies(filterset, strategies=all_strategies): 'min_memory_depth': 2 } strategies: list - of subclasses of axelrod.Player + of subclasses of axelrodPlayer Returns ------- list - of subclasses of axelrod.Player + of subclasses of axelrodPlayer """ return [s for s in strategies if passes_filterset(s, filterset)] diff --git a/axelrod/strategies/_filters.py b/axelrod/strategies/_filters.py index c9c199bf7..5a150dd9a 100644 --- a/axelrod/strategies/_filters.py +++ b/axelrod/strategies/_filters.py @@ -13,7 +13,7 @@ def passes_operator_filter(player, classifier_key, value, operator): For the following strategy: - class ExampleStrategy(Player): + class ExampleStrategy(IpdPlayer): classifier = { 'stochastic': True, 'inspects_source': False, @@ -27,7 +27,7 @@ class ExampleStrategy(Player): Parameters ---------- - player : an instance of axelrod.Player + player : an instance of axelrodPlayer classifier_key: string Defining which entry from the strategy's classifier dict is to be tested (e.g. 'memory_depth'). 
@@ -58,7 +58,7 @@ def passes_in_list_filter(player, classifier_key, value): For the following strategy: - class ExampleStrategy(Player): + class ExampleStrategy(IpdPlayer): classifier = { 'stochastic': True, 'inspects_source': False, @@ -73,7 +73,7 @@ class ExampleStrategy(Player): Parameters ---------- - player: a descendant class of axelrod.Player + player: a descendant class of axelrodPlayer classifier_key: string Defining which entry from the strategy's classifier dict is to be tested (e.g. 'makes_use_of'). @@ -101,7 +101,7 @@ def passes_filterset(strategy, filterset): For the following strategy: - class ExampleStrategy(Player): + class ExampleStrategy(IpdPlayer): classifier = { 'stochastic': True, 'inspects_source': False, @@ -123,7 +123,7 @@ class ExampleStrategy(Player): Parameters ---------- - strategy : a descendant class of axelrod.Player + strategy : a descendant class of axelrodPlayer filterset : dict mapping filter name to criterion. e.g. diff --git a/axelrod/strategies/_strategies.py b/axelrod/strategies/_strategies.py index 85e58b4b4..aa772edc5 100644 --- a/axelrod/strategies/_strategies.py +++ b/axelrod/strategies/_strategies.py @@ -106,6 +106,7 @@ Thumper, ) from .finite_state_machines import ( # pylint: disable=unused-import + SimpleFSM, EvolvableFSMPlayer, FSMPlayer, ) @@ -145,7 +146,7 @@ from .grumpy import Grumpy from .handshake import Handshake from .hmm import EvolvedHMM5 -from .hmm import EvolvableHMMPlayer, HMMPlayer # pylint: disable=unused-import +from .hmm import SimpleHMM, EvolvableHMMPlayer, HMMPlayer # pylint: disable=unused-import from .human import Human # pylint: disable=unused-import from .hunter import ( AlternatorHunter, diff --git a/axelrod/strategies/adaptive.py b/axelrod/strategies/adaptive.py index bef1b8dac..31aedccfc 100644 --- a/axelrod/strategies/adaptive.py +++ b/axelrod/strategies/adaptive.py @@ -1,12 +1,12 @@ from typing import List from axelrod.action import Action -from axelrod.player import Player +from 
axelrod.player import IpdPlayer C, D = Action.C, Action.D -class Adaptive(Player): +class Adaptive(IpdPlayer): """Start with a specific sequence of C and D, then play the strategy that has worked best, recalculated each turn. @@ -34,7 +34,7 @@ def __init__(self, initial_plays: List[Action] = None) -> None: self.initial_plays = initial_plays self.scores = {C: 0, D: 0} - def score_last_round(self, opponent: Player): + def score_last_round(self, opponent: IpdPlayer): # Load the default game if not supplied by a tournament. game = self.match_attributes["game"] if len(self.history): @@ -42,7 +42,7 @@ def score_last_round(self, opponent: Player): scores = game.score(last_round) self.scores[last_round[0]] += scores[0] - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # Update scores from the last play self.score_last_round(opponent) # Begin by playing the sequence C,C,C,C,C,C,D,D,D,D,D diff --git a/axelrod/strategies/adaptor.py b/axelrod/strategies/adaptor.py index 2648b2704..9eaae0be6 100644 --- a/axelrod/strategies/adaptor.py +++ b/axelrod/strategies/adaptor.py @@ -1,7 +1,7 @@ from typing import Dict, Tuple from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer from axelrod.random_ import random_choice from numpy import heaviside @@ -9,7 +9,7 @@ C, D = Action.C, Action.D -class AbstractAdaptor(Player): +class AbstractAdaptor(IpdPlayer): """ An adaptive strategy that updates an internal state based on the last round of play. Using this state the player Cooperates with a probability @@ -46,7 +46,7 @@ def __init__(self, delta: Dict[Tuple[Action, Action], float], self.delta = delta self.s = 0. 
- def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if self.history: # Update internal state from the last play last_round = (self.history[-1], opponent.history[-1]) diff --git a/axelrod/strategies/alternator.py b/axelrod/strategies/alternator.py index 6ed3605a4..a25c4c968 100644 --- a/axelrod/strategies/alternator.py +++ b/axelrod/strategies/alternator.py @@ -1,10 +1,10 @@ from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer C, D = Action.C, Action.D -class Alternator(Player): +class Alternator(IpdPlayer): """ A player who alternates between cooperating and defecting. @@ -25,7 +25,7 @@ class Alternator(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) == 0: return C if self.history[-1] == C: diff --git a/axelrod/strategies/ann.py b/axelrod/strategies/ann.py index 2d3a1bc85..3398f5761 100644 --- a/axelrod/strategies/ann.py +++ b/axelrod/strategies/ann.py @@ -4,7 +4,7 @@ from axelrod.action import Action from axelrod.load_data_ import load_weights from axelrod.evolvable_player import EvolvablePlayer, InsufficientParametersError, crossover_lists -from axelrod.player import Player +from axelrod.player import IpdPlayer C, D = Action.C, Action.D @@ -19,17 +19,17 @@ def num_weights(num_features, num_hidden): return size -def compute_features(player: Player, opponent: Player) -> List[int]: +def compute_features(player: IpdPlayer, opponent: IpdPlayer) -> List[int]: """ Compute history features for Neural Network: * Opponent's first move is C * Opponent's first move is D * Opponent's second move is C * Opponent's second move is D - * Player's previous move is C - * Player's previous move is D - * Player's second previous move is C - * Player's second previous move is D + * IpdPlayer's previous move is C + * IpdPlayer's previous move is D + * IpdPlayer's second 
previous move is C + * IpdPlayer's second previous move is D * Opponent's previous move is C * Opponent's previous move is D * Opponent's second previous move is C @@ -148,7 +148,7 @@ def split_weights( return input2hidden, hidden2output, bias -class ANN(Player): +class ANN(IpdPlayer): """Artificial Neural Network based strategy. A single layer neural network based strategy, with the following @@ -157,10 +157,10 @@ class ANN(Player): * Opponent's first move is D * Opponent's second move is C * Opponent's second move is D - * Player's previous move is C - * Player's previous move is D - * Player's second previous move is C - * Player's second previous move is D + * IpdPlayer's previous move is C + * IpdPlayer's previous move is D + * IpdPlayer's second previous move is C + * IpdPlayer's second previous move is D * Opponent's previous move is C * Opponent's previous move is D * Opponent's second previous move is C @@ -206,7 +206,7 @@ def _process_weights(self, weights, num_features, num_hidden): self.hidden_to_output_layer_weights = np.array(h2o) self.bias_weights = np.array(bias) - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: features = compute_features(self, opponent) output = activate( self.bias_weights, diff --git a/axelrod/strategies/apavlov.py b/axelrod/strategies/apavlov.py index 0b54b10a9..db7ceb190 100644 --- a/axelrod/strategies/apavlov.py +++ b/axelrod/strategies/apavlov.py @@ -1,12 +1,12 @@ from typing import Optional from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer C, D = Action.C, Action.D -class APavlov2006(Player): +class APavlov2006(IpdPlayer): """ APavlov attempts to classify its opponent as one of five strategies: Cooperative, ALLD, STFT, PavlovD, or Random. 
APavlov then responds in a @@ -33,7 +33,7 @@ def __init__(self) -> None: super().__init__() self.opponent_class = None # type: Optional[str] - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # TFT for six rounds if len(self.history) < 6: return D if opponent.history[-1:] == [D] else C @@ -70,7 +70,7 @@ def strategy(self, opponent: Player) -> Action: return C -class APavlov2011(Player): +class APavlov2011(IpdPlayer): """ APavlov attempts to classify its opponent as one of four strategies: Cooperative, ALLD, STFT, or Random. APavlov then responds in a manner @@ -97,7 +97,7 @@ def __init__(self) -> None: super().__init__() self.opponent_class = None # type: Optional[str] - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # TFT for six rounds if len(self.history) < 6: return D if opponent.history[-1:] == [D] else C diff --git a/axelrod/strategies/appeaser.py b/axelrod/strategies/appeaser.py index 790b8439d..6e21054fa 100644 --- a/axelrod/strategies/appeaser.py +++ b/axelrod/strategies/appeaser.py @@ -1,10 +1,10 @@ from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer C, D = Action.C, Action.D -class Appeaser(Player): +class Appeaser(IpdPlayer): """A player who tries to guess what the opponent wants. Switch the classifier every time the opponent plays D. 
@@ -26,7 +26,7 @@ class Appeaser(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not len(opponent.history): return C else: diff --git a/axelrod/strategies/averagecopier.py b/axelrod/strategies/averagecopier.py index 597407c61..95dd67900 100644 --- a/axelrod/strategies/averagecopier.py +++ b/axelrod/strategies/averagecopier.py @@ -1,11 +1,11 @@ from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer from axelrod.random_ import random_choice C, D = Action.C, Action.D -class AverageCopier(Player): +class AverageCopier(IpdPlayer): """ The player will cooperate with probability p if the opponent's cooperation ratio is p. Starts with random decision. @@ -26,7 +26,7 @@ class AverageCopier(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(opponent.history) == 0: # Randomly picks a strategy (not affected by history). return random_choice(0.5) @@ -34,7 +34,7 @@ def strategy(self, opponent: Player) -> Action: return random_choice(p) -class NiceAverageCopier(Player): +class NiceAverageCopier(IpdPlayer): """ Same as Average Copier, but always starts by cooperating. 
@@ -54,7 +54,7 @@ class NiceAverageCopier(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(opponent.history) == 0: return C p = opponent.cooperations / len(opponent.history) diff --git a/axelrod/strategies/axelrod_first.py b/axelrod/strategies/axelrod_first.py index f9dab0b94..792f80f10 100644 --- a/axelrod/strategies/axelrod_first.py +++ b/axelrod/strategies/axelrod_first.py @@ -17,7 +17,7 @@ from typing import Dict, List, Tuple, Optional from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer from axelrod.random_ import random_choice from axelrod.strategy_transformers import FinalTransformer from scipy.stats import chisquare @@ -27,7 +27,7 @@ C, D = Action.C, Action.D -class FirstByDavis(Player): +class FirstByDavis(IpdPlayer): """ Submitted to Axelrod's first tournament by Morton Davis. @@ -64,7 +64,7 @@ def __init__(self, rounds_to_cooperate: int = 10) -> None: super().__init__() self._rounds_to_cooperate = rounds_to_cooperate - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """Begins by playing C, then plays D for the remaining rounds if the opponent ever plays D.""" if len(self.history) < self._rounds_to_cooperate: @@ -74,7 +74,7 @@ def strategy(self, opponent: Player) -> Action: return C -class FirstByDowning(Player): +class FirstByDowning(IpdPlayer): """ Submitted to Axelrod's first tournament by Downing @@ -89,7 +89,7 @@ class FirstByDowning(Player): > based on an outcome maximization interpretation of human performances proposed > by Downing (1975)." - The Downing (1975) paper is "The Prisoner's Dilemma Game as a + The Downing (1975) paper is "The Prisoner's Dilemma IpdGame as a Problem-Solving Phenomenon" [Downing1975]_ and this is used to implement the strategy. 
@@ -248,7 +248,7 @@ def __init__(self) -> None: self.number_opponent_cooperations_in_response_to_C = 0 self.number_opponent_cooperations_in_response_to_D = 0 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: round_number = len(self.history) + 1 if round_number == 1: @@ -284,7 +284,7 @@ def strategy(self, opponent: Player) -> Action: return self.history[-1].flip() -class FirstByFeld(Player): +class FirstByFeld(IpdPlayer): """ Submitted to Axelrod's first tournament by Scott Feld. @@ -349,7 +349,7 @@ def _cooperation_probability(self) -> float: rounds = len(self.history) return max(self._start_coop_prob + slope * rounds, self._end_coop_prob) - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not opponent.history: return C if opponent.history[-1] == D: @@ -358,7 +358,7 @@ def strategy(self, opponent: Player) -> Action: return random_choice(p) -class FirstByGraaskamp(Player): +class FirstByGraaskamp(IpdPlayer): """ Submitted to Axelrod's first tournament by James Graaskamp. @@ -423,7 +423,7 @@ def __init__(self, alpha: float = 0.05) -> None: self.opponent_is_random = False self.next_random_defection_turn = None # type: Optional[int] - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """This is the actual strategy""" # First move if not self.history: @@ -459,7 +459,7 @@ def strategy(self, opponent: Player) -> Action: return C -class FirstByGrofman(Player): +class FirstByGrofman(IpdPlayer): """ Submitted to Axelrod's first tournament by Bernard Grofman. 
@@ -485,7 +485,7 @@ class FirstByGrofman(Player): "manipulates_source": False, "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) == 0 or self.history[-1] == opponent.history[-1]: return C return random_choice(2 / 7) @@ -523,7 +523,7 @@ def __init__(self, p: float = 0.9) -> None: super().__init__(four_vector) -class FirstByNydegger(Player): +class FirstByNydegger(IpdPlayer): """ Submitted to Axelrod's first tournament by Rudy Nydegger. @@ -606,7 +606,7 @@ def score_history( a += weight * score_map[plays] return a - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) == 0: return C if len(self.history) == 1: @@ -624,7 +624,7 @@ def strategy(self, opponent: Player) -> Action: return C -class FirstByShubik(Player): +class FirstByShubik(IpdPlayer): """ Submitted to Axelrod's first tournament by Martin Shubik. @@ -688,7 +688,7 @@ def _decrease_retaliation_counter(self): if self.retaliation_remaining == 0: self.is_retaliating = False - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not opponent.history: return C @@ -709,7 +709,7 @@ def strategy(self, opponent: Player) -> Action: return C -class FirstByTullock(Player): +class FirstByTullock(IpdPlayer): """ Submitted to Axelrod's first tournament by Gordon Tullock. 
@@ -756,7 +756,7 @@ def __init__(self) -> None: self._rounds_to_cooperate = 11 self.memory_depth = self._rounds_to_cooperate - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) < self._rounds_to_cooperate: return C rounds = self._rounds_to_cooperate - 1 @@ -766,7 +766,7 @@ def strategy(self, opponent: Player) -> Action: return random_choice(prob_cooperate) -class FirstByAnonymous(Player): +class FirstByAnonymous(IpdPlayer): """ Submitted to Axelrod's first tournament by a graduate student whose name was withheld. @@ -802,13 +802,13 @@ class FirstByAnonymous(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: r = random.uniform(3, 7) / 10 return random_choice(r) @FinalTransformer((D, D), name_prefix=None) -class FirstBySteinAndRapoport(Player): +class FirstBySteinAndRapoport(IpdPlayer): """ Submitted to Axelrod's first tournament by William Stein and Amnon Rapoport. @@ -858,7 +858,7 @@ def __init__(self, alpha: float = 0.05) -> None: self.alpha = alpha self.opponent_is_random = False - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: round_number = len(self.history) + 1 # First 4 moves @@ -880,7 +880,7 @@ def strategy(self, opponent: Player) -> Action: @FinalTransformer((D, D), name_prefix=None) -class FirstByTidemanAndChieruzzi(Player): +class FirstByTidemanAndChieruzzi(IpdPlayer): """ Submitted to Axelrod's first tournament by Nicolas Tideman and Paula Chieruzzi. @@ -960,7 +960,7 @@ def _fresh_start(self): self.retaliation_remaining = 0 self.remembered_number_of_opponent_defectioons = 0 - def _score_last_round(self, opponent: Player): + def _score_last_round(self, opponent: IpdPlayer): """Updates the scores for each player.""" # Load the default game if not supplied by a tournament. 
game = self.match_attributes["game"] @@ -969,7 +969,7 @@ def _score_last_round(self, opponent: Player): self.current_score += scores[0] self.opponent_score += scores[1] - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not opponent.history: return C diff --git a/axelrod/strategies/axelrod_second.py b/axelrod/strategies/axelrod_second.py index 50e4e18ee..3e85149ec 100644 --- a/axelrod/strategies/axelrod_second.py +++ b/axelrod/strategies/axelrod_second.py @@ -10,14 +10,14 @@ import numpy as np from axelrod.action import Action from axelrod.interaction_utils import compute_final_score -from axelrod.player import Player +from axelrod.player import IpdPlayer from axelrod.random_ import random_choice from axelrod.strategies.finite_state_machines import FSMPlayer C, D = Action.C, Action.D -class SecondByChampion(Player): +class SecondByChampion(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Danny Champion. @@ -43,7 +43,7 @@ class SecondByChampion(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: current_round = len(self.history) # Cooperate for the first 10 turns if current_round == 0: @@ -61,7 +61,7 @@ def strategy(self, opponent: Player) -> Action: return D return C -class SecondByEatherley(Player): +class SecondByEatherley(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Graham Eatherley. @@ -87,7 +87,7 @@ class SecondByEatherley(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: # Cooperate on the first move if not len(opponent.history): return C @@ -100,7 +100,7 @@ def strategy(opponent: Player) -> Action: return random_choice(1 - defection_prop) -class SecondByTester(Player): +class SecondByTester(IpdPlayer): """ Submitted to Axelrod's second tournament by David Gladstein. 
@@ -131,7 +131,7 @@ def __init__(self) -> None: super().__init__() self.is_TFT = False - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # Defect on the first move if not opponent.history: return D @@ -149,7 +149,7 @@ def strategy(self, opponent: Player) -> Action: return self.history[-1].flip() -class SecondByGladstein(Player): +class SecondByGladstein(IpdPlayer): """ Submitted to Axelrod's second tournament by David Gladstein. @@ -185,7 +185,7 @@ def __init__(self) -> None: # This strategy assumes the opponent is a patsy self.patsy = True - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # Defect on the first move if not self.history: return D @@ -205,7 +205,7 @@ def strategy(self, opponent: Player) -> Action: return opponent.history[-1] -class SecondByTranquilizer(Player): +class SecondByTranquilizer(IpdPlayer): """ Submitted to Axelrod's second tournament by Craig Feathers @@ -376,7 +376,7 @@ def update_state(self, opponent): ) / (self.one_turn_after_good_defection_ratio_count + 1) self.one_turn_after_good_defection_ratio_count += 1 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not self.history: return C @@ -420,7 +420,7 @@ def strategy(self, opponent: Player) -> Action: return opponent.history[-1] -class SecondByGrofman(Player): +class SecondByGrofman(IpdPlayer): """ Submitted to Axelrod's second tournament by Bernard Grofman. 
@@ -459,7 +459,7 @@ class SecondByGrofman(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # Cooperate on the first two moves if len(self.history) < 2: return C @@ -478,7 +478,7 @@ def strategy(self, opponent: Player) -> Action: return D -class SecondByKluepfel(Player): +class SecondByKluepfel(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Charles Kluepfel (K32R). @@ -527,7 +527,7 @@ def __init__(self): super().__init__() self.cd_counts, self.dd_counts, self.dc_counts, self.cc_counts = 0, 0, 0, 0 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # First update the response matrix. if len(self.history) >= 2: if self.history[-2] == D: @@ -583,7 +583,7 @@ def strategy(self, opponent: Player) -> Action: return one_move_ago.flip() -class SecondByBorufsen(Player): +class SecondByBorufsen(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Otto Borufsen (K32R), and came in third in that tournament. @@ -657,7 +657,7 @@ def try_return(self, to_return): return C return D - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) + 1 if turn == 1: @@ -739,7 +739,7 @@ def strategy(self, opponent: Player) -> Action: return self.try_return(opponent.history[-1]) -class SecondByCave(Player): +class SecondByCave(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Rob Cave (K49R), and came in fourth in that tournament. 
@@ -771,7 +771,7 @@ class SecondByCave(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) + 1 if turn == 1: return C @@ -796,7 +796,7 @@ def strategy(self, opponent: Player) -> Action: return C -class SecondByWmAdams(Player): +class SecondByWmAdams(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by William Adams (K44R), and came in fifth in that tournament. @@ -822,7 +822,7 @@ class SecondByWmAdams(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) <= 1: return C number_defects = opponent.defections @@ -836,7 +836,7 @@ def strategy(self, opponent: Player) -> Action: return C -class SecondByGraaskampKatzen(Player): +class SecondByGraaskampKatzen(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Jim Graaskamp and Ken Katzen (K60R), and came in sixth in that tournament. @@ -874,12 +874,12 @@ def __init__(self): self.own_score = 0 self.mode = "Normal" - def update_score(self, opponent: Player): + def update_score(self, opponent: IpdPlayer): game = self.match_attributes["game"] last_round = (self.history[-1], opponent.history[-1]) self.own_score += game.score(last_round)[0] - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if self.mode == "Defect": return D @@ -909,7 +909,7 @@ def strategy(self, opponent: Player) -> Action: return opponent.history[-1] # Tit-for-Tat -class SecondByWeiner(Player): +class SecondByWeiner(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Herb Weiner (K41R), and came in seventh in that tournament. 
@@ -969,7 +969,7 @@ def try_return(self, to_return): return D return to_return - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(opponent.history) == 0: return C @@ -1001,7 +1001,7 @@ def strategy(self, opponent: Player) -> Action: return self.try_return(opponent.history[-1]) -class SecondByHarrington(Player): +class SecondByHarrington(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Paul Harrington (K75R) and came in eighth in that tournament. @@ -1238,7 +1238,7 @@ def detect_parity_streak(self, last_move): if self.parity_streak[self.parity_bit] >= self.parity_limit: return True - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) + 1 if turn == 1: @@ -1340,7 +1340,7 @@ def strategy(self, opponent: Player) -> Action: return self.try_return(D, lower_flags=False) -class SecondByTidemanAndChieruzzi(Player): +class SecondByTidemanAndChieruzzi(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by T. Nicolaus Tideman and Paula Chieruzzi (K84R) and came in ninth in that tournament. @@ -1395,7 +1395,7 @@ def _fresh_start(self): self.score_to_beat = 0 self.score_to_beat_inc = 0 - def _score_last_round(self, opponent: Player): + def _score_last_round(self, opponent: IpdPlayer): """Updates the scores for each player.""" # Load the default game if not supplied by a tournament. 
game = self.match_attributes["game"] @@ -1404,7 +1404,7 @@ def _score_last_round(self, opponent: Player): self.current_score += scores[0] self.opponent_score += scores[1] - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: current_round = len(self.history) + 1 if current_round == 1: @@ -1451,7 +1451,7 @@ def strategy(self, opponent: Player) -> Action: return D -class SecondByGetzler(Player): +class SecondByGetzler(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Abraham Getzler (K35R) and came in eleventh in that tournament. @@ -1479,7 +1479,7 @@ def __init__(self) -> None: super().__init__() self.flack = 0.0 # The relative untrustworthiness of opponent - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not opponent.history: return C @@ -1489,7 +1489,7 @@ def strategy(self, opponent: Player) -> Action: return random_choice(1.0 - self.flack) -class SecondByLeyvraz(Player): +class SecondByLeyvraz(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Fransois Leyvraz (K68R) and came in twelfth in that tournament. @@ -1532,7 +1532,7 @@ def __init__(self) -> None: (D, D, D): 0.25, # Rule 1 } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: recent_history = [C, C, C] # Default to C. for go_back in range(1, 4): if len(opponent.history) >= go_back: @@ -1543,7 +1543,7 @@ def strategy(self, opponent: Player) -> Action: ) -class SecondByWhite(Player): +class SecondByWhite(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Edward C White (K72R) and came in thirteenth in that tournament. 
@@ -1569,7 +1569,7 @@ class SecondByWhite(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) + 1 if turn <= 10 or opponent.history[-1] == C: @@ -1580,7 +1580,7 @@ def strategy(self, opponent: Player) -> Action: return C -class SecondByBlack(Player): +class SecondByBlack(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Paul E Black (K83R) and came in fifteenth in that tournament. @@ -1613,7 +1613,7 @@ def __init__(self) -> None: # Cooperation probability self.prob_coop = {0: 1.0, 1: 1.0, 2: 0.88, 3: 0.68, 4: 0.4, 5: 0.04} - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(opponent.history) < 5: return C @@ -1625,7 +1625,7 @@ def strategy(self, opponent: Player) -> Action: return random_choice(self.prob_coop[number_defects]) -class SecondByRichardHufford(Player): +class SecondByRichardHufford(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Richard Hufford (K47R) and came in sixteenth in that tournament. @@ -1691,7 +1691,7 @@ def __init__(self) -> None: self.coop_after_ab_count = 2 self.def_after_ab_count = 2 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) + 1 if turn == 1: return C @@ -1739,7 +1739,7 @@ def strategy(self, opponent: Player) -> Action: return D -class SecondByYamachi(Player): +class SecondByYamachi(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Brian Yamachi (K64R) and came in seventeenth in that tournament. 
@@ -1811,7 +1811,7 @@ def try_return(self, to_return, opp_def): return to_return - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) + 1 if turn == 1: return self.try_return(C, 0) @@ -1930,7 +1930,7 @@ def __init__(self) -> None: super().__init__() self.credit = 7 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) + 1 if turn == 1: return C @@ -1956,7 +1956,7 @@ def strategy(self, opponent: Player) -> Action: return C -class SecondByRowsam(Player): +class SecondByRowsam(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Glen Rowsam (K58R) and came in 21st in that tournament. @@ -2007,7 +2007,7 @@ def __init__(self) -> None: self.current_score = 0 self.opponent_score = 0 - def _score_last_round(self, opponent: Player): + def _score_last_round(self, opponent: IpdPlayer): """Updates the scores for each player.""" game = self.match_attributes["game"] last_round = (self.history[-1], opponent.history[-1]) @@ -2015,7 +2015,7 @@ def _score_last_round(self, opponent: Player): self.current_score += scores[0] self.opponent_score += scores[1] - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) + 1 if turn > 1: self._score_last_round(opponent) @@ -2061,7 +2061,7 @@ def strategy(self, opponent: Player) -> Action: return D -class SecondByAppold(Player): +class SecondByAppold(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Scott Appold (K88R) and came in 22nd in that tournament. 
@@ -2106,7 +2106,7 @@ def __init__(self) -> None: self.first_opp_def = False - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) + 1 us_two_turns_ago = C if turn <= 2 else self.history[-2] diff --git a/axelrod/strategies/backstabber.py b/axelrod/strategies/backstabber.py index b3a522ae2..5fdb5e900 100644 --- a/axelrod/strategies/backstabber.py +++ b/axelrod/strategies/backstabber.py @@ -1,12 +1,12 @@ from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer from axelrod.strategy_transformers import FinalTransformer C, D = Action.C, Action.D @FinalTransformer((D, D), name_prefix=None) # End with two defections -class BackStabber(Player): +class BackStabber(IpdPlayer): """ Forgives the first 3 defections but on the fourth will defect forever. Defects on the last 2 rounds unconditionally. @@ -27,12 +27,12 @@ class BackStabber(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: return _backstabber_strategy(opponent) @FinalTransformer((D, D), name_prefix=None) # End with two defections -class DoubleCrosser(Player): +class DoubleCrosser(IpdPlayer): """ Forgives the first 3 defections but on the fourth will defect forever. Defects on the last 2 rounds unconditionally. @@ -57,13 +57,13 @@ class DoubleCrosser(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if _opponent_triggers_alt_strategy(opponent): return _alt_strategy(opponent) return _backstabber_strategy(opponent) -def _backstabber_strategy(opponent: Player) -> Action: +def _backstabber_strategy(opponent: IpdPlayer) -> Action: """ Cooperates until opponent defects a total of four times, then always defects. 
@@ -75,7 +75,7 @@ def _backstabber_strategy(opponent: Player) -> Action: return C -def _alt_strategy(opponent: Player) -> Action: +def _alt_strategy(opponent: IpdPlayer) -> Action: """ If opponent's previous two plays were defect, then defects on next round. Otherwise, cooperates. @@ -86,7 +86,7 @@ def _alt_strategy(opponent: Player) -> Action: return C -def _opponent_triggers_alt_strategy(opponent: Player) -> bool: +def _opponent_triggers_alt_strategy(opponent: IpdPlayer) -> bool: """ If opponent did not defect in first 7 rounds and the current round is from 8 to 180, return True. Else, return False. @@ -99,7 +99,7 @@ def _opponent_triggers_alt_strategy(opponent: Player) -> bool: return before_alt_strategy < current_round <= last_round_of_alt_strategy -def _opponent_defected_in_first_n_rounds(opponent: Player, first_n_rounds: int) -> bool: +def _opponent_defected_in_first_n_rounds(opponent: IpdPlayer, first_n_rounds: int) -> bool: """ If opponent defected in the first N rounds, return True. Else return False. """ diff --git a/axelrod/strategies/better_and_better.py b/axelrod/strategies/better_and_better.py index 1af697a00..2c0216517 100644 --- a/axelrod/strategies/better_and_better.py +++ b/axelrod/strategies/better_and_better.py @@ -1,11 +1,11 @@ from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer from axelrod.random_ import random_choice C, D = Action.C, Action.D -class BetterAndBetter(Player): +class BetterAndBetter(IpdPlayer): """ Defects with probability of '(1000 - current turn) / 1000'. Therefore it is less and less likely to defect as the round goes on. 
@@ -26,7 +26,7 @@ class BetterAndBetter(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: current_round = len(self.history) + 1 probability = current_round / 1000 return random_choice(probability) diff --git a/axelrod/strategies/bush_mosteller.py b/axelrod/strategies/bush_mosteller.py index d6ed5adf3..b62800926 100644 --- a/axelrod/strategies/bush_mosteller.py +++ b/axelrod/strategies/bush_mosteller.py @@ -2,12 +2,12 @@ from axelrod import random_choice from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer C, D = Action.C, Action.D -class BushMosteller(Player): +class BushMosteller(IpdPlayer): """ A player that is based on Bush Mosteller reinforced learning algorithm, it decides what it will @@ -72,14 +72,14 @@ def __init__( self._stimulus = 0.0 self._learning_rate = learning_rate - def stimulus_update(self, opponent: Player): + def stimulus_update(self, opponent: IpdPlayer): """ Updates the stimulus attribute based on the opponent's history. Used by the strategy. 
Parameters - opponent : axelrod.Player + opponent : axelrod.IpdPlayer The current opponent """ game = self.match_attributes["game"] @@ -120,7 +120,7 @@ def stimulus_update(self, opponent: Player): elif self._stimulus < 0: self._d_prob += self._learning_rate * self._stimulus * self._d_prob - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # First turn if len(self.history) == 0: diff --git a/axelrod/strategies/calculator.py b/axelrod/strategies/calculator.py index 8ac9b59d0..162cf1b21 100644 --- a/axelrod/strategies/calculator.py +++ b/axelrod/strategies/calculator.py @@ -1,13 +1,13 @@ from axelrod._strategy_utils import detect_cycle from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer from .axelrod_first import FirstByJoss as Joss C, D = Action.C, Action.D -class Calculator(Player): +class Calculator(IpdPlayer): """ Plays like (Hard) Joss for the first 20 rounds. If periodic behavior is detected, defect forever. Otherwise play TFT. 
@@ -33,7 +33,7 @@ def __init__(self) -> None: super().__init__() self.joss_instance = Joss() - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) if turn > 0: self.joss_instance.history.append(self.history[-1], @@ -47,7 +47,7 @@ def strategy(self, opponent: Player) -> Action: play = self.joss_instance.strategy(opponent) return play - def extended_strategy(self, opponent: Player) -> Action: + def extended_strategy(self, opponent: IpdPlayer) -> Action: if self.cycle: return D else: diff --git a/axelrod/strategies/cooperator.py b/axelrod/strategies/cooperator.py index 6435a3504..5a6675449 100644 --- a/axelrod/strategies/cooperator.py +++ b/axelrod/strategies/cooperator.py @@ -1,10 +1,10 @@ from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer C, D = Action.C, Action.D -class Cooperator(Player): +class Cooperator(IpdPlayer): """A player who only ever cooperates. Names: @@ -26,11 +26,11 @@ class Cooperator(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: return C -class TrickyCooperator(Player): +class TrickyCooperator(IpdPlayer): """ A cooperator that is trying to be tricky. @@ -53,7 +53,7 @@ class TrickyCooperator(Player): _min_history_required_to_try_trickiness = 3 _max_history_depth_for_trickiness = -10 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """Almost always cooperates, but will try to trick the opponent by defecting. 
diff --git a/axelrod/strategies/cycler.py b/axelrod/strategies/cycler.py index 509141717..a892db1f4 100644 --- a/axelrod/strategies/cycler.py +++ b/axelrod/strategies/cycler.py @@ -5,13 +5,13 @@ from axelrod.action import Action, actions_to_str, str_to_actions from axelrod.evolvable_player import EvolvablePlayer, InsufficientParametersError, crossover_lists -from axelrod.player import Player +from axelrod.player import IpdPlayer C, D = Action.C, Action.D actions = (C, D) -class AntiCycler(Player): +class AntiCycler(IpdPlayer): """ A player that follows a sequence of plays that contains no cycles: CDD CD CCD CCCD CCCCD ... @@ -42,7 +42,7 @@ def __init__(self) -> None: def _get_first_three() -> List[Action]: return [C, D, D] - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: while self.first_three: return self.first_three.pop(0) if self.cycle_counter < self.cycle_length: @@ -54,7 +54,7 @@ def strategy(self, opponent: Player) -> Action: return D -class Cycler(Player): +class Cycler(IpdPlayer): """ A player that repeats a given sequence indefinitely. @@ -89,7 +89,7 @@ def __init__(self, cycle: str = "CCD") -> None: self.cycle = cycle self.set_cycle(cycle=cycle) - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: return next(self.cycle_iter) def set_cycle(self, cycle: str): @@ -156,7 +156,7 @@ def mutate(self) -> EvolvablePlayer: def crossover(self, other) -> EvolvablePlayer: """ - Creates and returns a new Player instance with a single crossover point. + Creates and returns a new IpdPlayer instance with a single crossover point. 
""" if other.__class__ != self.__class__: raise TypeError("Crossover must be between the same player classes.") diff --git a/axelrod/strategies/darwin.py b/axelrod/strategies/darwin.py index 93db009eb..a86d394e9 100644 --- a/axelrod/strategies/darwin.py +++ b/axelrod/strategies/darwin.py @@ -7,12 +7,12 @@ from typing import Optional from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer C, D = Action.C, Action.D -class Darwin(Player): +class Darwin(IpdPlayer): """ A strategy which accumulates a record (the 'genome') of what the most favourable response in the previous round should have been, and naively @@ -61,7 +61,7 @@ def foil_strategy_inspection() -> Action: """Foils _strategy_utils.inspect_strategy and _strategy_utils.look_ahead""" return C - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: trial = len(self.history) if trial > 0: diff --git a/axelrod/strategies/dbs.py b/axelrod/strategies/dbs.py index dafdb8ec2..346163962 100644 --- a/axelrod/strategies/dbs.py +++ b/axelrod/strategies/dbs.py @@ -1,10 +1,10 @@ from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer C, D = Action.C, Action.D -class DBS(Player): +class DBS(IpdPlayer): """ A strategy that learns the opponent's strategy and uses symbolic noise detection for detecting whether anomalies in player’s behavior are @@ -210,7 +210,7 @@ def compute_prob_rule(self, outcome, alpha=1): p_cond = discounted_g / discounted_f return p_cond - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # First move if not self.history: return C diff --git a/axelrod/strategies/defector.py b/axelrod/strategies/defector.py index 4e05184f8..d771079ba 100644 --- a/axelrod/strategies/defector.py +++ b/axelrod/strategies/defector.py @@ -1,10 +1,10 @@ from axelrod.action import Action -from axelrod.player import Player +from 
axelrod.player import IpdPlayer C, D = Action.C, Action.D -class Defector(Player): +class Defector(IpdPlayer): """A player who only ever defects. Names: @@ -26,11 +26,11 @@ class Defector(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: return D -class TrickyDefector(Player): +class TrickyDefector(IpdPlayer): """A defector that is trying to be tricky. Names: @@ -49,7 +49,7 @@ class TrickyDefector(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """Almost always defects, but will try to trick the opponent into cooperating. diff --git a/axelrod/strategies/doubler.py b/axelrod/strategies/doubler.py index 27c9ad847..a53dc932b 100644 --- a/axelrod/strategies/doubler.py +++ b/axelrod/strategies/doubler.py @@ -1,10 +1,10 @@ from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer C, D = Action.C, Action.D -class Doubler(Player): +class Doubler(IpdPlayer): """ Cooperates except when the opponent has defected and the opponent's cooperation count is less than twice their defection count. 
@@ -25,7 +25,7 @@ class Doubler(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not self.history: return C if ( diff --git a/axelrod/strategies/finite_state_machines.py b/axelrod/strategies/finite_state_machines.py index e7bdd7f63..0b511a8fa 100644 --- a/axelrod/strategies/finite_state_machines.py +++ b/axelrod/strategies/finite_state_machines.py @@ -5,7 +5,7 @@ from numpy.random import choice from axelrod.action import Action from axelrod.evolvable_player import EvolvablePlayer, InsufficientParametersError, copy_lists -from axelrod.player import Player +from axelrod.player import IpdPlayer C, D = Action.C, Action.D actions = (C, D) @@ -92,10 +92,10 @@ def num_states(self): return len(set(state for state, action in self._state_transitions)) -class FSMPlayer(Player): +class FSMPlayer(IpdPlayer): """Abstract base class for finite state machine players.""" - name = "FSM Player" + name = "FSM Player" classifier = { "memory_depth": 1, @@ -118,7 +118,7 @@ def __init__( self.initial_action = initial_action self.fsm = SimpleFSM(transitions, initial_state) - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) == 0: return self.initial_action else: diff --git a/axelrod/strategies/forgiver.py b/axelrod/strategies/forgiver.py index 4d2cb7ed8..2f10fce4a 100644 --- a/axelrod/strategies/forgiver.py +++ b/axelrod/strategies/forgiver.py @@ -1,10 +1,10 @@ from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer C, D = Action.C, Action.D -class Forgiver(Player): +class Forgiver(IpdPlayer): """ A player starts by cooperating however will defect if at any point the opponent has defected more than 10 percent of the time @@ -25,7 +25,7 @@ class Forgiver(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: 
IpdPlayer) -> Action: """ Begins by playing C, then plays D if the opponent has defected more than 10 percent of the time. @@ -35,7 +35,7 @@ def strategy(self, opponent: Player) -> Action: return C -class ForgivingTitForTat(Player): +class ForgivingTitForTat(IpdPlayer): """ A player starts by cooperating however will defect if at any point, the opponent has defected more than 10 percent of the time, and their most @@ -57,7 +57,7 @@ class ForgivingTitForTat(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """ Begins by playing C, then plays D if the opponent has defected more than 10 percent of the time and their most recent decision was defect. diff --git a/axelrod/strategies/gambler.py b/axelrod/strategies/gambler.py index f127ce0f4..07d1ff470 100644 --- a/axelrod/strategies/gambler.py +++ b/axelrod/strategies/gambler.py @@ -9,7 +9,7 @@ from axelrod.action import Action, str_to_actions, actions_to_str from axelrod.load_data_ import load_pso_tables -from axelrod.player import Player +from axelrod.player import IpdPlayer from axelrod.random_ import random_choice @@ -40,7 +40,7 @@ class Gambler(LookerUp): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: actions_or_float = super(Gambler, self).strategy(opponent) if isinstance(actions_or_float, Action): return actions_or_float diff --git a/axelrod/strategies/geller.py b/axelrod/strategies/geller.py index 8343f8aa1..e90f92e9d 100644 --- a/axelrod/strategies/geller.py +++ b/axelrod/strategies/geller.py @@ -6,13 +6,13 @@ from axelrod._strategy_utils import inspect_strategy from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer from axelrod.random_ import random_choice C, D = Action.C, Action.D -class Geller(Player): +class Geller(IpdPlayer): """Observes what the player will do in the next round 
and adjust. If unable to do this: will play randomly. @@ -54,7 +54,7 @@ def foil_strategy_inspection() -> Action: """Foils _strategy_utils.inspect_strategy and _strategy_utils.look_ahead""" return random_choice(0.5) - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """ Look at what the opponent will play in the next round and choose a strategy that gives the least jail time, which is is equivalent to playing the same diff --git a/axelrod/strategies/gobymajority.py b/axelrod/strategies/gobymajority.py index e79efca0a..ed6ac9dfa 100644 --- a/axelrod/strategies/gobymajority.py +++ b/axelrod/strategies/gobymajority.py @@ -2,12 +2,12 @@ from typing import Any, Dict, Union from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer C, D = Action.C, Action.D -class GoByMajority(Player): +class GoByMajority(IpdPlayer): """Submitted to Axelrod's second tournament by Gail Grisell. It came 23rd and was written in 10 lines of BASIC. @@ -70,7 +70,7 @@ def __init__( def __repr__(self): return self.name - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """This is affected by the history of the opponent. As long as the opponent cooperates at least as often as they defect then diff --git a/axelrod/strategies/gradualkiller.py b/axelrod/strategies/gradualkiller.py index 975f43c52..72e7001a3 100644 --- a/axelrod/strategies/gradualkiller.py +++ b/axelrod/strategies/gradualkiller.py @@ -1,17 +1,17 @@ from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer from axelrod.strategy_transformers import InitialTransformer C, D = Action.C, Action.D @InitialTransformer((D, D, D, D, D, C, C), name_prefix=None) -class GradualKiller(Player): +class GradualKiller(IpdPlayer): """ It begins by defecting in the first five moves, then cooperates two times. 
It then defects all the time if the opponent has defected in move 6 and 7, else cooperates all the time. - Initially designed to stop Gradual from defeating TitForTat in a 3 Player + Initially designed to stop Gradual from defeating TitForTat in a 3 player tournament. Names @@ -31,7 +31,7 @@ class GradualKiller(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if opponent.history[5:7] == [D, D]: return D return C diff --git a/axelrod/strategies/grudger.py b/axelrod/strategies/grudger.py index 61215bb9a..854b8e38a 100644 --- a/axelrod/strategies/grudger.py +++ b/axelrod/strategies/grudger.py @@ -1,10 +1,10 @@ from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer C, D = Action.C, Action.D -class Grudger(Player): +class Grudger(IpdPlayer): """ A player starts by cooperating however will defect if at any point the opponent has defected. @@ -33,7 +33,7 @@ class Grudger(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: """Begins by playing C, then plays D for the remaining rounds if the opponent ever plays D.""" @@ -41,7 +41,7 @@ def strategy(opponent: Player) -> Action: return C -class ForgetfulGrudger(Player): +class ForgetfulGrudger(IpdPlayer): """ A player starts by cooperating however will defect if at any point the opponent has defected, but forgets after mem_length matches. 
@@ -69,7 +69,7 @@ def __init__(self) -> None: self.grudged = False self.grudge_memory = 0 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """Begins by playing C, then plays D for mem_length rounds if the opponent ever plays D.""" if self.grudge_memory == self.mem_length: @@ -85,7 +85,7 @@ def strategy(self, opponent: Player) -> Action: return C -class OppositeGrudger(Player): +class OppositeGrudger(IpdPlayer): """ A player starts by defecting however will cooperate if at any point the opponent has cooperated. @@ -107,7 +107,7 @@ class OppositeGrudger(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: """Begins by playing D, then plays C for the remaining rounds if the opponent ever plays C.""" if opponent.cooperations: @@ -115,7 +115,7 @@ def strategy(opponent: Player) -> Action: return D -class Aggravater(Player): +class Aggravater(IpdPlayer): """ Grudger, except that it defects on the first 3 turns @@ -136,7 +136,7 @@ class Aggravater(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: if len(opponent.history) < 3: return D elif opponent.defections: @@ -144,7 +144,7 @@ def strategy(opponent: Player) -> Action: return C -class SoftGrudger(Player): +class SoftGrudger(IpdPlayer): """ A modification of the Grudger strategy. Instead of punishing by always defecting: punishes by playing: D, D, D, D, C, C. 
(Will continue to @@ -170,7 +170,7 @@ def __init__(self) -> None: self.grudged = False self.grudge_memory = 0 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """Begins by playing C, then plays D, D, D, D, C, C against a defection """ if self.grudged: @@ -186,7 +186,7 @@ def strategy(self, opponent: Player) -> Action: return C -class GrudgerAlternator(Player): +class GrudgerAlternator(IpdPlayer): """ A player starts by cooperating until the first opponents defection, then alternates D-C. @@ -208,7 +208,7 @@ class GrudgerAlternator(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """Begins by playing C, then plays Alternator for the remaining rounds if the opponent ever plays D.""" if opponent.defections: @@ -217,7 +217,7 @@ def strategy(self, opponent: Player) -> Action: return C -class EasyGo(Player): +class EasyGo(IpdPlayer): """ A player starts by defecting however will cooperate if at any point the opponent has defected. @@ -241,7 +241,7 @@ class EasyGo(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: """Begins by playing D, then plays C for the remaining rounds if the opponent ever plays D.""" if opponent.defections: @@ -249,7 +249,7 @@ def strategy(opponent: Player) -> Action: return D -class GeneralSoftGrudger(Player): +class GeneralSoftGrudger(IpdPlayer): """ A generalization of the SoftGrudger strategy. SoftGrudger punishes by playing: D, D, D, D, C, C. after a defection by the opponent. @@ -296,7 +296,7 @@ def __init__(self, n: int = 1, d: int = 4, c: int = 2) -> None: self.grudged = False self.grudge_memory = 0 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """ Punishes after its opponent defects 'n' times consecutively. 
The punishment is in the form of 'd' defections followed by a penance of diff --git a/axelrod/strategies/grumpy.py b/axelrod/strategies/grumpy.py index 6c4adaf5f..8730148cb 100644 --- a/axelrod/strategies/grumpy.py +++ b/axelrod/strategies/grumpy.py @@ -1,10 +1,10 @@ from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer C, D = Action.C, Action.D -class Grumpy(Player): +class Grumpy(IpdPlayer): """ A player that defects after a certain level of grumpiness. Grumpiness increases when the opponent defects and decreases @@ -49,7 +49,7 @@ def __init__( self.grumpy_threshold = grumpy_threshold self.nice_threshold = nice_threshold - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """A player that gets grumpier the more the opposition defects, and nicer the more they cooperate. diff --git a/axelrod/strategies/handshake.py b/axelrod/strategies/handshake.py index 61a0d8219..0b6da3a49 100644 --- a/axelrod/strategies/handshake.py +++ b/axelrod/strategies/handshake.py @@ -1,12 +1,12 @@ from typing import List from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer C, D = Action.C, Action.D -class Handshake(Player): +class Handshake(IpdPlayer): """Starts with C, D. If the opponent plays the same way, cooperate forever, else defect forever. 
@@ -32,7 +32,7 @@ def __init__(self, initial_plays: List[Action] = None) -> None: initial_plays = [C, D] self.initial_plays = initial_plays - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # Begin by playing the sequence C, D index = len(self.history) if index < len(self.initial_plays): diff --git a/axelrod/strategies/hmm.py b/axelrod/strategies/hmm.py index 8ae2ed811..59f7e9942 100644 --- a/axelrod/strategies/hmm.py +++ b/axelrod/strategies/hmm.py @@ -4,7 +4,7 @@ from axelrod.action import Action from axelrod.evolvable_player import EvolvablePlayer, InsufficientParametersError, copy_lists, crossover_lists -from axelrod.player import Player +from axelrod.player import IpdPlayer from axelrod.random_ import random_choice, random_vector C, D = Action.C, Action.D @@ -90,7 +90,7 @@ def is_well_formed(self) -> bool: return False return True - def __eq__(self, other: Player) -> bool: + def __eq__(self, other: IpdPlayer) -> bool: """Equality of two HMMs""" check = True for attr in [ @@ -120,16 +120,16 @@ def move(self, opponent_action: Action) -> Action: return action -class HMMPlayer(Player): +class HMMPlayer(IpdPlayer): """ Abstract base class for Hidden Markov Model players. 
Names - - HMM Player: Original name by Marc Harper + - HMM IpdPlayer: Original name by Marc Harper """ - name = "HMM Player" + name = "HMM IpdPlayer" classifier = { "memory_depth": 1, @@ -176,7 +176,7 @@ def is_stochastic(self) -> bool: return True return False - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) == 0: return self.initial_action else: diff --git a/axelrod/strategies/human.py b/axelrod/strategies/human.py index 753194248..ddf367b8d 100644 --- a/axelrod/strategies/human.py +++ b/axelrod/strategies/human.py @@ -1,7 +1,7 @@ from os import linesep from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer from prompt_toolkit import prompt from prompt_toolkit.validation import ValidationError, Validator @@ -39,7 +39,7 @@ def validate(self, document) -> None: raise ValidationError(message="Action must be C or D", cursor_position=0) -class Human(Player): +class Human(IpdPlayer): """ A strategy that prompts for keyboard input rather than deriving its own action. @@ -147,7 +147,7 @@ def _get_human_input(self) -> Action: # pragma: no cover return Action.from_char(action.upper()) - def strategy(self, opponent: Player, input_function=None): + def strategy(self, opponent: IpdPlayer, input_function=None): """ Ordinarily, the strategy prompts for keyboard input rather than deriving its own action. diff --git a/axelrod/strategies/hunter.py b/axelrod/strategies/hunter.py index fe2b39fa1..65fbe7c7a 100644 --- a/axelrod/strategies/hunter.py +++ b/axelrod/strategies/hunter.py @@ -2,12 +2,12 @@ from axelrod._strategy_utils import detect_cycle from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer C, D = Action.C, Action.D -class DefectorHunter(Player): +class DefectorHunter(IpdPlayer): """A player who hunts for defectors. 
Names: @@ -26,13 +26,13 @@ class DefectorHunter(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) >= 4 and len(opponent.history) == opponent.defections: return D return C -class CooperatorHunter(Player): +class CooperatorHunter(IpdPlayer): """A player who hunts for cooperators. Names: @@ -51,7 +51,7 @@ class CooperatorHunter(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) >= 4 and len(opponent.history) == opponent.cooperations: return D return C @@ -64,7 +64,7 @@ def is_alternator(history: List[Action]) -> bool: return True -class AlternatorHunter(Player): +class AlternatorHunter(IpdPlayer): """A player who hunts for alternators. Names: @@ -87,7 +87,7 @@ def __init__(self) -> None: super().__init__() self.is_alt = False - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(opponent.history) < 6: return C if len(self.history) == 6: @@ -98,7 +98,7 @@ def strategy(self, opponent: Player) -> Action: return C -class CycleHunter(Player): +class CycleHunter(IpdPlayer): """Hunts strategies that play cyclically, like any of the Cyclers, Alternator, etc. 
@@ -122,7 +122,7 @@ def __init__(self) -> None: super().__init__() self.cycle = None # type: Optional[Tuple[Action]] - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if self.cycle: return D cycle = detect_cycle(opponent.history, min_size=3) @@ -143,7 +143,7 @@ class EventualCycleHunter(CycleHunter): name = "Eventual Cycle Hunter" - def strategy(self, opponent: Player) -> None: + def strategy(self, opponent: IpdPlayer) -> None: if len(opponent.history) < 10: return C if len(opponent.history) == opponent.cooperations: @@ -157,7 +157,7 @@ def strategy(self, opponent: Player) -> None: return C -class MathConstantHunter(Player): +class MathConstantHunter(IpdPlayer): """A player who hunts for mathematical constant players. Names: @@ -176,7 +176,7 @@ class MathConstantHunter(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """ Check whether the number of cooperations in the first and second halves of the history are close. The variance of the uniform distribution (1/4) @@ -206,7 +206,7 @@ def strategy(self, opponent: Player) -> Action: return C -class RandomHunter(Player): +class RandomHunter(IpdPlayer): """A player who hunts for random players. 
Names: @@ -230,7 +230,7 @@ def __init__(self) -> None: self.countDD = 0 super().__init__() - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """ A random player is unpredictable, which means the conditional frequency of cooperation after cooperation, and defection after defections, should diff --git a/axelrod/strategies/inverse.py b/axelrod/strategies/inverse.py index 092309330..8f7d566d1 100644 --- a/axelrod/strategies/inverse.py +++ b/axelrod/strategies/inverse.py @@ -1,11 +1,11 @@ from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer from axelrod.random_ import random_choice C, D = Action.C, Action.D -class Inverse(Player): +class Inverse(IpdPlayer): """A player who defects with a probability that diminishes relative to how long ago the opponent defected. @@ -26,7 +26,7 @@ class Inverse(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: """Looks at opponent history to see if they have defected. If so, player defection is inversely proportional to when this occurred. diff --git a/axelrod/strategies/lookerup.py b/axelrod/strategies/lookerup.py index b66d06293..69555fea7 100644 --- a/axelrod/strategies/lookerup.py +++ b/axelrod/strategies/lookerup.py @@ -7,7 +7,7 @@ from axelrod.action import Action, actions_to_str, str_to_actions from axelrod.evolvable_player import EvolvablePlayer, InsufficientParametersError, crossover_dictionaries -from axelrod.player import Player +from axelrod.player import IpdPlayer C, D = Action.C, Action.D @@ -227,7 +227,7 @@ def create_lookup_table_keys( } -class LookerUp(Player): +class LookerUp(IpdPlayer): """ This strategy uses a LookupTable to decide its next action. 
If there is not enough history to use the table, it calls from a list of @@ -361,7 +361,7 @@ def _get_initial_actions(self, initial_actions: tuple) -> tuple: return initial_actions + tuple([C] * initial_actions_shortfall) return initial_actions[:table_depth] - def strategy(self, opponent: Player) -> Reaction: + def strategy(self, opponent: IpdPlayer) -> Reaction: turn_index = len(opponent.history) while turn_index < len(self._initial_actions_pool): return self._initial_actions_pool[turn_index] @@ -573,7 +573,7 @@ def __init__(self) -> None: super().__init__(parameters=params, pattern=pattern, initial_actions=(D, C)) -def get_last_n_plays(player: Player, depth: int) -> tuple: +def get_last_n_plays(player: IpdPlayer, depth: int) -> tuple: """Returns the last N plays of player as a tuple.""" if depth == 0: return () diff --git a/axelrod/strategies/mathematicalconstants.py b/axelrod/strategies/mathematicalconstants.py index 8f88d1a2b..839e957c8 100644 --- a/axelrod/strategies/mathematicalconstants.py +++ b/axelrod/strategies/mathematicalconstants.py @@ -1,12 +1,12 @@ import math from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer C, D = Action.C, Action.D -class CotoDeRatio(Player): +class CotoDeRatio(IpdPlayer): """The player will always aim to bring the ratio of co-operations to defections closer to the ratio as given in a sub class @@ -25,7 +25,7 @@ class CotoDeRatio(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # Initially cooperate if len(opponent.history) == 0: return C diff --git a/axelrod/strategies/memoryone.py b/axelrod/strategies/memoryone.py index 8fa4aeae6..c5bad7526 100644 --- a/axelrod/strategies/memoryone.py +++ b/axelrod/strategies/memoryone.py @@ -5,13 +5,13 @@ from typing import Tuple from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer from 
axelrod.random_ import random_choice C, D = Action.C, Action.D -class MemoryOnePlayer(Player): +class MemoryOnePlayer(IpdPlayer): """ Uses a four-vector for strategies based on the last round of play, (P(C|CC), P(C|CD), P(C|DC), P(C|DD)). Win-Stay Lose-Shift is set as @@ -24,7 +24,7 @@ class MemoryOnePlayer(Player): - Memory One: [Nowak1990]_ """ - name = "Generic Memory One Player" + name = "Generic Memory One IpdPlayer" classifier = { "memory_depth": 1, # Memory-one Four-Vector "stochastic": True, @@ -73,7 +73,7 @@ def set_initial_four_vector(self, four_vector): warnings.warn("Memory one player is set to default (1, 0, 0, 1).") self.set_four_vector(four_vector) - if self.name == "Generic Memory One Player": + if self.name == "Generic Memory One IpdPlayer": self.name = "%s: %s" % (self.name, four_vector) def set_four_vector(self, four_vector: Tuple[float, float, float, float]): @@ -86,7 +86,7 @@ def set_four_vector(self, four_vector: Tuple[float, float, float, float]): self._four_vector = dict(zip([(C, C), (C, D), (D, C), (D, D)], four_vector)) self.classifier["stochastic"] = any(0 < x < 1 for x in set(four_vector)) - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(opponent.history) == 0: return self._initial # Determine which probability to use @@ -294,7 +294,7 @@ def __repr__(self) -> str: return "%s: %s" % (self.name, round(self.q, 2)) -class ALLCorALLD(Player): +class ALLCorALLD(IpdPlayer): """This strategy is at the parameter extreme of the ZD strategies (phi = 0). It simply repeats its last move, and so mimics ALLC or ALLD after round one. If the tournament is noisy, there will be long runs of C and D. 
@@ -319,7 +319,7 @@ class ALLCorALLD(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) == 0: return random_choice(0.6) return self.history[-1] @@ -335,7 +335,7 @@ class ReactivePlayer(MemoryOnePlayer): - Reactive: [Nowak1989]_ """ - name = "Reactive Player" + name = "Reactive IpdPlayer" def __init__(self, probabilities: Tuple[float, float]) -> None: four_vector = (*probabilities, *probabilities) diff --git a/axelrod/strategies/memorytwo.py b/axelrod/strategies/memorytwo.py index 3466256b3..ab1f91214 100644 --- a/axelrod/strategies/memorytwo.py +++ b/axelrod/strategies/memorytwo.py @@ -5,7 +5,7 @@ from typing import Dict, Tuple from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer from axelrod.random_ import random_choice from .defector import Defector @@ -14,7 +14,7 @@ C, D = Action.C, Action.D -class MemoryTwoPlayer(Player): +class MemoryTwoPlayer(IpdPlayer): """ Uses a sixteen-vector for strategies based on the 16 conditional probabilities P(X | I,J,K,L) where X, I, J, K, L in [C, D] and I, J are the players last @@ -43,7 +43,7 @@ class MemoryTwoPlayer(Player): - Memory Two: [Hilbe2017]_ """ - name = "Generic Memory Two Player" + name = "Generic Memory Two IpdPlayer" classifier = { "memory_depth": 2, "stochastic": False, @@ -76,7 +76,7 @@ def set_initial_sixteen_vector(self, sixteen_vector): warnings.warn("Memory two player is set to default, Cooperator.") self.set_sixteen_vector(sixteen_vector) - if self.name == "Generic Memory Two Player": + if self.name == "Generic Memory Two IpdPlayer": self.name = "%s: %s" % (self.name, sixteen_vector) def set_sixteen_vector(self, sixteen_vector: Tuple): @@ -95,7 +95,7 @@ def set_sixteen_vector(self, sixteen_vector: Tuple): ) # type: Dict[tuple, float] self.classifier["stochastic"] = any(0 < x < 1 for x in set(sixteen_vector)) - def strategy(self, opponent: 
Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(opponent.history) <= 1: return self._initial # Determine which probability to use @@ -206,7 +206,7 @@ def __init__(self) -> None: super().__init__(sixteen_vector) -class MEM2(Player): +class MEM2(IpdPlayer): """A memory-two player that switches between TFT, TFTT, and ALLD. Note that the reference claims that this is a memory two strategy but in @@ -237,7 +237,7 @@ def __init__(self) -> None: self.shift_counter = 3 self.alld_counter = 0 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # Update Histories # Note that this assumes that TFT and TFTT do not use internal counters, # Rather that they examine the actual history of play diff --git a/axelrod/strategies/meta.py b/axelrod/strategies/meta.py index 09bebd26b..49d02cfae 100644 --- a/axelrod/strategies/meta.py +++ b/axelrod/strategies/meta.py @@ -5,7 +5,7 @@ from axelrod.action import Action from axelrod.classifier import Classifiers -from axelrod.player import Player +from axelrod.player import IpdPlayer from axelrod.strategies import TitForTat from axelrod.strategy_transformers import NiceTransformer from ._strategies import all_strategies @@ -25,16 +25,16 @@ C, D = Action.C, Action.D -class MetaPlayer(Player): +class MetaPlayer(IpdPlayer): """ A generic player that has its own team of players. 
Names: - - Meta Player: Original name by Karol Langner + - Meta IpdPlayer: Original name by Karol Langner """ - name = "Meta Player" + name = "Meta IpdPlayer" classifier = { "memory_depth": float("inf"), # Long memory "stochastic": True, @@ -623,7 +623,7 @@ def __init__( loss_value: float = -2, gain_value: float = 1, memory: list = None, - start_strategy: Player = TitForTat, + start_strategy: IpdPlayer = TitForTat, start_strategy_duration: int = 15, ): super().__init__(team=[start_strategy]) @@ -640,7 +640,7 @@ def __init__( self.gloss_values = None def __repr__(self): - return Player.__repr__(self) + return IpdPlayer.__repr__(self) def gain_loss_translate(self): """ diff --git a/axelrod/strategies/mindcontrol.py b/axelrod/strategies/mindcontrol.py index bdb3e974b..74033b5a5 100644 --- a/axelrod/strategies/mindcontrol.py +++ b/axelrod/strategies/mindcontrol.py @@ -1,10 +1,10 @@ from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer C, D = Action.C, Action.D -class MindController(Player): +class MindController(IpdPlayer): """A player that changes the opponents strategy to cooperate. Names @@ -24,7 +24,7 @@ class MindController(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: """ Alters the opponents strategy method to be a lambda function which always returns C. This player will then always return D to take @@ -35,7 +35,7 @@ def strategy(opponent: Player) -> Action: return D -class MindWarper(Player): +class MindWarper(IpdPlayer): """ A player that changes the opponent's strategy but blocks changes to its own. 
@@ -63,7 +63,7 @@ def __setattr__(self, name: str, val: str): self.__dict__[name] = val @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: opponent.strategy = lambda opponent: C return D @@ -90,6 +90,6 @@ class MindBender(MindWarper): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: opponent.__dict__["strategy"] = lambda opponent: C return D diff --git a/axelrod/strategies/mindreader.py b/axelrod/strategies/mindreader.py index dd6c64269..ccda24ccf 100644 --- a/axelrod/strategies/mindreader.py +++ b/axelrod/strategies/mindreader.py @@ -5,12 +5,12 @@ """ from axelrod._strategy_utils import inspect_strategy, look_ahead from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer C, D = Action.C, Action.D -class MindReader(Player): +class MindReader(IpdPlayer): """A player that looks ahead at what the opponent will do and decides what to do. @@ -35,7 +35,7 @@ def foil_strategy_inspection() -> Action: """Foils _strategy_utils.inspect_strategy and _strategy_utils.look_ahead""" return D - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """ Pretends to play the opponent a number of times before each match. The primary purpose is to look far enough ahead to see if a defect will @@ -103,6 +103,6 @@ def foil_strategy_inspection() -> Action: """Foils _strategy_utils.inspect_strategy and _strategy_utils.look_ahead""" return C - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """Will read the mind of the opponent and play the opponent's strategy. 
""" return inspect_strategy(self, opponent) diff --git a/axelrod/strategies/mutual.py b/axelrod/strategies/mutual.py index d537bbaad..e06fbda0e 100644 --- a/axelrod/strategies/mutual.py +++ b/axelrod/strategies/mutual.py @@ -1,11 +1,11 @@ from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer from axelrod.random_ import random_choice C, D = Action.C, Action.D -class Desperate(Player): +class Desperate(IpdPlayer): """A player that only cooperates after mutual defection. Names: @@ -23,7 +23,7 @@ class Desperate(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not opponent.history: return random_choice() if self.history[-1] == D and opponent.history[-1] == D: @@ -31,7 +31,7 @@ def strategy(self, opponent: Player) -> Action: return D -class Hopeless(Player): +class Hopeless(IpdPlayer): """A player that only defects after mutual cooperation. Names: @@ -49,7 +49,7 @@ class Hopeless(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not opponent.history: return random_choice() if self.history[-1] == C and opponent.history[-1] == C: @@ -57,7 +57,7 @@ def strategy(self, opponent: Player) -> Action: return C -class Willing(Player): +class Willing(IpdPlayer): """A player that only defects after mutual defection. 
Names: @@ -75,7 +75,7 @@ class Willing(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not opponent.history: return random_choice() if self.history[-1] == D and opponent.history[-1] == D: diff --git a/axelrod/strategies/negation.py b/axelrod/strategies/negation.py index cccf218a1..d6d7379e0 100644 --- a/axelrod/strategies/negation.py +++ b/axelrod/strategies/negation.py @@ -1,11 +1,11 @@ from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer from axelrod.random_ import random_choice C, D = Action.C, Action.D -class Negation(Player): +class Negation(IpdPlayer): """ A player starts by cooperating or defecting randomly if it's their first move, then simply doing the opposite of the opponents last move thereafter. @@ -26,7 +26,7 @@ class Negation(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # Random first move if not self.history: return random_choice() diff --git a/axelrod/strategies/oncebitten.py b/axelrod/strategies/oncebitten.py index 4703e3c70..aee84f5c3 100644 --- a/axelrod/strategies/oncebitten.py +++ b/axelrod/strategies/oncebitten.py @@ -1,12 +1,12 @@ import random from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer C, D = Action.C, Action.D -class OnceBitten(Player): +class OnceBitten(IpdPlayer): """ Cooperates once when the opponent defects, but if they defect twice in a row defaults to forgetful grudger for 10 turns defecting. @@ -33,7 +33,7 @@ def __init__(self) -> None: self.grudged = False self.grudge_memory = 0 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """ Begins by playing C, then plays D for mem_length rounds if the opponent ever plays D twice in a row. 
@@ -54,7 +54,7 @@ def strategy(self, opponent: Player) -> Action: return C -class FoolMeOnce(Player): +class FoolMeOnce(IpdPlayer): """ Forgives one D then retaliates forever on a second D. @@ -75,7 +75,7 @@ class FoolMeOnce(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: if not opponent.history: return C if opponent.defections > 1: @@ -83,11 +83,11 @@ def strategy(opponent: Player) -> Action: return C -class ForgetfulFoolMeOnce(Player): +class ForgetfulFoolMeOnce(IpdPlayer): """ Forgives one D then retaliates forever on a second D. Sometimes randomly forgets the defection count, and so keeps a secondary count separate from - the standard count in Player. + the standard count in IpdPlayer. Names: @@ -117,7 +117,7 @@ def __init__(self, forget_probability: float = 0.05) -> None: self._initial = C self.forget_probability = forget_probability - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: r = random.random() if not opponent.history: return self._initial diff --git a/axelrod/strategies/prober.py b/axelrod/strategies/prober.py index 96940a1fc..3ed58644a 100644 --- a/axelrod/strategies/prober.py +++ b/axelrod/strategies/prober.py @@ -2,7 +2,7 @@ from typing import List from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer from axelrod.random_ import random_choice Vector = List[float] @@ -11,7 +11,7 @@ C, D = Action.C, Action.D -class CollectiveStrategy(Player): +class CollectiveStrategy(IpdPlayer): """Defined in [Li2009]_. 'It always cooperates in the first move and defects in the second move. If the opponent also cooperates in the first move and defects in the second move, CS will cooperate until the opponent defects. 
@@ -35,7 +35,7 @@ class CollectiveStrategy(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) if turn == 0: return C @@ -48,7 +48,7 @@ def strategy(self, opponent: Player) -> Action: return D -class Detective(Player): +class Detective(IpdPlayer): """ Starts with C, D, C, C, or with the given sequence of actions. If the opponent defects at least once in the first fixed rounds, @@ -77,7 +77,7 @@ def __init__(self, initial_actions: List[Action] = None) -> None: else: self.initial_actions = initial_actions - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: hist_size = len(self.history) init_size = len(self.initial_actions) if hist_size < init_size: @@ -87,7 +87,7 @@ def strategy(self, opponent: Player) -> Action: return opponent.history[-1] # TFT -class Prober(Player): +class Prober(IpdPlayer): """ Plays D, C, C initially. Defects forever if opponent cooperated in moves 2 and 3. Otherwise plays TFT. @@ -108,7 +108,7 @@ class Prober(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) if turn == 0: return D @@ -124,7 +124,7 @@ def strategy(self, opponent: Player) -> Action: return D if opponent.history[-1:] == [D] else C -class Prober2(Player): +class Prober2(IpdPlayer): """ Plays D, C, C initially. Cooperates forever if opponent played D then C in moves 2 and 3. Otherwise plays TFT. @@ -145,7 +145,7 @@ class Prober2(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) if turn == 0: return D @@ -161,7 +161,7 @@ def strategy(self, opponent: Player) -> Action: return D if opponent.history[-1:] == [D] else C -class Prober3(Player): +class Prober3(IpdPlayer): """ Plays D, C initially. 
Defects forever if opponent played C in moves 2. Otherwise plays TFT. @@ -182,7 +182,7 @@ class Prober3(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) if turn == 0: return D @@ -196,7 +196,7 @@ def strategy(self, opponent: Player) -> Action: return D if opponent.history[-1:] == [D] else C -class Prober4(Player): +class Prober4(IpdPlayer): """ Plays C, C, D, C, D, D, D, C, C, D, C, D, C, C, D, C, D, D, C, D initially. Counts retaliating and provocative defections of the opponent. @@ -248,7 +248,7 @@ def __init__(self) -> None: self.unjust_Ds = 0 self.turned_defector = False - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not self.history: return self.init_sequence[0] turn = len(self.history) @@ -270,7 +270,7 @@ def strategy(self, opponent: Player) -> Action: return D if opponent.history[-1] == D else C -class HardProber(Player): +class HardProber(IpdPlayer): """ Plays D, D, C, C initially. Defects forever if opponent cooperated in moves 2 and 3. Otherwise plays TFT. @@ -291,7 +291,7 @@ class HardProber(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) if turn == 0: return D @@ -309,7 +309,7 @@ def strategy(self, opponent: Player) -> Action: return D if opponent.history[-1:] == [D] else C -class NaiveProber(Player): +class NaiveProber(IpdPlayer): """ Like tit-for-tat, but it occasionally defects with a small probability. 
@@ -341,7 +341,7 @@ def __init__(self, p: float = 0.1) -> None: if (self.p == 0) or (self.p == 1): self.classifier["stochastic"] = False - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # First move if len(self.history) == 0: return C @@ -385,7 +385,7 @@ def __init__(self, p: float = 0.1) -> None: super().__init__(p) self.probing = False - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # First move if len(self.history) == 0: return C diff --git a/axelrod/strategies/punisher.py b/axelrod/strategies/punisher.py index dd8145576..30ff7831a 100644 --- a/axelrod/strategies/punisher.py +++ b/axelrod/strategies/punisher.py @@ -1,12 +1,12 @@ from typing import List from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer C, D = Action.C, Action.D -class Punisher(Player): +class Punisher(IpdPlayer): """ A player starts by cooperating however will defect if at any point the opponent has defected, but forgets after meme_length matches, with @@ -38,7 +38,7 @@ def __init__(self) -> None: self.grudged = False self.grudge_memory = 1 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """ Begins by playing C, then plays D for an amount of rounds proportional to the opponents historical '%' of playing D if the opponent ever @@ -61,7 +61,7 @@ def strategy(self, opponent: Player) -> Action: return C -class InversePunisher(Player): +class InversePunisher(IpdPlayer): """ An inverted version of Punisher. 
The player starts by cooperating however will defect if at any point the opponent has defected, and forgets after @@ -90,7 +90,7 @@ def __init__(self) -> None: self.grudged = False self.grudge_memory = 1 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """ Begins by playing C, then plays D for an amount of rounds proportional to the opponents historical '%' of playing C if the opponent ever plays @@ -113,7 +113,7 @@ def strategy(self, opponent: Player) -> Action: return C -class LevelPunisher(Player): +class LevelPunisher(IpdPlayer): """ A player starts by cooperating however, after 10 rounds will defect if at any point the number of defections @@ -135,7 +135,7 @@ class LevelPunisher(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(opponent.history) < 10: return C elif (len(opponent.history) - opponent.cooperations) / len( @@ -146,7 +146,7 @@ def strategy(self, opponent: Player) -> Action: return C -class TrickyLevelPunisher(Player): +class TrickyLevelPunisher(IpdPlayer): """ A player starts by cooperating however, after 10, 50 and 100 rounds will defect if at any point the percentage of defections @@ -168,7 +168,7 @@ class TrickyLevelPunisher(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(opponent.history) == 0: return C if len(opponent.history) < 10: diff --git a/axelrod/strategies/qlearner.py b/axelrod/strategies/qlearner.py index dd308feb6..72387d010 100644 --- a/axelrod/strategies/qlearner.py +++ b/axelrod/strategies/qlearner.py @@ -3,7 +3,7 @@ from typing import Dict, List, Union from axelrod.action import Action, actions_to_str -from axelrod.player import Player +from axelrod.player import IpdPlayer from axelrod.random_ import random_choice Score = Union[int, float] @@ -11,7 +11,7 @@ C, D = Action.C, 
Action.D -class RiskyQLearner(Player): +class RiskyQLearner(IpdPlayer): """A player who learns the best strategies through the q-learning algorithm. @@ -58,7 +58,7 @@ def receive_match_attributes(self): (R, P, S, T) = self.match_attributes["game"].RPST() self.payoff_matrix = {C: {C: R, D: S}, D: {C: T, D: P}} - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """Runs a qlearn algorithm while the tournament is running.""" if len(self.history) == 0: self.prev_action = random_choice() @@ -84,7 +84,7 @@ def select_action(self, state: str) -> Action: return max(self.Qs[state], key=lambda x: self.Qs[state][x]) return random_choice() - def find_state(self, opponent: Player) -> str: + def find_state(self, opponent: IpdPlayer) -> str: """ Finds the my_state (the opponents last n moves + its previous proportion of playing C) as a hashable state @@ -102,7 +102,7 @@ def perform_q_learning(self, prev_state: str, state: str, action: Action, reward ] + self.learning_rate * (reward + self.discount_rate * self.Vs[state]) self.Vs[prev_state] = max(self.Qs[prev_state].values()) - def find_reward(self, opponent: Player) -> Dict[Action, Dict[Action, Score]]: + def find_reward(self, opponent: IpdPlayer) -> Dict[Action, Dict[Action, Score]]: """ Finds the reward gained on the last iteration """ diff --git a/axelrod/strategies/rand.py b/axelrod/strategies/rand.py index eb259e37c..57dd1b182 100644 --- a/axelrod/strategies/rand.py +++ b/axelrod/strategies/rand.py @@ -1,9 +1,9 @@ from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer from axelrod.random_ import random_choice -class Random(Player): +class Random(IpdPlayer): """A player who randomly chooses between cooperating and defecting. This strategy came 15th in Axelrod's original tournament. 
@@ -42,5 +42,5 @@ def __init__(self, p: float = 0.5) -> None: if p in [0, 1]: self.classifier["stochastic"] = False - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: return random_choice(self.p) diff --git a/axelrod/strategies/resurrection.py b/axelrod/strategies/resurrection.py index de266320a..5595ccbc0 100644 --- a/axelrod/strategies/resurrection.py +++ b/axelrod/strategies/resurrection.py @@ -1,10 +1,10 @@ from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer C, D = Action.C, Action.D -class Resurrection(Player): +class Resurrection(IpdPlayer): """ A player starts by cooperating and defects if the number of rounds played by the player is greater than five and the last five rounds @@ -29,7 +29,7 @@ class Resurrection(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) == 0: return C if len(self.history) >= 5 and self.history[-5:] == [D, D, D, D, D]: @@ -38,7 +38,7 @@ def strategy(self, opponent: Player) -> Action: return opponent.history[-1] -class DoubleResurrection(Player): +class DoubleResurrection(IpdPlayer): """ A player starts by cooperating and defects if the number of rounds played by the player is greater than five and the last five rounds @@ -62,7 +62,7 @@ class DoubleResurrection(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) == 0: return C if len(self.history) >= 5 and self.history[-5:] == [C, C, C, C, C]: diff --git a/axelrod/strategies/retaliate.py b/axelrod/strategies/retaliate.py index be4b49648..ad30b67f1 100644 --- a/axelrod/strategies/retaliate.py +++ b/axelrod/strategies/retaliate.py @@ -1,12 +1,12 @@ from collections import defaultdict from axelrod.action import Action -from axelrod.player import Player +from 
axelrod.player import IpdPlayer C, D = Action.C, Action.D -class Retaliate(Player): +class Retaliate(IpdPlayer): """ A player starts by cooperating but will retaliate once the opponent has won more than 10 percent times the number of defections the player has. @@ -29,14 +29,14 @@ class Retaliate(Player): def __init__(self, retaliation_threshold: float = 0.1) -> None: """ - Uses the basic init from the Player class, but also set the name to + Uses the basic init from the IpdPlayer class, but also set the name to include the retaliation setting. """ super().__init__() self.retaliation_threshold = retaliation_threshold self.play_counts = defaultdict(int) # type: defaultdict - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """ If the opponent has played D to my C more often than x% of the time that I've done the same to him, play D. Otherwise, play C. @@ -82,7 +82,7 @@ def __init__(self, retaliation_threshold: float = 0.05) -> None: super().__init__(retaliation_threshold=retaliation_threshold) -class LimitedRetaliate(Player): +class LimitedRetaliate(IpdPlayer): """ A player that co-operates unless the opponent defects and wins. It will then retaliate by defecting. It stops when either, it has beaten @@ -125,7 +125,7 @@ def __init__( self.retaliation_limit = retaliation_limit self.play_counts = defaultdict(int) # type: defaultdict - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """ If the opponent has played D to my C more often than x% of the time that I've done the same to him, retaliate by playing D but stop doing diff --git a/axelrod/strategies/revised_downing.py b/axelrod/strategies/revised_downing.py index 530905c1b..0155df07c 100644 --- a/axelrod/strategies/revised_downing.py +++ b/axelrod/strategies/revised_downing.py @@ -3,11 +3,11 @@ Axelrod's tournaments. 
""" from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer C, D = Action.C, Action.D -class RevisedDowning(Player): +class RevisedDowning(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Leslie Downing. (K59R). @@ -44,7 +44,7 @@ def __init__(self) -> None: self.total_C = 0 # note the same as self.cooperations self.total_D = 0 # note the same as self.defections - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: round_number = len(self.history) + 1 if round_number == 1: diff --git a/axelrod/strategies/selfsteem.py b/axelrod/strategies/selfsteem.py index 59b43657b..ffd49347d 100644 --- a/axelrod/strategies/selfsteem.py +++ b/axelrod/strategies/selfsteem.py @@ -1,13 +1,13 @@ from math import pi, sin from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer from axelrod.random_ import random_choice C, D = Action.C, Action.D -class SelfSteem(Player): +class SelfSteem(IpdPlayer): """ This strategy is based on the feeling with the same name. 
It is modeled on the sine curve(f = sin( 2* pi * n / 10 )), which varies @@ -37,7 +37,7 @@ class SelfSteem(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turns_number = len(self.history) sine_value = sin(2 * pi * turns_number / 10) diff --git a/axelrod/strategies/sequence_player.py b/axelrod/strategies/sequence_player.py index d04d30830..e5a1ae1fd 100644 --- a/axelrod/strategies/sequence_player.py +++ b/axelrod/strategies/sequence_player.py @@ -3,18 +3,18 @@ from axelrod._strategy_utils import thue_morse_generator from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer C, D = Action.C, Action.D -class SequencePlayer(Player): +class SequencePlayer(IpdPlayer): """Abstract base class for players that use a generated sequence to determine their plays. Names: - - Sequence Player: Original name by Marc Harper + - Sequence Player: Original name by Marc Harper """ def __init__( @@ -33,7 +33,7 @@ def meta_strategy(value: int) -> Action: else: return C - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # Iterate through the sequence and apply the meta strategy for s in self.sequence_generator: return self.meta_strategy(s) diff --git a/axelrod/strategies/shortmem.py b/axelrod/strategies/shortmem.py index 23bf7c523..4d022d875 100644 --- a/axelrod/strategies/shortmem.py +++ b/axelrod/strategies/shortmem.py @@ -1,10 +1,10 @@ -from axelrod import Player +from axelrod import IpdPlayer from axelrod.action import Action C, D = Action.C, Action.D -class ShortMem(Player): +class ShortMem(IpdPlayer): """ A player starts by always cooperating for the first 10 moves. 
@@ -32,7 +32,7 @@ class ShortMem(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: if len(opponent.history) <= 10: return C diff --git a/axelrod/strategies/stalker.py b/axelrod/strategies/stalker.py index bc533cfa3..0339afee5 100644 --- a/axelrod/strategies/stalker.py +++ b/axelrod/strategies/stalker.py @@ -1,5 +1,5 @@ from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer from axelrod.random_ import random_choice from axelrod.strategy_transformers import FinalTransformer @@ -7,7 +7,7 @@ @FinalTransformer((D,), name_prefix=None) # End with defection -class Stalker(Player): +class Stalker(IpdPlayer): """ This is a strategy which is only influenced by the score. @@ -49,14 +49,14 @@ def receive_match_attributes(self): self.wish_score = (R + P) / 2 self.current_score = 0 - def score_last_round(self, opponent: Player): + def score_last_round(self, opponent: IpdPlayer): # Load the default game if not supplied by a tournament. game = self.match_attributes["game"] last_round = (self.history[-1], opponent.history[-1]) scores = game.score(last_round) self.current_score += scores[0] - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) == 0: return C diff --git a/axelrod/strategies/titfortat.py b/axelrod/strategies/titfortat.py index cb3b79245..1e5eb29f4 100644 --- a/axelrod/strategies/titfortat.py +++ b/axelrod/strategies/titfortat.py @@ -1,12 +1,12 @@ from axelrod.action import Action, actions_to_str -from axelrod.player import Player +from axelrod.player import IpdPlayer from axelrod.random_ import random_choice from axelrod.strategy_transformers import FinalTransformer, TrackHistoryTransformer C, D = Action.C, Action.D -class TitForTat(Player): +class TitForTat(IpdPlayer): """ A player starts by cooperating and then mimics the previous action of the opponent. 
@@ -36,7 +36,7 @@ class TitForTat(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """This is the actual strategy""" # First move if not self.history: @@ -47,7 +47,7 @@ def strategy(self, opponent: Player) -> Action: return C -class TitFor2Tats(Player): +class TitFor2Tats(IpdPlayer): """A player starts by cooperating and then defects only after two defects by opponent. @@ -73,11 +73,11 @@ class TitFor2Tats(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: return D if opponent.history[-2:] == [D, D] else C -class TwoTitsForTat(Player): +class TwoTitsForTat(IpdPlayer): """A player starts by cooperating and replies to each defect by two defections. @@ -98,11 +98,11 @@ class TwoTitsForTat(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: return D if D in opponent.history[-2:] else C -class DynamicTwoTitsForTat(Player): +class DynamicTwoTitsForTat(IpdPlayer): """ A player starts by cooperating and then punishes its opponent's defections with defections, but with a dynamic bias towards cooperating @@ -139,7 +139,7 @@ def strategy(opponent): return C -class Bully(Player): +class Bully(IpdPlayer): """A player that behaves opposite to Tit For Tat, including first move. Starts by defecting and then does the opposite of opponent's previous move. @@ -164,11 +164,11 @@ class Bully(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: return C if opponent.history[-1:] == [D] else D -class SneakyTitForTat(Player): +class SneakyTitForTat(IpdPlayer): """Tries defecting once and repents if punished. 
Names: @@ -187,7 +187,7 @@ class SneakyTitForTat(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) < 2: return C if D not in opponent.history: @@ -197,7 +197,7 @@ def strategy(self, opponent: Player) -> Action: return opponent.history[-1] -class SuspiciousTitForTat(Player): +class SuspiciousTitForTat(IpdPlayer): """A variant of Tit For Tat that starts off with a defection. Names: @@ -218,11 +218,11 @@ class SuspiciousTitForTat(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: return C if opponent.history[-1:] == [C] else D -class AntiTitForTat(Player): +class AntiTitForTat(IpdPlayer): """A strategy that plays the opposite of the opponents previous move. This is similar to Bully, except that the first move is cooperation. @@ -244,11 +244,11 @@ class AntiTitForTat(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: return D if opponent.history[-1:] == [C] else C -class HardTitForTat(Player): +class HardTitForTat(IpdPlayer): """A variant of Tit For Tat that uses a longer history for retaliation. Names: @@ -268,7 +268,7 @@ class HardTitForTat(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: # Cooperate on the first move if not opponent.history: return C @@ -279,7 +279,7 @@ def strategy(opponent: Player) -> Action: return C -class HardTitFor2Tats(Player): +class HardTitFor2Tats(IpdPlayer): """A variant of Tit For Two Tats that uses a longer history for retaliation. 
@@ -300,7 +300,7 @@ class HardTitFor2Tats(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: # Cooperate on the first move if not opponent.history: return C @@ -312,7 +312,7 @@ def strategy(opponent: Player) -> Action: return C -class OmegaTFT(Player): +class OmegaTFT(IpdPlayer): """OmegaTFT modifies Tit For Tat in two ways: - checks for deadlock loops of alternating rounds of (C, D) and (D, C), and attempting to break them @@ -343,7 +343,7 @@ def __init__( self.randomness_counter = 0 self.deadlock_counter = 0 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # Cooperate on the first move if not self.history: return C @@ -384,7 +384,7 @@ def strategy(self, opponent: Player) -> Action: return move -class OriginalGradual(Player): +class OriginalGradual(IpdPlayer): """ A player that punishes defections with a growing number of defections but after punishing for `punishment_limit` number of times enters a calming @@ -423,7 +423,7 @@ def __init__(self) -> None: self.punishment_count = 0 self.punishment_limit = 0 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if self.calming: self.calming = False @@ -448,7 +448,7 @@ def strategy(self, opponent: Player) -> Action: return C -class Gradual(Player): +class Gradual(IpdPlayer): """ Similar to OriginalGradual, this is a player that punishes defections with a growing number of defections but after punishing for `punishment_limit` @@ -489,7 +489,7 @@ def __init__(self) -> None: self.calm_count = 0 self.punish_count = 0 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) == 0: return C @@ -510,7 +510,7 @@ def strategy(self, opponent: Player) -> Action: @TrackHistoryTransformer(name_prefix=None) -class ContriteTitForTat(Player): +class ContriteTitForTat(IpdPlayer): """ A player that 
corresponds to Tit For Tat if there is no noise. In the case of a noisy match: if the opponent defects as a result of a noisy defection @@ -538,7 +538,7 @@ def __init__(self): self.contrite = False self._recorded_history = [] - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not opponent.history: return C @@ -556,7 +556,7 @@ def strategy(self, opponent: Player) -> Action: return opponent.history[-1] -class AdaptiveTitForTat(Player): +class AdaptiveTitForTat(IpdPlayer): """ATFT - Adaptive Tit For Tat (Basic Model) Algorithm @@ -606,7 +606,7 @@ def __init__(self, rate: float = 0.5) -> None: self.rate = rate self.world = rate - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(opponent.history) == 0: return C @@ -622,7 +622,7 @@ def strategy(self, opponent: Player) -> Action: return D -class SpitefulTitForTat(Player): +class SpitefulTitForTat(IpdPlayer): """ A player starts by cooperating and then mimics the previous action of the opponent until opponent defects twice in a row, at which point player @@ -648,7 +648,7 @@ def __init__(self) -> None: super().__init__() self.retaliating = False - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # First move if not self.history: return C @@ -665,7 +665,7 @@ def strategy(self, opponent: Player) -> Action: return C -class SlowTitForTwoTats2(Player): +class SlowTitForTwoTats2(IpdPlayer): """ A player plays C twice, then if the opponent plays the same move twice, plays that move, otherwise plays previous move. 
@@ -686,7 +686,7 @@ class SlowTitForTwoTats2(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # Start with two cooperations if len(self.history) < 2: @@ -701,7 +701,7 @@ def strategy(self, opponent: Player) -> Action: @FinalTransformer((D,), name_prefix=None) -class Alexei(Player): +class Alexei(IpdPlayer): """ Plays similar to Tit-for-Tat, but always defect on last turn. @@ -721,7 +721,7 @@ class Alexei(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not self.history: return C if opponent.history[-1] == D: @@ -730,7 +730,7 @@ def strategy(self, opponent: Player) -> Action: @FinalTransformer((D,), name_prefix=None) -class EugineNier(Player): +class EugineNier(IpdPlayer): """ Plays similar to Tit-for-Tat, but with two conditions: 1) Always Defect on Last Move @@ -756,7 +756,7 @@ def __init__(self): super().__init__() self.is_defector = False - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not self.history: return C if not (self.is_defector) and opponent.defections >= 5: @@ -766,7 +766,7 @@ def strategy(self, opponent: Player) -> Action: return opponent.history[-1] -class NTitsForMTats(Player): +class NTitsForMTats(IpdPlayer): """ A parameterizable Tit-for-Tat, The arguments are: @@ -812,7 +812,7 @@ def __init__(self, N: int = 3, M: int = 2) -> None: self.classifier["memory_depth"] = max([M, N]) self.retaliate_count = 0 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # if opponent defected consecutively M times, start the retaliation if not self.M or opponent.history[-self.M :].count(D) == self.M: self.retaliate_count = self.N @@ -823,7 +823,7 @@ def strategy(self, opponent: Player) -> Action: @FinalTransformer((D,), name_prefix=None) -class Michaelos(Player): +class 
Michaelos(IpdPlayer): """ Plays similar to Tit-for-Tat with two exceptions: 1) Defect on last turn. @@ -851,7 +851,7 @@ def __init__(self): super().__init__() self.is_defector = False - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not self.history: return C if self.is_defector: @@ -867,7 +867,7 @@ def strategy(self, opponent: Player) -> Action: return opponent.history[-1] -class RandomTitForTat(Player): +class RandomTitForTat(IpdPlayer): """ A player starts by cooperating and then follows by copying its opponent (tit for tat style). From then on the player @@ -904,7 +904,7 @@ def __init__(self, p: float = 0.5) -> None: if p in [0, 1]: self.classifier["stochastic"] = False - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """This is the actual strategy""" if not self.history: return C diff --git a/axelrod/strategies/verybad.py b/axelrod/strategies/verybad.py index 7a50b8fe5..6fe374f92 100644 --- a/axelrod/strategies/verybad.py +++ b/axelrod/strategies/verybad.py @@ -1,10 +1,10 @@ from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer C, D = Action.C, Action.D -class VeryBad(Player): +class VeryBad(IpdPlayer): """ It cooperates in the first three rounds, and uses probability (it implements a memory, which stores the opponent’s moves) to decide for @@ -32,7 +32,7 @@ class VeryBad(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: total_moves = len(opponent.history) if total_moves < 3: diff --git a/axelrod/strategies/worse_and_worse.py b/axelrod/strategies/worse_and_worse.py index cd80ca822..0e1bae7b2 100644 --- a/axelrod/strategies/worse_and_worse.py +++ b/axelrod/strategies/worse_and_worse.py @@ -1,11 +1,11 @@ from axelrod.action import Action -from axelrod.player import Player +from axelrod.player import IpdPlayer from axelrod.random_ import 
random_choice C, D = Action.C, Action.D -class WorseAndWorse(Player): +class WorseAndWorse(IpdPlayer): """ Defects with probability of 'current turn / 1000'. Therefore it is more and more likely to defect as the round goes on. @@ -28,13 +28,13 @@ class WorseAndWorse(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: current_round = len(self.history) + 1 probability = 1 - current_round / 1000 return random_choice(probability) -class KnowledgeableWorseAndWorse(Player): +class KnowledgeableWorseAndWorse(IpdPlayer): """ This strategy is based on 'Worse And Worse' but will defect with probability of 'current turn / total no. of turns'. @@ -54,14 +54,14 @@ class KnowledgeableWorseAndWorse(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: current_round = len(self.history) + 1 expected_length = self.match_attributes["length"] probability = 1 - current_round / expected_length return random_choice(probability) -class WorseAndWorse2(Player): +class WorseAndWorse2(IpdPlayer): """ Plays as tit for tat during the first 20 moves. Then defects with probability (current turn - 20) / current turn. @@ -82,7 +82,7 @@ class WorseAndWorse2(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: current_round = len(self.history) + 1 if current_round == 1: @@ -94,7 +94,7 @@ def strategy(self, opponent: Player) -> Action: return random_choice(probability) -class WorseAndWorse3(Player): +class WorseAndWorse3(IpdPlayer): """ Cooperates in the first turn. Then defects with probability no. of opponent defects / (current turn - 1). 
@@ -116,7 +116,7 @@ class WorseAndWorse3(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: current_round = len(self.history) + 1 if current_round == 1: diff --git a/axelrod/strategy_transformers.py b/axelrod/strategy_transformers.py index 6d50d77c2..15e39ce29 100644 --- a/axelrod/strategy_transformers.py +++ b/axelrod/strategy_transformers.py @@ -16,7 +16,7 @@ from axelrod.strategies.sequence_player import SequencePlayer from .action import Action -from .player import Player +from .player import IpdPlayer from .random_ import random_choice C, D = Action.C, Action.D @@ -69,8 +69,8 @@ def __call__(self, PlayerClass): """ Parameters ---------- - PlayerClass: A subclass of axelrod.Player, e.g. Cooperator - The Player Class to modify + PlayerClass: A subclass of axelrod.IpdPlayer, e.g. Cooperator + The IpdPlayer Class to modify Returns ------- @@ -96,7 +96,7 @@ def __call__(self, PlayerClass): # with `strategy_wrapper` def strategy(self, opponent): if strategy_wrapper == dual_wrapper: - # dual_wrapper figures out strategy as if the Player had + # dual_wrapper figures out strategy as if the IpdPlayer had # played the opposite actions of its current history. self._history = self.history.flip_plays() @@ -107,7 +107,7 @@ def strategy(self, opponent): if strategy_wrapper == dual_wrapper: # After dual_wrapper calls the strategy, it returns - # the Player to its original state. 
self._history = self.history.flip_plays() # Apply the wrapper @@ -120,9 +120,9 @@ def strategy(self, opponent): name = PlayerClass.name name_prefix = self.name_prefix if name_prefix: - # Modify the Player name (class variable inherited from Player) + # Modify the IpdPlayer name (class variable inherited from IpdPlayer) new_class_name = "".join([name_prefix, PlayerClass.__name__]) - # Modify the Player name (class variable inherited from Player) + # Modify the IpdPlayer name (class variable inherited from IpdPlayer) name = " ".join([name_prefix, PlayerClass.name]) original_classifier = copy.deepcopy(PlayerClass.classifier) # Copy @@ -199,7 +199,7 @@ def reduce_for_decorated_class(self_): return Decorator -def player_can_be_pickled(player: Player) -> bool: +def player_can_be_pickled(player: IpdPlayer) -> bool: """ Returns True if pickle.dump(player) does not raise pickle.PicklingError. """ @@ -246,7 +246,7 @@ class StrategyReBuilder(object): that could not normally be pickled. """ - def __call__(self, decorators: list, import_name: str, module_name: str) -> Player: + def __call__(self, decorators: list, import_name: str, module_name: str) -> IpdPlayer: module_ = import_module(module_name) import_class = getattr(module_, import_name) @@ -281,11 +281,11 @@ def generic_strategy_wrapper(player, opponent, proposed_action, *args, **kwargs) Parameters ---------- - player: Player object or subclass (self) - opponent: Player object or subclass + player: IpdPlayer object or subclass (self) + opponent: IpdPlayer object or subclass proposed_action: an axelrod.Action, C or D The proposed action by the wrapped strategy - proposed_action = Player.strategy(...) + proposed_action = IpdPlayer.strategy(...) args, kwargs: Any additional arguments that you need. 
@@ -310,7 +310,7 @@ def flip_wrapper(player, opponent, action): FlipTransformer = StrategyTransformerFactory(flip_wrapper, name_prefix="Flipped") -def dual_wrapper(player, opponent: Player, proposed_action: Action) -> Action: +def dual_wrapper(player, opponent: IpdPlayer, proposed_action: Action) -> Action: """Wraps the players strategy function to produce the Dual. The Dual of a strategy will return the exact opposite set of moves to the @@ -321,8 +321,8 @@ def dual_wrapper(player, opponent: Player, proposed_action: Action) -> Action: Parameters ---------- - player: Player object or subclass (self) - opponent: Player object or subclass + player: IpdPlayer object or subclass (self) + opponent: IpdPlayer object or subclass proposed_action: axelrod.Action, C or D The proposed action by the wrapped strategy @@ -588,8 +588,8 @@ def joss_ann_wrapper(player, opponent, proposed_action, probability): Parameters ---------- - player: Player object or subclass (self) - opponent: Player object or subclass + player: IpdPlayer object or subclass (self) + opponent: IpdPlayer object or subclass proposed_action: axelrod.Action, C or D The proposed action by the wrapped strategy probability: tuple diff --git a/axelrod/tests/integration/test_matches.py b/axelrod/tests/integration/test_matches.py index a627fe885..b8b70aabe 100644 --- a/axelrod/tests/integration/test_matches.py +++ b/axelrod/tests/integration/test_matches.py @@ -29,7 +29,7 @@ def test_outcome_repeats(self, strategies, turns): """A test that if we repeat 3 matches with deterministic and well behaved strategies then we get the same result""" players = [s() for s in strategies] - matches = [axl.Match(players, turns) for _ in range(3)] + matches = [axl.IpdMatch(players, turns) for _ in range(3)] self.assertEqual(matches[0].play(), matches[1].play()) self.assertEqual(matches[1].play(), matches[2].play()) @@ -48,7 +48,7 @@ def test_outcome_repeats_stochastic(self, strategies, turns, seed): for _ in range(3): axl.seed(seed) 
players = [s() for s in strategies] - results.append(axl.Match(players, turns).play()) + results.append(axl.IpdMatch(players, turns).play()) self.assertEqual(results[0], results[1]) self.assertEqual(results[1], results[2]) @@ -61,11 +61,11 @@ def test_matches_with_det_player_for_stochastic_classes(self): p2 = axl.MemoryOnePlayer(four_vector=(1, 0, 1, 0)) p3 = axl.MemoryOnePlayer(four_vector=(1, 1, 1, 0)) - m = axl.Match((p1, p2), turns=3) + m = axl.IpdMatch((p1, p2), turns=3) self.assertEqual(m.play(), [(C, C), (D, C), (D, D)]) - m = axl.Match((p2, p3), turns=3) + m = axl.IpdMatch((p2, p3), turns=3) self.assertEqual(m.play(), [(C, C), (C, C), (C, C)]) - m = axl.Match((p1, p3), turns=3) + m = axl.IpdMatch((p1, p3), turns=3) self.assertEqual(m.play(), [(C, C), (D, C), (D, C)]) diff --git a/axelrod/tests/integration/test_sample_tournaments.py b/axelrod/tests/integration/test_sample_tournaments.py index e98aa9ad2..099cdb070 100644 --- a/axelrod/tests/integration/test_sample_tournaments.py +++ b/axelrod/tests/integration/test_sample_tournaments.py @@ -8,7 +8,7 @@ class TestSampleTournaments(unittest.TestCase): @classmethod def setUpClass(cls): - cls.game = axl.Game() + cls.game = axl.IpdGame() @classmethod def get_test_outcome(cls, outcome, turns=10): @@ -18,7 +18,7 @@ def get_test_outcome(cls, outcome, turns=10): players = [getattr(axl, n)() for n in names] # Play the tournament and build the actual outcome tuples. 
- tournament = axl.Tournament( + tournament = axl.IpdTournament( players=players, game=cls.game, turns=turns, repetitions=1 ) results = tournament.play(progress_bar=False) diff --git a/axelrod/tests/integration/test_tournament.py b/axelrod/tests/integration/test_tournament.py index 2ab59c974..d5df8a02c 100644 --- a/axelrod/tests/integration/test_tournament.py +++ b/axelrod/tests/integration/test_tournament.py @@ -14,7 +14,7 @@ class TestTournament(unittest.TestCase): @classmethod def setUpClass(cls): - cls.game = axl.Game() + cls.game = axl.IpdGame() cls.players = [ axl.Cooperator(), axl.TitForTat(), @@ -57,7 +57,7 @@ def test_big_tournaments(self, tournament): ) def test_serial_play(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -69,7 +69,7 @@ def test_serial_play(self): self.assertEqual(actual_outcome, self.expected_outcome) def test_parallel_play(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -89,7 +89,7 @@ def test_repeat_tournament_deterministic(self): ] files = [] for _ in range(2): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name="test", players=deterministic_players, game=self.game, @@ -113,7 +113,7 @@ def test_repeat_tournament_stochastic(self): for s in axl.short_run_time_strategies if axl.Classifiers["stochastic"](s()) ] - tournament = axl.Tournament( + tournament = axl.IpdTournament( name="test", players=stochastic_players, game=self.game, @@ -130,13 +130,13 @@ class TestNoisyTournament(unittest.TestCase): def test_noisy_tournament(self): # Defector should win for low noise players = [axl.Cooperator(), axl.Defector()] - tournament = axl.Tournament(players, turns=5, repetitions=3, noise=0.0) + tournament = axl.IpdTournament(players, turns=5, repetitions=3, noise=0.0) results = tournament.play(progress_bar=False) self.assertEqual(results.ranked_names[0], 
"Defector") # If the noise is large enough, cooperator should win players = [axl.Cooperator(), axl.Defector()] - tournament = axl.Tournament(players, turns=5, repetitions=3, noise=0.75) + tournament = axl.IpdTournament(players, turns=5, repetitions=3, noise=0.75) results = tournament.play(progress_bar=False) self.assertEqual(results.ranked_names[0], "Cooperator") @@ -149,7 +149,7 @@ def test_players_do_not_know_match_length(self): p1 = FinalTransformer(["D", "D"])(axl.Cooperator)() p2 = FinalTransformer(["D", "D"])(axl.Cooperator)() players = [p1, p2] - tournament = axl.Tournament(players, prob_end=0.5, repetitions=1) + tournament = axl.IpdTournament(players, prob_end=0.5, repetitions=1) results = tournament.play(progress_bar=False) # Check that both plays always cooperated for rating in results.cooperating_rating: @@ -165,7 +165,7 @@ def test_matches_have_different_length(self): p3 = axl.Cooperator() players = [p1, p2, p3] axl.seed(0) - tournament = axl.Tournament(players, prob_end=0.5, repetitions=2) + tournament = axl.IpdTournament(players, prob_end=0.5, repetitions=2) results = tournament.play(progress_bar=False) # Check that match length are different across the repetitions self.assertNotEqual(results.match_lengths[0], results.match_lengths[1]) diff --git a/axelrod/tests/property.py b/axelrod/tests/property.py index 95d08e2b8..705acbdc9 100644 --- a/axelrod/tests/property.py +++ b/axelrod/tests/property.py @@ -61,7 +61,7 @@ def matches( players = [s() for s in strategies] turns = draw(integers(min_value=min_turns, max_value=max_turns)) noise = draw(floats(min_value=min_noise, max_value=max_noise)) - match = axl.Match(players, turns=turns, noise=noise) + match = axl.IpdMatch(players, turns=turns, noise=noise) return match @@ -108,7 +108,7 @@ def tournaments( repetitions = draw(integers(min_value=min_repetitions, max_value=max_repetitions)) noise = draw(floats(min_value=min_noise, max_value=max_noise)) - tournament = axl.Tournament(players, turns=turns, 
repetitions=repetitions, noise=noise) + tournament = axl.IpdTournament(players, turns=turns, repetitions=repetitions, noise=noise) return tournament @@ -155,7 +155,7 @@ def prob_end_tournaments( repetitions = draw(integers(min_value=min_repetitions, max_value=max_repetitions)) noise = draw(floats(min_value=min_noise, max_value=max_noise)) - tournament = axl.Tournament( + tournament = axl.IpdTournament( players, prob_end=prob_end, repetitions=repetitions, noise=noise ) return tournament @@ -223,7 +223,7 @@ def spatial_tournaments( repetitions = draw(integers(min_value=min_repetitions, max_value=max_repetitions)) noise = draw(floats(min_value=min_noise, max_value=max_noise)) - tournament = axl.Tournament( + tournament = axl.IpdTournament( players, turns=turns, repetitions=repetitions, noise=noise, edges=edges ) return tournament @@ -291,7 +291,7 @@ def prob_end_spatial_tournaments( repetitions = draw(integers(min_value=min_repetitions, max_value=max_repetitions)) noise = draw(floats(min_value=min_noise, max_value=max_noise)) - tournament = axl.Tournament( + tournament = axl.IpdTournament( players, prob_end=prob_end, repetitions=repetitions, noise=noise, edges=edges ) return tournament @@ -331,5 +331,5 @@ def games(draw, prisoners_dilemma=True, max_value=100): r = draw(integers(max_value=max_value)) p = draw(integers(max_value=max_value)) - game = axl.Game(r=r, s=s, t=t, p=p) + game = axl.IpdGame(r=r, s=s, t=t, p=p) return game diff --git a/axelrod/tests/strategies/test_adaptive.py b/axelrod/tests/strategies/test_adaptive.py index 1fbca26f4..5b796ce4e 100644 --- a/axelrod/tests/strategies/test_adaptive.py +++ b/axelrod/tests/strategies/test_adaptive.py @@ -40,7 +40,7 @@ def test_scoring(self): player.play(opponent) player.play(opponent) self.assertEqual(3, player.scores[C]) - game = axl.Game(-3, 10, 10, 10) + game = axl.IpdGame(-3, 10, 10, 10) player.set_match_attributes(game=game) player.play(opponent) self.assertEqual(0, player.scores[C]) diff --git 
a/axelrod/tests/strategies/test_axelrod_first.py b/axelrod/tests/strategies/test_axelrod_first.py index 35ee2d5f4..1327757a3 100644 --- a/axelrod/tests/strategies/test_axelrod_first.py +++ b/axelrod/tests/strategies/test_axelrod_first.py @@ -100,20 +100,20 @@ def test_cooperation_probability(self): p1 = self.player(start_coop_prob=1.0, end_coop_prob=0.8, rounds_of_decay=100) self.assertEqual(1.0, p1._cooperation_probability()) p2 = axl.Cooperator() - match = axl.Match((p1, p2), turns=50) + match = axl.IpdMatch((p1, p2), turns=50) match.play() self.assertEqual(0.9, p1._cooperation_probability()) - match = axl.Match((p1, p2), turns=100) + match = axl.IpdMatch((p1, p2), turns=100) match.play() self.assertEqual(0.8, p1._cooperation_probability()) # Test cooperation probabilities, second set of params p1 = self.player(start_coop_prob=1.0, end_coop_prob=0.5, rounds_of_decay=200) self.assertEqual(1.0, p1._cooperation_probability()) - match = axl.Match((p1, p2), turns=100) + match = axl.IpdMatch((p1, p2), turns=100) match.play() self.assertEqual(0.75, p1._cooperation_probability()) - match = axl.Match((p1, p2), turns=200) + match = axl.IpdMatch((p1, p2), turns=200) match.play() self.assertEqual(0.5, p1._cooperation_probability()) @@ -122,7 +122,7 @@ def test_decay(self): for opponent in [axl.Cooperator(), axl.Defector()]: player = self.player() self.assertEqual(player._cooperation_probability(), player._start_coop_prob) - match = axl.Match((player, opponent), turns=201) + match = axl.IpdMatch((player, opponent), turns=201) match.play() self.assertEqual(player._cooperation_probability(), player._end_coop_prob) @@ -487,7 +487,7 @@ def test_init(self): self.assertFalse(player.opponent_is_random) def test_strategy(self): - # Our Player (SteinAndRapoport) vs Cooperator + # Our IpdPlayer (SteinAndRapoport) vs Cooperator # After 15th round (pvalue < alpha) still plays TitForTat. # Note it always defects on the last two rounds. 
opponent = axl.Cooperator() diff --git a/axelrod/tests/strategies/test_axelrod_second.py b/axelrod/tests/strategies/test_axelrod_second.py index 7c9122d8d..1b89e5b36 100644 --- a/axelrod/tests/strategies/test_axelrod_second.py +++ b/axelrod/tests/strategies/test_axelrod_second.py @@ -410,7 +410,7 @@ def test_strategy(self): # Test to make sure logic matches Fortran (discrepancy found 8/23/2017) opponent = axl.AntiTitForTat() - # Actions come from a match run by Axelrod Fortran using Player('k86r') + # Actions come from a match run by Axelrod Fortran using IpdPlayer('k86r') actions = [ (C, C), (C, D), @@ -1157,7 +1157,7 @@ def test_strategy(self): expected_actions += [(D, C)] random.seed(10) player = self.player() - match = axl.Match((player, axl.Random()), turns=len(expected_actions)) + match = axl.IpdMatch((player, axl.Random()), turns=len(expected_actions)) # The history matrix will be [[0, 2], [5, 6], [3, 6], [4, 2]] actions = match.play() self.assertEqual(actions, expected_actions) diff --git a/axelrod/tests/strategies/test_evolvable_player.py b/axelrod/tests/strategies/test_evolvable_player.py index ccb6ac37d..de6f9623f 100644 --- a/axelrod/tests/strategies/test_evolvable_player.py +++ b/axelrod/tests/strategies/test_evolvable_player.py @@ -155,12 +155,12 @@ def behavior_test(self, player1, player2): for opponent_class in [axl.Random, axl.TitForTat, axl.Alternator]: axl.seed(0) opponent = opponent_class() - match = axl.Match((player1.clone(), opponent)) + match = axl.IpdMatch((player1.clone(), opponent)) results1 = match.play() axl.seed(0) opponent = opponent_class() - match = axl.Match((player2.clone(), opponent)) + match = axl.IpdMatch((player2.clone(), opponent)) results2 = match.play() self.assertEqual(results1, results2) diff --git a/axelrod/tests/strategies/test_finite_state_machines.py b/axelrod/tests/strategies/test_finite_state_machines.py index 12fe52e5d..309ef7cfe 100644 --- a/axelrod/tests/strategies/test_finite_state_machines.py +++ 
b/axelrod/tests/strategies/test_finite_state_machines.py @@ -7,7 +7,7 @@ import axelrod as axl from axelrod.compute_finite_state_machine_memory import get_memory_from_transitions from axelrod.evolvable_player import InsufficientParametersError -from axelrod.strategies.finite_state_machines import EvolvableFSMPlayer, FSMPlayer, SimpleFSM +from axelrod.strategies import EvolvableFSMPlayer, FSMPlayer, SimpleFSM from .test_player import TestPlayer from .test_evolvable_player import PartialClass, TestEvolvablePlayer @@ -96,7 +96,7 @@ class TestSampleFSMPlayer(TestPlayer): """Test a few sample tables to make sure that the finite state machines are working as intended.""" - name = "FSM Player: ((1, C, 1, C), (1, D, 1, D)), 1, C" + name = "FSM IpdPlayer: ((1, C, 1, C), (1, D, 1, D)), 1, C" player = axl.FSMPlayer expected_classifier = { @@ -168,7 +168,7 @@ def test_wsls(self): class TestFSMPlayer(TestPlayer): - name = "FSM Player: ((1, C, 1, C), (1, D, 1, D)), 1, C" + name = "FSM IpdPlayer: ((1, C, 1, C), (1, D, 1, D)), 1, C" player = axl.FSMPlayer expected_classifier = { diff --git a/axelrod/tests/strategies/test_hmm.py b/axelrod/tests/strategies/test_hmm.py index 558da9bf4..2c54eedb7 100644 --- a/axelrod/tests/strategies/test_hmm.py +++ b/axelrod/tests/strategies/test_hmm.py @@ -4,14 +4,14 @@ import random import axelrod as axl +from axelrod.random_ import random_vector from axelrod.evolvable_player import InsufficientParametersError -from axelrod.strategies.hmm import ( +from axelrod.strategies import ( EvolvableHMMPlayer, HMMPlayer, SimpleHMM, - is_stochastic_matrix, - random_vector, ) +from axelrod.strategies.hmm import is_stochastic_matrix from .test_player import TestMatch, TestPlayer from .test_evolvable_player import PartialClass, TestEvolvablePlayer @@ -149,7 +149,7 @@ def test_malformed_params(self): class TestHMMPlayer(TestPlayer): - name = "HMM Player: 0, C" + name = "HMM IpdPlayer: 0, C" player = axl.HMMPlayer expected_classifier = { diff --git 
a/axelrod/tests/strategies/test_human.py b/axelrod/tests/strategies/test_human.py index 46c1f3f8a..d8e6877e5 100644 --- a/axelrod/tests/strategies/test_human.py +++ b/axelrod/tests/strategies/test_human.py @@ -113,7 +113,7 @@ def test_get_human_input_D(self): def test_strategy(self): human = Human() expected_action = C - actual_action = human.strategy(axl.Player(), lambda: C) + actual_action = human.strategy(axl.IpdPlayer(), lambda: C) self.assertEqual(actual_action, expected_action) def test_reset_history_and_attributes(self): diff --git a/axelrod/tests/strategies/test_hunter.py b/axelrod/tests/strategies/test_hunter.py index 7c2912494..e5b067be7 100644 --- a/axelrod/tests/strategies/test_hunter.py +++ b/axelrod/tests/strategies/test_hunter.py @@ -2,8 +2,6 @@ import unittest -import random - import axelrod as axl from axelrod.strategies.hunter import detect_cycle diff --git a/axelrod/tests/strategies/test_memoryone.py b/axelrod/tests/strategies/test_memoryone.py index 784baadf4..3c8dab63e 100644 --- a/axelrod/tests/strategies/test_memoryone.py +++ b/axelrod/tests/strategies/test_memoryone.py @@ -19,9 +19,9 @@ class TestGenericPlayerOne(unittest.TestCase): p3 = axl.MemoryOnePlayer(four_vector=(1, 0.5, 1, 0.5)) def test_name(self): - self.assertEqual(self.p1.name, "Generic Memory One Player: (0, 0, 0, 0)") - self.assertEqual(self.p2.name, "Generic Memory One Player: (1, 0, 1, 0)") - self.assertEqual(self.p3.name, "Generic Memory One Player: (1, 0.5, 1, 0.5)") + self.assertEqual(self.p1.name, "Generic Memory One IpdPlayer: (0, 0, 0, 0)") + self.assertEqual(self.p2.name, "Generic Memory One IpdPlayer: (1, 0, 1, 0)") + self.assertEqual(self.p3.name, "Generic Memory One IpdPlayer: (1, 0.5, 1, 0.5)") def test_stochastic_classification(self): self.assertFalse(axl.Classifiers["stochastic"](self.p1)) @@ -93,7 +93,7 @@ def test_strategy(self): self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=1) def test_four_vector(self): - (R, P, S, T) = 
axl.Game().RPST() + (R, P, S, T) = axl.IpdGame().RPST() p = min(1 - (T - R) / (R - S), (R - P) / (T - P)) expected_dictionary = {(C, C): 1.0, (C, D): p, (D, C): 1.0, (D, D): p} test_four_vector(self, expected_dictionary) @@ -293,9 +293,9 @@ class TestGenericReactiveStrategy(unittest.TestCase): p3 = axl.ReactivePlayer(probabilities=(1, 0.5)) def test_name(self): - self.assertEqual(self.p1.name, "Reactive Player: (0, 0)") - self.assertEqual(self.p2.name, "Reactive Player: (1, 0)") - self.assertEqual(self.p3.name, "Reactive Player: (1, 0.5)") + self.assertEqual(self.p1.name, "Reactive IpdPlayer: (0, 0)") + self.assertEqual(self.p2.name, "Reactive IpdPlayer: (1, 0)") + self.assertEqual(self.p3.name, "Reactive IpdPlayer: (1, 0.5)") def test_four_vector(self): self.assertEqual( diff --git a/axelrod/tests/strategies/test_memorytwo.py b/axelrod/tests/strategies/test_memorytwo.py index 3318c6983..ca52a0a4b 100644 --- a/axelrod/tests/strategies/test_memorytwo.py +++ b/axelrod/tests/strategies/test_memorytwo.py @@ -7,7 +7,7 @@ import warnings import axelrod as axl -from axelrod.strategies.memorytwo import MemoryTwoPlayer +from axelrod.strategies import MemoryTwoPlayer from .test_player import TestPlayer @@ -51,19 +51,19 @@ class TestGenericPlayerTwo(unittest.TestCase): def test_name(self): self.assertEqual( self.p1.name, - "Generic Memory Two Player: (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)", + "Generic Memory Two IpdPlayer: (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)", ) self.assertEqual( self.p2.name, - "Generic Memory Two Player: (1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0)", + "Generic Memory Two IpdPlayer: (1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0)", ) self.assertEqual( self.p3.name, - "Generic Memory Two Player: (0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5)", + "Generic Memory Two IpdPlayer: (0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5)", ) self.assertEqual( self.p4.name, - 
"Generic Memory Two Player: (0.1, 0, 0.2, 0, 0.3, 0, 0.4, 0, 0.5, 0, 0.6, 0, 0.7, 0, 0.8, 0)", + "Generic Memory Two IpdPlayer: (0.1, 0, 0.2, 0, 0.3, 0, 0.4, 0, 0.5, 0, 0.6, 0, 0.7, 0, 0.8, 0)", ) def test_deterministic_classification(self): @@ -142,7 +142,7 @@ def test_exception_if_probability_vector_outside_valid_values(self): class TestMemoryStochastic(TestPlayer): name = ( - "Generic Memory Two Player: (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1): C" + "Generic Memory Two IpdPlayer: (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1): C" ) player = axl.MemoryTwoPlayer expected_classifier = { diff --git a/axelrod/tests/strategies/test_meta.py b/axelrod/tests/strategies/test_meta.py index 7256369c8..4559180e8 100644 --- a/axelrod/tests/strategies/test_meta.py +++ b/axelrod/tests/strategies/test_meta.py @@ -15,7 +15,7 @@ class TestMetaPlayer(TestPlayer): dictionary and the reset methods. Inherit from this class just as you would the TestPlayer class.""" - name = "Meta Player" + name = "Meta IpdPlayer" player = axl.MetaPlayer expected_classifier = { "memory_depth": float("inf"), @@ -86,7 +86,7 @@ def test_clone(self, seed): player2.reset() for p in [player1, player2]: axl.seed(seed) - m = axl.Match((p, op), turns=turns) + m = axl.IpdMatch((p, op), turns=turns) m.play() self.assertEqual(len(player1.history), turns) self.assertEqual(player1.history, player2.history) @@ -107,7 +107,7 @@ class TestMetaMajority(TestMetaPlayer): def test_strategy(self): P1 = axl.MetaMajority() - P2 = axl.Player() + P2 = axl.IpdPlayer() # With more cooperators on the team than defectors, we should cooperate. P1.team = [axl.Cooperator(), axl.Cooperator(), axl.Defector()] @@ -138,7 +138,7 @@ def test_team(self): def test_strategy(self): P1 = axl.MetaMinority() - P2 = axl.Player() + P2 = axl.IpdPlayer() # With more cooperators on the team, we should defect. 
P1.team = [axl.Cooperator(), axl.Cooperator(), axl.Defector()] @@ -164,7 +164,7 @@ class TestNiceMetaWinner(TestMetaPlayer): def test_strategy(self): P1 = axl.NiceMetaWinner(team=[axl.Cooperator, axl.Defector]) - P2 = axl.Player() + P2 = axl.IpdPlayer() # This meta player will simply choose the strategy with the highest # current score. diff --git a/axelrod/tests/strategies/test_mutual.py b/axelrod/tests/strategies/test_mutual.py index 8ba512912..921fc698d 100644 --- a/axelrod/tests/strategies/test_mutual.py +++ b/axelrod/tests/strategies/test_mutual.py @@ -22,33 +22,33 @@ class TestDesperate(TestPlayer): } def test_strategy(self): - # Our Player (Desperate) vs Cooperator SEED --> 1 + # Our IpdPlayer (Desperate) vs Cooperator SEED --> 1 opponent = axl.Cooperator() opponent_actions = [C] * 5 actions = [(C, C), (D, C), (D, C), (D, C), (D, C)] self.versus_test(opponent, expected_actions=actions, seed=1) - # Our Player (Desperate) vs Cooperator SEED --> 2 + # Our IpdPlayer (Desperate) vs Cooperator SEED --> 2 opponent = axl.Cooperator() actions = [(D, C), (D, C), (D, C), (D, C), (D, C)] self.versus_test(opponent, expected_actions=actions, seed=2) - # Our Player (Desperate) vs Defector SEED --> 1 + # Our IpdPlayer (Desperate) vs Defector SEED --> 1 opponent = axl.Defector() actions = [(C, D), (D, D), (C, D), (D, D), (C, D)] self.versus_test(opponent, expected_actions=actions, seed=1) - # Our Player (Desperate) vs Defector SEED --> 2 + # Our IpdPlayer (Desperate) vs Defector SEED --> 2 opponent = axl.Defector() actions = [(D, D), (C, D), (D, D), (C, D), (D, D)] self.versus_test(opponent, expected_actions=actions, seed=2) - # Our Player (Desperate) vs Alternator SEED --> 1 + # Our IpdPlayer (Desperate) vs Alternator SEED --> 1 opponent = axl.Alternator() actions = [(C, C), (D, D), (C, C), (D, D), (C, C)] self.versus_test(opponent, expected_actions=actions, seed=1) - # Our Player (Desperate) vs Alternator SEED --> 2 + # Our IpdPlayer (Desperate) vs Alternator SEED --> 2 
opponent = axl.Alternator() actions = [(D, C), (D, D), (C, C), (D, D), (C, C)] self.versus_test(opponent, expected_actions=actions, seed=2) @@ -69,33 +69,33 @@ class TestHopeless(TestPlayer): } def test_strategy(self): - # Our Player (Hopeless) vs Cooperator SEED --> 1 + # Our IpdPlayer (Hopeless) vs Cooperator SEED --> 1 opponent = axl.Cooperator() opponent_actions = [C] * 5 actions = [(C, C), (D, C), (C, C), (D, C), (C, C)] self.versus_test(opponent, expected_actions=actions, seed=1) - # Our Player (Hopeless) vs Cooperator SEED --> 2 + # Our IpdPlayer (Hopeless) vs Cooperator SEED --> 2 opponent = axl.Cooperator() actions = [(D, C), (C, C), (D, C), (C, C), (D, C)] self.versus_test(opponent, expected_actions=actions, seed=2) - # Our Player (Hopeless) vs Defector SEED --> 1 + # Our IpdPlayer (Hopeless) vs Defector SEED --> 1 opponent = axl.Defector() actions = [(C, D), (C, D), (C, D), (C, D), (C, D)] self.versus_test(opponent, expected_actions=actions, seed=1) - # Our Player (Hopeless) vs Defector SEED --> 2 + # Our IpdPlayer (Hopeless) vs Defector SEED --> 2 opponent = axl.Defector() actions = [(D, D), (C, D), (C, D), (C, D), (C, D)] self.versus_test(opponent, expected_actions=actions, seed=2) - # Our Player (Hopeless) vs Alternator SEED --> 1 + # Our IpdPlayer (Hopeless) vs Alternator SEED --> 1 opponent = axl.Alternator() actions = [(C, C), (D, D), (C, C), (D, D), (C, C)] self.versus_test(opponent, expected_actions=actions, seed=1) - # Our Player (Hopeless) vs Alternator SEED --> 2 + # Our IpdPlayer (Hopeless) vs Alternator SEED --> 2 opponent = axl.Alternator() actions = [(D, C), (C, D), (C, C), (D, D), (C, C)] self.versus_test(opponent, expected_actions=actions, seed=2) @@ -116,33 +116,33 @@ class TestWilling(TestPlayer): } def test_strategy(self): - # Our Player (Willing) vs Cooperator SEED --> 1 + # Our IpdPlayer (Willing) vs Cooperator SEED --> 1 opponent = axl.Cooperator() opponent_actions = [C] * 5 actions = [(C, C), (C, C), (C, C), (C, C), (C, C)] 
self.versus_test(opponent, expected_actions=actions, seed=1) - # Our Player (Willing) vs Cooperator SEED --> 2 + # Our IpdPlayer (Willing) vs Cooperator SEED --> 2 opponent = axl.Cooperator() actions = [(D, C), (C, C), (C, C), (C, C), (C, C)] self.versus_test(opponent, expected_actions=actions, seed=2) - # Our Player (Willing) vs Defector SEED --> 1 + # Our IpdPlayer (Willing) vs Defector SEED --> 1 opponent = axl.Defector() actions = [(C, D), (C, D), (C, D), (C, D), (C, D)] self.versus_test(opponent, expected_actions=actions, seed=1) - # Our Player (Willing) vs Defector SEED --> 2 + # Our IpdPlayer (Willing) vs Defector SEED --> 2 opponent = axl.Defector() actions = [(D, D), (D, D), (D, D), (D, D), (D, D)] self.versus_test(opponent, expected_actions=actions, seed=2) - # Our Player (Willing) vs Alternator SEED --> 1 + # Our IpdPlayer (Willing) vs Alternator SEED --> 1 opponent = axl.Alternator() actions = [(C, C), (C, D), (C, C), (C, D), (C, C)] self.versus_test(opponent, expected_actions=actions, seed=1) - # Our Player (Willing) vs Alternator SEED --> 2 + # Our IpdPlayer (Willing) vs Alternator SEED --> 2 opponent = axl.Alternator() actions = [(D, C), (C, D), (C, C), (C, D), (C, C)] self.versus_test(opponent, expected_actions=actions, seed=2) diff --git a/axelrod/tests/strategies/test_player.py b/axelrod/tests/strategies/test_player.py index 2b4dd33c4..dba3d29d8 100644 --- a/axelrod/tests/strategies/test_player.py +++ b/axelrod/tests/strategies/test_player.py @@ -43,8 +43,8 @@ def defect(*args): } -class ParameterisedTestPlayer(axl.Player): - """A simple Player class for testing init parameters""" +class ParameterisedTestPlayer(axl.IpdPlayer): + """A simple IpdPlayer class for testing init parameters""" name = "ParameterisedTestPlayer" classifier = _test_classifier @@ -54,8 +54,8 @@ def __init__(self, arg_test1="testing1", arg_test2="testing2"): class TestPlayerClass(unittest.TestCase): - name = "Player" - player = axl.Player + name = "IpdPlayer" + player = 
axl.IpdPlayer classifier = {"stochastic": False} def test_play(self): @@ -90,7 +90,7 @@ def test_play(self): def test_state_distribution(self): player1 = axl.MockPlayer([C, C, D, D, C]) player2 = axl.MockPlayer([C, D, C, D, D]) - match = axl.Match((player1, player2), turns=5) + match = axl.IpdMatch((player1, player2), turns=5) _ = match.play() self.assertEqual( player1.state_distribution, @@ -112,7 +112,7 @@ def test_noisy_play(self): self.assertEqual(player2.history[0], D) def test_update_history(self): - player = axl.Player() + player = axl.IpdPlayer() self.assertEqual(player.history, []) self.assertEqual(player.cooperations, 0) self.assertEqual(player.defections, 0) @@ -126,7 +126,7 @@ def test_update_history(self): self.assertEqual(player.cooperations, 1) def test_history_assignment(self): - player = axl.Player() + player = axl.IpdPlayer() with self.assertRaises(AttributeError): player.history = [] @@ -146,7 +146,7 @@ def test_clone(self): seed = random.randint(0, 10 ** 6) for p in [player1, player2]: axl.seed(seed) - m = axl.Match((p, op), turns=turns) + m = axl.IpdMatch((p, op), turns=turns) m.play() self.assertEqual(len(player1.history), turns) self.assertEqual(player1.history, player2.history) @@ -319,9 +319,9 @@ def test_init_kwargs(self): # Test that init_kwargs exist and are empty self.assertEqual(self.player().init_kwargs, {}) # Test that passing a positional argument raises an error - self.assertRaises(TypeError, axl.Player, "test") + self.assertRaises(TypeError, axl.IpdPlayer, "test") # Test that passing a keyword argument raises an error - self.assertRaises(TypeError, axl.Player, arg_test1="test") + self.assertRaises(TypeError, axl.IpdPlayer, arg_test1="test") # Tests for Players with init parameters @@ -354,7 +354,7 @@ def test_init_kwargs(self): ) -class TestOpponent(axl.Player): +class TestOpponent(axl.IpdPlayer): """A player who only exists so we have something to test against""" name = "TestOpponent" @@ -496,7 +496,7 @@ def test_clone(self, 
seed): player2.reset() for p in [player1, player2]: axl.seed(seed) - m = axl.Match((p, op), turns=turns) + m = axl.IpdMatch((p, op), turns=turns) m.play() self.assertEqual(len(player1.history), turns) self.assertEqual(player1.history, player2.history) @@ -554,9 +554,9 @@ def versus_test( Tests a sequence of outcomes for two given players. Parameters: ----------- - opponent: Player or list + opponent: IpdPlayer or list An instance of a player OR a sequence of actions. If a sequence of - actions is passed, a Mock Player is created that cycles over that + actions is passed, a Mock IpdPlayer is created that cycles over that sequence. expected_actions: List The expected outcomes of the match (list of tuples of actions). @@ -587,7 +587,7 @@ def versus_test( player = self.player(**init_kwargs) - match = axl.Match( + match = axl.IpdMatch( (player, opponent), turns=turns, noise=noise, @@ -657,7 +657,7 @@ def versus_test( if seed: axl.seed(seed) turns = len(expected_actions1) - match = axl.Match((player1, player2), turns=turns, noise=noise) + match = axl.IpdMatch((player1, player2), turns=turns, noise=noise) match.play() # Test expected sequence of play. for i, (outcome1, outcome2) in enumerate( @@ -696,7 +696,7 @@ def test_memory(player, opponent, memory_length, seed=0, turns=10): """ # Play the match normally. axl.seed(seed) - match = axl.Match((player, opponent), turns=turns) + match = axl.IpdMatch((player, opponent), turns=turns) plays = [p[0] for p in match.play()] # Play with limited history. 
@@ -705,7 +705,7 @@ def test_memory(player, opponent, memory_length, seed=0, turns=10): player._history = axl.LimitedHistory(memory_length) opponent._history = axl.LimitedHistory(memory_length) axl.seed(seed) - match = axl.Match((player, opponent), turns=turns, reset=False) + match = axl.IpdMatch((player, opponent), turns=turns, reset=False) limited_plays = [p[0] for p in match.play()] return plays == limited_plays diff --git a/axelrod/tests/strategies/test_qlearner.py b/axelrod/tests/strategies/test_qlearner.py index 81a261109..1b07a4bfb 100644 --- a/axelrod/tests/strategies/test_qlearner.py +++ b/axelrod/tests/strategies/test_qlearner.py @@ -24,7 +24,7 @@ class TestRiskyQLearner(TestPlayer): } def test_payoff_matrix(self): - (R, P, S, T) = axl.Game().RPST() + (R, P, S, T) = axl.IpdGame().RPST() payoff_matrix = {C: {C: R, D: S}, D: {C: T, D: P}} player = self.player() self.assertEqual(player.payoff_matrix, payoff_matrix) diff --git a/axelrod/tests/strategies/test_stalker.py b/axelrod/tests/strategies/test_stalker.py index cc013543c..31acdb769 100644 --- a/axelrod/tests/strategies/test_stalker.py +++ b/axelrod/tests/strategies/test_stalker.py @@ -87,7 +87,7 @@ def test_strategy(self): def test_reset(self): axl.seed(0) player = axl.Stalker() - m = axl.Match((player, axl.Alternator())) + m = axl.IpdMatch((player, axl.Alternator())) m.play() self.assertNotEqual(player.current_score, 0) player.reset() diff --git a/axelrod/tests/strategies/test_titfortat.py b/axelrod/tests/strategies/test_titfortat.py index b99522d5f..28b5e36a0 100644 --- a/axelrod/tests/strategies/test_titfortat.py +++ b/axelrod/tests/strategies/test_titfortat.py @@ -44,7 +44,7 @@ def test_strategy(self): actions = [(C, D), (D, D), (D, D), (D, D), (D, D)] self.versus_test(axl.Defector(), expected_actions=actions) - # This behaviour is independent of knowledge of the Match length + # This behaviour is independent of knowledge of the IpdMatch length actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] 
self.versus_test( axl.Alternator(), @@ -486,7 +486,7 @@ def test_specific_set_of_results(self): axl.CyclerDDC(), ] axl.seed(1) - tournament = axl.Tournament(players, turns=1000, repetitions=1) + tournament = axl.IpdTournament(players, turns=1000, repetitions=1) results = tournament.play(progress_bar=False) scores = [ round(average_score_per_turn * 1000, 1) @@ -671,7 +671,7 @@ def test_output_from_literature(self): axl.seed(1) turns = 1000 - tournament = axl.Tournament(players, turns=turns, repetitions=1) + tournament = axl.IpdTournament(players, turns=turns, repetitions=1) results = tournament.play(progress_bar=False) scores = [ round(average_score_per_turn * 1000, 1) @@ -723,8 +723,8 @@ def test_is_tit_for_tat_with_no_noise(self, strategies, turns): tft = axl.TitForTat() ctft = self.player() opponent = strategies[0]() - m1 = axl.Match((tft, opponent), turns) - m2 = axl.Match((ctft, opponent), turns) + m1 = axl.IpdMatch((tft, opponent), turns) + m2 = axl.IpdMatch((ctft, opponent), turns) self.assertEqual(m1.play(), m2.play()) def test_strategy_with_noise(self): diff --git a/axelrod/tests/unit/test_classification.py b/axelrod/tests/unit/test_classification.py index e0db01e5d..e1f650bb9 100644 --- a/axelrod/tests/unit/test_classification.py +++ b/axelrod/tests/unit/test_classification.py @@ -14,10 +14,10 @@ memory_depth, rebuild_classifier_table, ) -from axelrod.player import Player +from axelrod.player import IpdPlayer -class TitForTatWithEmptyClassifier(Player): +class TitForTatWithEmptyClassifier(IpdPlayer): """ Same name as TitForTat, but with empty classifier. """ @@ -27,7 +27,7 @@ class TitForTatWithEmptyClassifier(Player): classifier = {} -class TitForTatWithNonTrivialInitialzer(Player): +class TitForTatWithNonTrivialInitialzer(IpdPlayer): """ Same name as TitForTat, but with empty classifier. 
""" diff --git a/axelrod/tests/unit/test_ecosystem.py b/axelrod/tests/unit/test_ecosystem.py index 92098ba17..ec552b542 100644 --- a/axelrod/tests/unit/test_ecosystem.py +++ b/axelrod/tests/unit/test_ecosystem.py @@ -8,7 +8,7 @@ class TestEcosystem(unittest.TestCase): @classmethod def setUpClass(cls): - cooperators = axl.Tournament( + cooperators = axl.IpdTournament( players=[ axl.Cooperator(), axl.Cooperator(), @@ -16,7 +16,7 @@ def setUpClass(cls): axl.Cooperator(), ] ) - defector_wins = axl.Tournament( + defector_wins = axl.IpdTournament( players=[ axl.Cooperator(), axl.Cooperator(), diff --git a/axelrod/tests/unit/test_filters.py b/axelrod/tests/unit/test_filters.py index f2d9e2e17..5b6816b53 100644 --- a/axelrod/tests/unit/test_filters.py +++ b/axelrod/tests/unit/test_filters.py @@ -1,7 +1,7 @@ import unittest import axelrod as axl -from axelrod.player import Player +from axelrod.player import IpdPlayer from axelrod.strategies._filters import * from hypothesis import example, given, settings @@ -9,7 +9,7 @@ class TestFilters(unittest.TestCase): - class TestStrategy(Player): + class TestStrategy(IpdPlayer): classifier = { "stochastic": True, "inspects_source": False, @@ -127,17 +127,17 @@ def test_passes_filterset(self, smaller, larger): self.assertFalse(passes_filterset(self.TestStrategy, sparse_failing_filterset)) def test_filtered_strategies(self): - class StochasticTestStrategy(Player): + class StochasticTestStrategy(IpdPlayer): classifier = { "stochastic": True, "memory_depth": float("inf"), "makes_use_of": [], } - class MemoryDepth2TestStrategy(Player): + class MemoryDepth2TestStrategy(IpdPlayer): classifier = {"stochastic": False, "memory_depth": 2, "makes_use_of": []} - class UsesLengthTestStrategy(Player): + class UsesLengthTestStrategy(IpdPlayer): classifier = { "stochastic": True, "memory_depth": float("inf"), diff --git a/axelrod/tests/unit/test_fingerprint.py b/axelrod/tests/unit/test_fingerprint.py index f60a93e7a..780bfd3bb 100644 --- 
a/axelrod/tests/unit/test_fingerprint.py +++ b/axelrod/tests/unit/test_fingerprint.py @@ -10,7 +10,6 @@ import axelrod as axl from axelrod.fingerprint import AshlockFingerprint, Point, TransitiveFingerprint from axelrod.load_data_ import axl_filename -from axelrod.strategy_transformers import DualTransformer, JossAnnTransformer from axelrod.tests.property import strategy_lists from hypothesis import given, settings @@ -145,9 +144,9 @@ def test_fingerprint_interactions_cooperator(self): # Interactions are invariant for any points where y is zero, and # the score should be maximum possible. - # Player 1 is Point(0.0, 0.0). - # Player 4 is Point(0.5, 0.0). - # Player 7 is Point(1.0, 0.0). + # IpdPlayer 1 is Point(0.0, 0.0). + # IpdPlayer 4 is Point(0.5, 0.0). + # IpdPlayer 7 is Point(1.0, 0.0). for iplayer in (1, 4, 7): for turns in af.interactions[(0, iplayer)]: self.assertEqual(len(turns), 5) @@ -156,7 +155,7 @@ def test_fingerprint_interactions_cooperator(self): self.assertEqual(af.data[Point(0.5, 0.0)], 3.0) self.assertEqual(af.data[Point(1.0, 0.0)], 3.0) - # Player 3 is Point(0.0, 1.0), which means constant defection + # IpdPlayer 3 is Point(0.0, 1.0), which means constant defection # from the probe. But the Cooperator doesn't change and score is zero. for turns in af.interactions[(0, 3)]: self.assertEqual(len(turns), 5) @@ -170,9 +169,9 @@ def test_fingerprint_interactions_titfortat(self): # Tit-for-Tats will always cooperate if left to their own devices, # so interactions are invariant for any points where y is zero, # and the score should be maximum possible. - # Player 1 is Point(0.0, 0.0). - # Player 4 is Point(0.5, 0.0). - # Player 7 is Point(1.0, 0.0). + # IpdPlayer 1 is Point(0.0, 0.0). + # IpdPlayer 4 is Point(0.5, 0.0). + # IpdPlayer 7 is Point(1.0, 0.0). 
for iplayer in (1, 4, 7): for turns in af.interactions[(0, iplayer)]: self.assertEqual(len(turns), 5) @@ -181,7 +180,7 @@ def test_fingerprint_interactions_titfortat(self): self.assertEqual(af.data[Point(0.5, 0.0)], 3.0) self.assertEqual(af.data[Point(1.0, 0.0)], 3.0) - # Player 3 is Point(0.0, 1.0) which implies defection after the + # IpdPlayer 3 is Point(0.0, 1.0) which implies defection after the # first turn since Tit-for-Tat is playing, and a score of 0.8 # since we get zero on first turn and one point per turn later. for turns in af.interactions[(0, 3)]: @@ -464,23 +463,23 @@ def test_analyse_cooperation_ratio(self): filename = axl_filename(path) with open(filename, "w") as f: f.write( - """Interaction index,Player index,Opponent index,Repetition,Player name,Opponent name,Actions -0,0,1,0,Player0,Player1,CCC -0,1,0,0,Player1,Player0,DDD -1,0,1,1,Player0,Player1,CCC -1,1,0,1,Player1,Player0,DDD -2,0,2,0,Player0,Player2,CCD -2,2,0,0,Player2,Player0,DDD -3,0,2,1,Player0,Player2,CCC -3,2,0,1,Player2,Player0,DDD -4,0,3,0,Player0,Player3,CCD -4,3,0,0,Player3,Player0,DDD -5,0,3,1,Player0,Player3,DCC -5,3,0,1,Player3,Player0,DDD -6,0,4,2,Player0,Player4,DDD -6,4,0,2,Player4,Player0,DDD -7,0,4,3,Player0,Player4,DDD -7,4,0,3,Player4,Player0,DDD""" + """Interaction index,Player index,Opponent index,Repetition,IpdPlayer name,Opponent name,Actions +0,0,1,0,IpdPlayer0,IpdPlayer1,CCC +0,1,0,0,IpdPlayer1,IpdPlayer0,DDD +1,0,1,1,IpdPlayer0,IpdPlayer1,CCC +1,1,0,1,IpdPlayer1,IpdPlayer0,DDD +2,0,2,0,IpdPlayer0,IpdPlayer2,CCD +2,2,0,0,IpdPlayer2,IpdPlayer0,DDD +3,0,2,1,IpdPlayer0,IpdPlayer2,CCC +3,2,0,1,IpdPlayer2,IpdPlayer0,DDD +4,0,3,0,IpdPlayer0,IpdPlayer3,CCD +4,3,0,0,IpdPlayer3,IpdPlayer0,DDD +5,0,3,1,IpdPlayer0,IpdPlayer3,DCC +5,3,0,1,IpdPlayer3,IpdPlayer0,DDD +6,0,4,2,IpdPlayer0,IpdPlayer4,DDD +6,4,0,2,IpdPlayer4,IpdPlayer0,DDD +7,0,4,3,IpdPlayer0,IpdPlayer4,DDD +7,4,0,3,IpdPlayer4,IpdPlayer0,DDD""" ) data = tf.analyse_cooperation_ratio(filename) expected_data = 
np.array( diff --git a/axelrod/tests/unit/test_game.py b/axelrod/tests/unit/test_game.py index c52d22b7b..60d50ac76 100644 --- a/axelrod/tests/unit/test_game.py +++ b/axelrod/tests/unit/test_game.py @@ -17,29 +17,29 @@ def test_default_scores(self): (D, D): (1, 1), (C, C): (3, 3), } - self.assertEqual(axl.Game().scores, expected_scores) + self.assertEqual(axl.IpdGame().scores, expected_scores) def test_default_RPST(self): expected_values = (3, 1, 0, 5) - self.assertEqual(axl.Game().RPST(), expected_values) + self.assertEqual(axl.IpdGame().RPST(), expected_values) def test_default_score(self): - game = axl.Game() + game = axl.IpdGame() self.assertEqual(game.score((C, C)), (3, 3)) self.assertEqual(game.score((D, D)), (1, 1)) self.assertEqual(game.score((C, D)), (0, 5)) self.assertEqual(game.score((D, C)), (5, 0)) def test_default_equality(self): - self.assertEqual(axl.Game(), axl.Game()) + self.assertEqual(axl.IpdGame(), axl.IpdGame()) def test_not_default_equality(self): - self.assertEqual(axl.Game(1, 2, 3, 4), axl.Game(1, 2, 3, 4)) - self.assertNotEqual(axl.Game(1, 2, 3, 4), axl.Game(1, 2, 3, 5)) - self.assertNotEqual(axl.Game(1, 2, 3, 4), axl.Game()) + self.assertEqual(axl.IpdGame(1, 2, 3, 4), axl.IpdGame(1, 2, 3, 4)) + self.assertNotEqual(axl.IpdGame(1, 2, 3, 4), axl.IpdGame(1, 2, 3, 5)) + self.assertNotEqual(axl.IpdGame(1, 2, 3, 4), axl.IpdGame()) def test_wrong_class_equality(self): - self.assertNotEqual(axl.Game(), "wrong class") + self.assertNotEqual(axl.IpdGame(), "wrong class") @given(r=integers(), p=integers(), s=integers(), t=integers()) @settings(max_examples=5) @@ -51,21 +51,21 @@ def test_random_init(self, r, p, s, t): (D, D): (p, p), (C, C): (r, r), } - game = axl.Game(r, s, t, p) + game = axl.IpdGame(r, s, t, p) self.assertEqual(game.scores, expected_scores) @given(r=integers(), p=integers(), s=integers(), t=integers()) @settings(max_examples=5) def test_random_RPST(self, r, p, s, t): """Test RPST method with random scores using the hypothesis 
library.""" - game = axl.Game(r, s, t, p) + game = axl.IpdGame(r, s, t, p) self.assertEqual(game.RPST(), (r, p, s, t)) @given(r=integers(), p=integers(), s=integers(), t=integers()) @settings(max_examples=5) def test_random_score(self, r, p, s, t): """Test score method with random scores using the hypothesis library.""" - game = axl.Game(r, s, t, p) + game = axl.IpdGame(r, s, t, p) self.assertEqual(game.score((C, C)), (r, r)) self.assertEqual(game.score((D, D)), (p, p)) self.assertEqual(game.score((C, D)), (s, t)) diff --git a/axelrod/tests/unit/test_interaction_utils.py b/axelrod/tests/unit/test_interaction_utils.py index 2e1d2c5e1..6ddda717c 100644 --- a/axelrod/tests/unit/test_interaction_utils.py +++ b/axelrod/tests/unit/test_interaction_utils.py @@ -129,7 +129,7 @@ def test_compute_sparklines(self): def test_read_interactions_from_file(self): tmp_file = tempfile.NamedTemporaryFile(mode="w", delete=False) players = [axl.Cooperator(), axl.Defector()] - tournament = axl.Tournament(players=players, turns=2, repetitions=3) + tournament = axl.IpdTournament(players=players, turns=2, repetitions=3) tournament.play(filename=tmp_file.name) tmp_file.close() expected_interactions = { diff --git a/axelrod/tests/unit/test_ipd_adapter.py b/axelrod/tests/unit/test_ipd_adapter.py new file mode 100644 index 000000000..01eec6eec --- /dev/null +++ b/axelrod/tests/unit/test_ipd_adapter.py @@ -0,0 +1,1938 @@ +"""Tests adapters defined in ipd_adapter. + +Tests that the public API (public methods and variables with accessors) matches +API on the Ipd versions of Player, Game, Match, and Tournament, by copying +relevant portions of those tests. 
+""" + +from collections import Counter +import io +import logging +from multiprocessing import Queue, cpu_count +import os +import pathlib +import pickle +import random +import unittest +from unittest.mock import MagicMock, patch +import warnings + +from hypothesis import example, given, settings +from hypothesis.strategies import assume, floats, integers, sampled_from +import numpy as np +import pandas as pd +from tqdm import tqdm + +import axelrod as axl +from axelrod.deterministic_cache import DeterministicCache +from axelrod.load_data_ import axl_filename +from axelrod.player import simultaneous_play +from axelrod.tests.property import ( + games, + prob_end_tournaments, + spatial_tournaments, + strategy_lists, + tournaments, +) +from axelrod.tournament import _close_objects + +C, D = axl.Action.C, axl.Action.D + +test_strategies = [ + axl.Cooperator, + axl.TitForTat, + axl.Defector, + axl.Grudger, + axl.GoByMajority, +] +test_repetitions = 5 +test_turns = 100 + +test_prob_end = 0.5 + +test_edges = [(0, 1), (1, 2), (3, 4)] + +deterministic_strategies = [ + s + for s in axl.short_run_time_strategies + if not axl.Classifiers["stochastic"](s()) +] + +short_run_time_short_mem = [ + s + for s in axl.short_run_time_strategies + if axl.Classifiers["memory_depth"](s()) <= 10 +] + +# Classifiers for TitForTat +_test_classifier = { + "memory_depth": 1, # Four-Vector = (1.,0.,1.,0.) + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, +} + + +class RecordedTQDM(tqdm): + """This is a tqdm.tqdm that keeps a record of every RecordedTQDM created. 
+ It is used to test that progress bars were correctly created and then + closed.""" + + record = [] + + def __init__(self, *args, **kwargs): + super(RecordedTQDM, self).__init__(*args, **kwargs) + RecordedTQDM.record.append(self) + + @classmethod + def reset_record(cls): + cls.record = [] + + +class TestGame(unittest.TestCase): + def test_default_scores(self): + expected_scores = { + (C, D): (0, 5), + (D, C): (5, 0), + (D, D): (1, 1), + (C, C): (3, 3), + } + self.assertEqual(axl.Game().scores, expected_scores) + + def test_default_RPST(self): + expected_values = (3, 1, 0, 5) + self.assertEqual(axl.Game().RPST(), expected_values) + + def test_default_score(self): + game = axl.Game() + self.assertEqual(game.score((C, C)), (3, 3)) + self.assertEqual(game.score((D, D)), (1, 1)) + self.assertEqual(game.score((C, D)), (0, 5)) + self.assertEqual(game.score((D, C)), (5, 0)) + + def test_default_equality(self): + self.assertEqual(axl.Game(), axl.Game()) + + def test_not_default_equality(self): + self.assertEqual(axl.Game(1, 2, 3, 4), axl.Game(1, 2, 3, 4)) + self.assertNotEqual(axl.Game(1, 2, 3, 4), axl.Game(1, 2, 3, 5)) + self.assertNotEqual(axl.Game(1, 2, 3, 4), axl.Game()) + + def test_wrong_class_equality(self): + self.assertNotEqual(axl.Game(), "wrong class") + + @given(r=integers(), p=integers(), s=integers(), t=integers()) + @settings(max_examples=5) + def test_random_init(self, r, p, s, t): + """Test init with random scores using the hypothesis library.""" + expected_scores = { + (C, D): (s, t), + (D, C): (t, s), + (D, D): (p, p), + (C, C): (r, r), + } + game = axl.Game(r, s, t, p) + self.assertEqual(game.scores, expected_scores) + + @given(r=integers(), p=integers(), s=integers(), t=integers()) + @settings(max_examples=5) + def test_random_RPST(self, r, p, s, t): + """Test RPST method with random scores using the hypothesis library.""" + game = axl.Game(r, s, t, p) + self.assertEqual(game.RPST(), (r, p, s, t)) + + @given(r=integers(), p=integers(), s=integers(), 
t=integers()) + @settings(max_examples=5) + def test_random_score(self, r, p, s, t): + """Test score method with random scores using the hypothesis library.""" + game = axl.Game(r, s, t, p) + self.assertEqual(game.score((C, C)), (r, r)) + self.assertEqual(game.score((D, D)), (p, p)) + self.assertEqual(game.score((C, D)), (s, t)) + self.assertEqual(game.score((D, C)), (t, s)) + + @given(game=games()) + @settings(max_examples=5) + def test_random_repr(self, game): + """Test repr with random scores using the hypothesis library.""" + expected_repr = "Axelrod game: (R,P,S,T) = {}".format(game.RPST()) + self.assertEqual(expected_repr, game.__repr__()) + self.assertEqual(expected_repr, str(game)) + + def test_scores_setter(self): + expected_scores = { + (C, D): (1, 2), + (D, C): (2, 1), + (D, D): (3, 3), + (C, C): (4, 4), + } + game = axl.Game() + game.scores = expected_scores + self.assertDictEqual(game.scores, expected_scores) + + +class TestMatch(unittest.TestCase): + @given(turns=integers(min_value=1, max_value=200), game=games()) + @example(turns=5, game=axl.DefaultGame) + def test_init(self, turns, game): + p1, p2 = axl.Cooperator(), axl.Cooperator() + match = axl.Match((p1, p2), turns, game=game) + self.assertEqual(match.result, []) + self.assertEqual(match.players, [p1, p2]) + self.assertEqual(match.turns, turns) + self.assertEqual(match.prob_end, 0) + self.assertEqual(match.noise, 0) + self.assertEqual(match.game.RPST(), game.RPST()) + + self.assertEqual(match.players[0].match_attributes["length"], turns) + self.assertEqual(match._cache, {}) + + @given(prob_end=floats(min_value=0, max_value=1), game=games()) + def test_init_with_prob_end(self, prob_end, game): + p1, p2 = axl.Cooperator(), axl.Cooperator() + match = axl.Match((p1, p2), prob_end=prob_end, game=game) + self.assertEqual(match.result, []) + self.assertEqual(match.players, [p1, p2]) + self.assertEqual(match.turns, float("inf")) + self.assertEqual(match.prob_end, prob_end) + 
self.assertEqual(match.noise, 0) + self.assertEqual(match.game.RPST(), game.RPST()) + + self.assertEqual( + match.players[0].match_attributes["length"], float("inf") + ) + self.assertEqual(match._cache, {}) + + @given( + prob_end=floats(min_value=0, max_value=1), + turns=integers(min_value=1, max_value=200), + game=games(), + ) + def test_init_with_prob_end_and_turns(self, turns, prob_end, game): + p1, p2 = axl.Cooperator(), axl.Cooperator() + match = axl.Match((p1, p2), turns=turns, prob_end=prob_end, game=game) + self.assertEqual(match.result, []) + self.assertEqual(match.players, [p1, p2]) + self.assertEqual(match.turns, turns) + self.assertEqual(match.prob_end, prob_end) + self.assertEqual(match.noise, 0) + self.assertEqual(match.game.RPST(), game.RPST()) + + self.assertEqual( + match.players[0].match_attributes["length"], float("inf") + ) + self.assertEqual(match._cache, {}) + + def test_default_init(self): + p1, p2 = axl.Cooperator(), axl.Cooperator() + match = axl.Match((p1, p2)) + self.assertEqual(match.result, []) + self.assertEqual(match.players, [p1, p2]) + self.assertEqual(match.turns, axl.DEFAULT_TURNS) + self.assertEqual(match.prob_end, 0) + self.assertEqual(match.noise, 0) + self.assertEqual(match.game.RPST(), (3, 1, 0, 5)) + + self.assertEqual( + match.players[0].match_attributes["length"], axl.DEFAULT_TURNS + ) + self.assertEqual(match._cache, {}) + + def test_example_prob_end(self): + """ + Test that matches have diff length and also that cache has recorded the + outcomes + """ + p1, p2 = axl.Cooperator(), axl.Cooperator() + match = axl.Match((p1, p2), prob_end=0.5) + expected_lengths = [3, 1, 5] + for seed, expected_length in zip(range(3), expected_lengths): + axl.seed(seed) + self.assertEqual( + match.players[0].match_attributes["length"], float("inf") + ) + self.assertEqual(len(match.play()), expected_length) + self.assertEqual(match.noise, 0) + self.assertEqual(match.game.RPST(), (3, 1, 0, 5)) + self.assertEqual(len(match._cache), 1) + 
self.assertEqual(match._cache[(p1, p2)], [(C, C)] * 5) + + @given(turns=integers(min_value=1, max_value=200), game=games()) + @example(turns=5, game=axl.DefaultGame) + def test_non_default_attributes(self, turns, game): + p1, p2 = axl.Cooperator(), axl.Cooperator() + match_attributes = {"length": 500, "game": game, "noise": 0.5} + match = axl.Match( + (p1, p2), turns, game=game, match_attributes=match_attributes + ) + self.assertEqual(match.players[0].match_attributes["length"], 500) + self.assertEqual(match.players[0].match_attributes["noise"], 0.5) + + @given(turns=integers(min_value=1, max_value=200)) + @example(turns=5) + def test_len(self, turns): + p1, p2 = axl.Cooperator(), axl.Cooperator() + match = axl.Match((p1, p2), turns) + self.assertEqual(len(match), turns) + + def test_len_error(self): + """ + Length is not defined if it is infinite. + """ + p1, p2 = axl.Cooperator(), axl.Cooperator() + match = axl.Match((p1, p2), prob_end=0.5) + with self.assertRaises(TypeError): + len(match) + + @given(p=floats(min_value=0, max_value=1)) + def test_stochastic(self, p): + + assume(0 < p < 1) + + p1, p2 = axl.Cooperator(), axl.Cooperator() + match = axl.Match((p1, p2), 5) + self.assertFalse(match._stochastic) + + match = axl.Match((p1, p2), 5, noise=p) + self.assertTrue(match._stochastic) + + p1 = axl.Random() + match = axl.Match((p1, p2), 5) + self.assertTrue(match._stochastic) + + @given(p=floats(min_value=0, max_value=1)) + def test_cache_update_required(self, p): + + assume(0 < p < 1) + + p1, p2 = axl.Cooperator(), axl.Cooperator() + match = axl.Match((p1, p2), 5, noise=p) + self.assertFalse(match._cache_update_required) + + cache = DeterministicCache() + cache.mutable = False + match = axl.Match((p1, p2), 5, deterministic_cache=cache) + self.assertFalse(match._cache_update_required) + + match = axl.Match((p1, p2), 5) + self.assertTrue(match._cache_update_required) + + p1 = axl.Random() + match = axl.Match((p1, p2), 5) + 
self.assertFalse(match._cache_update_required) + + def test_play(self): + cache = DeterministicCache() + players = (axl.Cooperator(), axl.Defector()) + match = axl.Match(players, 3, deterministic_cache=cache) + expected_result = [(C, D), (C, D), (C, D)] + self.assertEqual(match.play(), expected_result) + self.assertEqual( + cache[(axl.Cooperator(), axl.Defector())], expected_result + ) + + # a deliberately incorrect result so we can tell it came from the cache + expected_result = [(C, C), (D, D), (D, C), (C, C), (C, D)] + cache[(axl.Cooperator(), axl.Defector())] = expected_result + match = axl.Match(players, 3, deterministic_cache=cache) + self.assertEqual(match.play(), expected_result[:3]) + + def test_cache_grows(self): + """ + We want to make sure that if we try to use the cache for more turns than + what is stored, then it will instead regenerate the result and overwrite + the cache. + """ + cache = DeterministicCache() + players = (axl.Cooperator(), axl.Defector()) + match = axl.Match(players, 3, deterministic_cache=cache) + expected_result_5_turn = [(C, D), (C, D), (C, D), (C, D), (C, D)] + expected_result_3_turn = [(C, D), (C, D), (C, D)] + self.assertEqual(match.play(), expected_result_3_turn) + match.turns = 5 + self.assertEqual(match.play(), expected_result_5_turn) + # The cache should now hold the 5-turn result.. + self.assertEqual( + cache[(axl.Cooperator(), axl.Defector())], expected_result_5_turn + ) + + def test_cache_doesnt_shrink(self): + """ + We want to make sure that when we access the cache looking for fewer + turns than what is stored, then it will not overwrite the cache with the + shorter result. 
+ """ + cache = DeterministicCache() + players = (axl.Cooperator(), axl.Defector()) + match = axl.Match(players, 5, deterministic_cache=cache) + expected_result_5_turn = [(C, D), (C, D), (C, D), (C, D), (C, D)] + expected_result_3_turn = [(C, D), (C, D), (C, D)] + self.assertEqual(match.play(), expected_result_5_turn) + match.turns = 3 + self.assertEqual(match.play(), expected_result_3_turn) + # The cache should still hold the 5. + self.assertEqual( + cache[(axl.Cooperator(), axl.Defector())], expected_result_5_turn + ) + + def test_scores(self): + player1 = axl.TitForTat() + player2 = axl.Defector() + match = axl.Match((player1, player2), 3) + self.assertEqual(match.scores(), []) + match.play() + self.assertEqual(match.scores(), [(0, 5), (1, 1), (1, 1)]) + + def test_final_score(self): + player1 = axl.TitForTat() + player2 = axl.Defector() + + match = axl.Match((player1, player2), 3) + self.assertEqual(match.final_score(), None) + match.play() + self.assertEqual(match.final_score(), (2, 7)) + + match = axl.Match((player2, player1), 3) + self.assertEqual(match.final_score(), None) + match.play() + self.assertEqual(match.final_score(), (7, 2)) + + def test_final_score_per_turn(self): + turns = 3 + player1 = axl.TitForTat() + player2 = axl.Defector() + + match = axl.Match((player1, player2), turns) + self.assertEqual(match.final_score_per_turn(), None) + match.play() + self.assertEqual(match.final_score_per_turn(), (2 / turns, 7 / turns)) + + match = axl.Match((player2, player1), turns) + self.assertEqual(match.final_score_per_turn(), None) + match.play() + self.assertEqual(match.final_score_per_turn(), (7 / turns, 2 / turns)) + + def test_winner(self): + player1 = axl.TitForTat() + player2 = axl.Defector() + + match = axl.Match((player1, player2), 3) + self.assertEqual(match.winner(), None) + match.play() + self.assertEqual(match.winner(), player2) + + match = axl.Match((player2, player1), 3) + self.assertEqual(match.winner(), None) + match.play() + 
self.assertEqual(match.winner(), player2) + + player1 = axl.Defector() + match = axl.Match((player1, player2), 3) + self.assertEqual(match.winner(), None) + match.play() + self.assertEqual(match.winner(), False) + + def test_cooperation(self): + turns = 3 + player1 = axl.Cooperator() + player2 = axl.Alternator() + + match = axl.Match((player1, player2), turns) + self.assertEqual(match.cooperation(), None) + match.play() + self.assertEqual(match.cooperation(), (3, 2)) + + player1 = axl.Alternator() + player2 = axl.Defector() + + match = axl.Match((player1, player2), turns) + self.assertEqual(match.cooperation(), None) + match.play() + self.assertEqual(match.cooperation(), (2, 0)) + + def test_normalised_cooperation(self): + turns = 3 + player1 = axl.Cooperator() + player2 = axl.Alternator() + + match = axl.Match((player1, player2), turns) + self.assertEqual(match.normalised_cooperation(), None) + match.play() + self.assertEqual(match.normalised_cooperation(), (3 / turns, 2 / turns)) + + player1 = axl.Alternator() + player2 = axl.Defector() + + match = axl.Match((player1, player2), turns) + self.assertEqual(match.normalised_cooperation(), None) + match.play() + self.assertEqual(match.normalised_cooperation(), (2 / turns, 0 / turns)) + + def test_state_distribution(self): + turns = 3 + player1 = axl.Cooperator() + player2 = axl.Alternator() + + match = axl.Match((player1, player2), turns) + self.assertEqual(match.state_distribution(), None) + + match.play() + expected = Counter({(C, C): 2, (C, D): 1}) + self.assertEqual(match.state_distribution(), expected) + + player1 = axl.Alternator() + player2 = axl.Defector() + + match = axl.Match((player1, player2), turns) + self.assertEqual(match.state_distribution(), None) + + match.play() + expected = Counter({(C, D): 2, (D, D): 1}) + self.assertEqual(match.state_distribution(), expected) + + def test_normalised_state_distribution(self): + turns = 3 + player1 = axl.Cooperator() + player2 = axl.Alternator() + + match = 
axl.Match((player1, player2), turns) + self.assertEqual(match.normalised_state_distribution(), None) + + match.play() + expected = Counter({(C, C): 2 / turns, (C, D): 1 / turns}) + self.assertEqual(match.normalised_state_distribution(), expected) + + player1 = axl.Alternator() + player2 = axl.Defector() + + match = axl.Match((player1, player2), turns) + self.assertEqual(match.normalised_state_distribution(), None) + + match.play() + expected = Counter({(C, D): 2 / turns, (D, D): 1 / turns}) + self.assertEqual(match.normalised_state_distribution(), expected) + + def test_sparklines(self): + players = (axl.Cooperator(), axl.Alternator()) + match = axl.Match(players, 4) + match.play() + expected_sparklines = "████\n█ █ " + self.assertEqual(match.sparklines(), expected_sparklines) + expected_sparklines = "XXXX\nXYXY" + self.assertEqual(match.sparklines("X", "Y"), expected_sparklines) + + def test_result_setter(self): + players = (axl.Cooperator(), axl.Alternator()) + match = axl.Match(players) + + expected_result = [(C, C), (C, D), (D, C)] + match.result = expected_result + self.assertListEqual(match.result, expected_result) + + def test_noise_setter(self): + players = (axl.Cooperator(), axl.Alternator()) + match = axl.Match(players) + + expected_noise = 0.123 + match.noise = expected_noise + self.assertAlmostEqual(match.noise, expected_noise) + + def test_game_setter(self): + players = (axl.Cooperator(), axl.Alternator()) + match = axl.Match(players) + + expected_game = axl.Game(1, 2, 3, 4) + match.game = expected_game + self.assertEqual(match.game, expected_game) + + def test_cache_setter(self): + players = (axl.Cooperator(), axl.Alternator()) + match = axl.Match(players) + + expected_cache = axl.DeterministicCache() + expected_cache.mutable = False # Non-default value + match._cache = expected_cache + self.assertFalse(match._cache.mutable) + + def test_prob_end_setter(self): + players = (axl.Cooperator(), axl.Alternator()) + match = axl.Match(players) + + 
expected_prob_end = 0.123 + match.prob_end = expected_prob_end + self.assertAlmostEqual(match.prob_end, expected_prob_end) + + def test_turns_setter(self): + players = (axl.Cooperator(), axl.Alternator()) + match = axl.Match(players) + + expected_turns = 123 + match.turns = expected_turns + self.assertEqual(match.turns, expected_turns) + + def test_reset_setter(self): + players = (axl.Cooperator(), axl.Alternator()) + match = axl.Match(players) + + expected_reset = False # Non-default value + match.reset = expected_reset + self.assertFalse(match.reset) + + +class TestTournament(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.game = axl.Game() + cls.players = [s() for s in test_strategies] + cls.test_name = "test" + cls.test_repetitions = test_repetitions + cls.test_turns = test_turns + + cls.expected_payoff = [ + [600, 600, 0, 600, 600], + [600, 600, 199, 600, 600], + [1000, 204, 200, 204, 204], + [600, 600, 199, 600, 600], + [600, 600, 199, 600, 600], + ] + + cls.expected_cooperation = [ + [200, 200, 200, 200, 200], + [200, 200, 1, 200, 200], + [0, 0, 0, 0, 0], + [200, 200, 1, 200, 200], + [200, 200, 1, 200, 200], + ] + + path = pathlib.Path("test_outputs/test_tournament.csv") + cls.filename = axl_filename(path) + + def setUp(self): + self.test_tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=2, + repetitions=1, + ) + + def test_init(self): + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=self.test_turns, + noise=0.2, + ) + self.assertEqual(len(tournament.players), len(test_strategies)) + self.assertIsInstance( + tournament.players[0].match_attributes["game"], axl.IpdGame + ) + self.assertEqual(tournament.game.score((C, C)), (3, 3)) + self.assertEqual(tournament.turns, self.test_turns) + self.assertEqual(tournament.repetitions, 10) + self.assertEqual(tournament.name, "test") + self.assertIsInstance(tournament._logger, logging.Logger) + 
self.assertEqual(tournament.noise, 0.2) + anonymous_tournament = axl.Tournament(players=self.players) + self.assertEqual(anonymous_tournament.name, "axelrod") + + def test_init_with_match_attributes(self): + tournament = axl.Tournament( + players=self.players, match_attributes={"length": float("inf")} + ) + mg = tournament.match_generator + match_params = mg.build_single_match_params() + self.assertEqual( + match_params["match_attributes"], {"length": float("inf")} + ) + + def test_warning(self): + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=10, + repetitions=1, + ) + with warnings.catch_warnings(record=True) as w: + # Check that a warning is raised if no results set is built and no + # filename is given + results = tournament.play(build_results=False, progress_bar=False) + self.assertEqual(len(w), 1) + + with warnings.catch_warnings(record=True) as w: + # Check that no warning is raised if no results set is built and a + # is filename given + + tournament.play( + build_results=False, filename=self.filename, progress_bar=False + ) + self.assertEqual(len(w), 0) + + def test_setup_output_with_filename(self): + self.test_tournament.setup_output(self.filename) + + self.assertEqual(self.test_tournament.filename, self.filename) + self.assertIsNone(self.test_tournament._temp_file_descriptor) + self.assertFalse(hasattr(self.test_tournament, "interactions_dict")) + + def test_setup_output_no_filename(self): + self.test_tournament.setup_output() + + self.assertIsInstance(self.test_tournament.filename, str) + self.assertIsInstance(self.test_tournament._temp_file_descriptor, int) + self.assertFalse(hasattr(self.test_tournament, "interactions_dict")) + + os.close(self.test_tournament._temp_file_descriptor) + os.remove(self.test_tournament.filename) + + def test_play_resets_num_interactions(self): + self.assertEqual(self.test_tournament.num_interactions, 0) + self.test_tournament.play(progress_bar=False) + 
self.assertEqual(self.test_tournament.num_interactions, 15) + + self.test_tournament.play(progress_bar=False) + self.assertEqual(self.test_tournament.num_interactions, 15) + + def test_play_changes_use_progress_bar(self): + self.assertTrue(self.test_tournament.use_progress_bar) + + self.test_tournament.play(progress_bar=False) + self.assertFalse(self.test_tournament.use_progress_bar) + + self.test_tournament.play(progress_bar=True) + self.assertTrue(self.test_tournament.use_progress_bar) + + def test_play_changes_temp_file_descriptor(self): + self.assertIsNone(self.test_tournament._temp_file_descriptor) + + # No file descriptor for a named file. + self.test_tournament.play(filename=self.filename, progress_bar=False) + self.assertIsNone(self.test_tournament._temp_file_descriptor) + + # Temp file creates file descriptor. + self.test_tournament.play(filename=None, progress_bar=False) + self.assertIsInstance(self.test_tournament._temp_file_descriptor, int) + + def test_play_tempfile_removed(self): + self.test_tournament.play(filename=None, progress_bar=False) + + self.assertFalse(os.path.isfile(self.test_tournament.filename)) + + def test_play_resets_filename_and_temp_file_descriptor_each_time(self): + self.test_tournament.play(progress_bar=False) + self.assertIsInstance(self.test_tournament._temp_file_descriptor, int) + self.assertIsInstance(self.test_tournament.filename, str) + old_filename = self.test_tournament.filename + + self.test_tournament.play(filename=self.filename, progress_bar=False) + self.assertIsNone(self.test_tournament._temp_file_descriptor) + self.assertEqual(self.test_tournament.filename, self.filename) + self.assertNotEqual(old_filename, self.test_tournament.filename) + + self.test_tournament.play(progress_bar=False) + self.assertIsInstance(self.test_tournament._temp_file_descriptor, int) + self.assertIsInstance(self.test_tournament.filename, str) + self.assertNotEqual(old_filename, self.test_tournament.filename) + 
self.assertNotEqual(self.test_tournament.filename, self.filename) + + def test_get_file_objects_no_filename(self): + file, writer = self.test_tournament._tournament._get_file_objects() + self.assertIsNone(file) + self.assertIsNone(writer) + + def test_get_file_object_with_filename(self): + self.test_tournament.filename = self.filename + ( + file_object, + writer, + ) = self.test_tournament._tournament._get_file_objects() + self.assertIsInstance(file_object, io.TextIOWrapper) + self.assertEqual(writer.__class__.__name__, "writer") + file_object.close() + + def test_get_progress_bar(self): + self.test_tournament.use_progress_bar = False + pbar = self.test_tournament._tournament._get_progress_bar() + self.assertIsNone(pbar) + + self.test_tournament.use_progress_bar = True + pbar = self.test_tournament._tournament._get_progress_bar() + self.assertIsInstance(pbar, tqdm) + self.assertEqual(pbar.desc, "Playing matches") + self.assertEqual(pbar.n, 0) + self.assertEqual(pbar.total, self.test_tournament.match_generator.size) + + new_edges = [(0, 1), (1, 2), (2, 3), (3, 4)] + new_tournament = axl.Tournament(players=self.players, edges=new_edges) + new_tournament.use_progress_bar = True + pbar = new_tournament._tournament._get_progress_bar() + self.assertEqual(pbar.desc, "Playing matches") + self.assertEqual(pbar.n, 0) + self.assertEqual(pbar.total, len(new_edges)) + + def test_serial_play(self): + # Test that we get an instance of ResultSet + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + results = tournament.play(progress_bar=False) + self.assertIsInstance(results, axl.ResultSet) + + # Test that _run_serial_repetitions is called with empty matches list + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + results = tournament.play(progress_bar=False) 
+ self.assertEqual(tournament.num_interactions, 75) + + def test_serial_play_with_different_game(self): + # Test that a non default game is passed to the result set + game = axl.Game(p=-1, r=-1, s=-1, t=-1) + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=game, + turns=1, + repetitions=1, + ) + results = tournament.play(progress_bar=False) + self.assertLessEqual(np.max(results.scores), 0) + + @patch("tqdm.tqdm", RecordedTQDM) + def test_no_progress_bar_play(self): + """Test that progress bar is not created for progress_bar=False""" + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + + # Test with build results + RecordedTQDM.reset_record() + results = tournament.play(progress_bar=False) + self.assertIsInstance(results, axl.ResultSet) + # Check that no progress bar was created. + self.assertEqual(RecordedTQDM.record, []) + + # Test without build results + RecordedTQDM.reset_record() + results = tournament.play( + progress_bar=False, build_results=False, filename=self.filename + ) + self.assertIsNone(results) + self.assertEqual(RecordedTQDM.record, []) + + def assert_play_pbar_correct_total_and_finished(self, pbar, total): + self.assertEqual(pbar.desc, "Playing matches") + self.assertEqual(pbar.total, total) + self.assertEqual(pbar.n, total) + self.assertTrue(pbar.disable, True) + + @patch("tqdm.tqdm", RecordedTQDM) + def test_progress_bar_play(self): + """Test that progress bar is created by default and with True argument""" + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + + RecordedTQDM.reset_record() + results = tournament.play() + self.assertIsInstance(results, axl.ResultSet) + # Check that progress bar was created, updated and closed. 
+ self.assertEqual(len(RecordedTQDM.record), 2) + play_pbar = RecordedTQDM.record[0] + self.assert_play_pbar_correct_total_and_finished(play_pbar, total=15) + # Check all progress bars are closed. + self.assertTrue(all(pbar.disable for pbar in RecordedTQDM.record)) + + RecordedTQDM.reset_record() + results = tournament.play(progress_bar=True) + self.assertIsInstance(results, axl.ResultSet) + self.assertEqual(len(RecordedTQDM.record), 2) + play_pbar = RecordedTQDM.record[0] + self.assert_play_pbar_correct_total_and_finished(play_pbar, total=15) + + # Test without build results + RecordedTQDM.reset_record() + results = tournament.play( + progress_bar=True, build_results=False, filename=self.filename + ) + self.assertIsNone(results) + self.assertEqual(len(RecordedTQDM.record), 1) + play_pbar = RecordedTQDM.record[0] + self.assert_play_pbar_correct_total_and_finished(play_pbar, total=15) + + @patch("tqdm.tqdm", RecordedTQDM) + def test_progress_bar_play_parallel(self): + """Test that tournament plays when asking for progress bar for parallel + tournament and that progress bar is created.""" + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + + # progress_bar = False + RecordedTQDM.reset_record() + results = tournament.play(progress_bar=False, processes=2) + self.assertEqual(RecordedTQDM.record, []) + self.assertIsInstance(results, axl.ResultSet) + + # progress_bar = True + RecordedTQDM.reset_record() + results = tournament.play(progress_bar=True, processes=2) + self.assertIsInstance(results, axl.ResultSet) + + self.assertEqual(len(RecordedTQDM.record), 2) + play_pbar = RecordedTQDM.record[0] + self.assert_play_pbar_correct_total_and_finished(play_pbar, total=15) + + # progress_bar is default + RecordedTQDM.reset_record() + results = tournament.play(processes=2) + self.assertIsInstance(results, axl.ResultSet) + + self.assertEqual(len(RecordedTQDM.record), 2) 
+ play_pbar = RecordedTQDM.record[0] + self.assert_play_pbar_correct_total_and_finished(play_pbar, total=15) + + @given( + tournament=tournaments( + min_size=2, + max_size=5, + min_turns=2, + max_turns=5, + min_repetitions=2, + max_repetitions=4, + ) + ) + @settings(max_examples=50) + @example( + tournament=axl.Tournament( + players=[s() for s in test_strategies], + turns=test_turns, + repetitions=test_repetitions, + ) + ) + # These two examples are to make sure #465 is fixed. + # As explained there: https://github.com/Axelrod-Python/Axelrod/issues/465, + # these two examples were identified by hypothesis. + @example( + tournament=axl.Tournament( + players=[axl.BackStabber(), axl.MindReader()], + turns=2, + repetitions=1, + ) + ) + @example( + tournament=axl.Tournament( + players=[axl.BackStabber(), axl.ThueMorse()], turns=2, repetitions=1 + ) + ) + def test_property_serial_play(self, tournament): + """Test serial play using hypothesis""" + # Test that we get an instance of ResultSet + results = tournament.play(progress_bar=False) + self.assertIsInstance(results, axl.ResultSet) + self.assertEqual(results.num_players, len(tournament.players)) + self.assertEqual(results.players, [str(p) for p in tournament.players]) + + def test_parallel_play(self): + # Test that we get an instance of ResultSet + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + results = tournament.play(processes=2, progress_bar=False) + self.assertIsInstance(results, axl.ResultSet) + self.assertEqual(tournament.num_interactions, 75) + + # The following relates to #516 + players = [ + axl.Cooperator(), + axl.Defector(), + axl.BackStabber(), + axl.PSOGambler2_2_2(), + axl.ThueMorse(), + axl.DoubleCrosser(), + ] + tournament = axl.Tournament( + name=self.test_name, + players=players, + game=self.game, + turns=20, + repetitions=self.test_repetitions, + ) + scores = 
tournament.play(processes=2, progress_bar=False).scores + self.assertEqual(len(scores), len(players)) + + def test_parallel_play_with_writing_to_file(self): + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + + results = tournament.play( + processes=2, progress_bar=False, filename=self.filename + ) + self.assertIsInstance(results, axl.ResultSet) + self.assertEqual(tournament.num_interactions, 75) + + def test_run_serial(self): + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + tournament._tournament._write_interactions_to_file = MagicMock( + name="_write_interactions_to_file" + ) + self.assertTrue(tournament._tournament._run_serial()) + + # Get the calls made to write_interactions + calls = ( + tournament._tournament._write_interactions_to_file.call_args_list + ) + self.assertEqual(len(calls), 15) + + def test_run_parallel(self): + class PickleableMock(MagicMock): + def __reduce__(self): + return MagicMock, () + + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + tournament._tournament._write_interactions_to_file = PickleableMock( + name="_write_interactions_to_file" + ) + + # For test coverage purposes. This confirms PickleableMock can be + # pickled exactly once. Windows multi-processing must pickle this Mock + # exactly once during testing. 
+ pickled = pickle.loads(pickle.dumps(tournament)) + self.assertIsInstance( + pickled._tournament._write_interactions_to_file, MagicMock + ) + self.assertRaises(pickle.PicklingError, pickle.dumps, pickled) + + self.assertTrue(tournament._tournament._run_parallel()) + + # Get the calls made to write_interactions + calls = ( + tournament._tournament._write_interactions_to_file.call_args_list + ) + self.assertEqual(len(calls), 15) + + def test_n_workers(self): + max_processes = cpu_count() + + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + self.assertEqual( + tournament._tournament._n_workers(processes=1), max_processes + ) + + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + self.assertEqual( + tournament._tournament._n_workers(processes=max_processes + 2), + max_processes, + ) + + @unittest.skipIf( + cpu_count() < 2, "not supported on single processor machines" + ) + def test_2_workers(self): + # This is a separate test with a skip condition because we + # cannot guarantee that the tests will always run on a machine + # with more than one processor + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + self.assertEqual(tournament._tournament._n_workers(processes=2), 2) + + def test_start_workers(self): + workers = 2 + work_queue = Queue() + done_queue = Queue() + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + chunks = tournament.match_generator.build_match_chunks() + for chunk in chunks: + work_queue.put(chunk) + tournament._tournament._start_workers(workers, work_queue, done_queue) + + stops = 0 + while stops < 
workers: + payoffs = done_queue.get() + if payoffs == "STOP": + stops += 1 + self.assertEqual(stops, workers) + + def test_worker(self): + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + + work_queue = Queue() + chunks = tournament.match_generator.build_match_chunks() + count = 0 + for chunk in chunks: + work_queue.put(chunk) + count += 1 + work_queue.put("STOP") + + done_queue = Queue() + tournament._tournament._worker(work_queue, done_queue) + for r in range(count): + new_matches = done_queue.get() + for index_pair, matches in new_matches.items(): + self.assertIsInstance(index_pair, tuple) + self.assertEqual(len(matches), self.test_repetitions) + queue_stop = done_queue.get() + self.assertEqual(queue_stop, "STOP") + + def test_build_result_set(self): + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + results = tournament.play(progress_bar=False) + self.assertIsInstance(results, axl.ResultSet) + + def test_no_build_result_set(self): + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + + tournament._tournament._calculate_results = MagicMock( + name="_calculate_results" + ) + # Mocking this as it is called by play + self.assertIsNone( + tournament.play( + filename=self.filename, progress_bar=False, build_results=False + ) + ) + + # Get the calls made to write_interactions + calls = tournament._tournament._calculate_results.call_args_list + self.assertEqual(len(calls), 0) + + @given(turns=integers(min_value=1, max_value=200)) + @settings(max_examples=5) + @example(turns=3) + @example(turns=axl.DEFAULT_TURNS) + def test_play_matches(self, turns): + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + 
game=self.game, + repetitions=self.test_repetitions, + ) + + def make_chunk_generator(): + for player1_index in range(len(self.players)): + for player2_index in range(player1_index, len(self.players)): + index_pair = (player1_index, player2_index) + match_params = {"turns": turns, "game": self.game} + yield (index_pair, match_params, self.test_repetitions) + + chunk_generator = make_chunk_generator() + interactions = {} + for chunk in chunk_generator: + result = tournament._tournament._play_matches(chunk) + for index_pair, inters in result.items(): + try: + interactions[index_pair].append(inters) + except KeyError: + interactions[index_pair] = [inters] + + self.assertEqual(len(interactions), 15) + + for index_pair, inter in interactions.items(): + self.assertEqual(len(index_pair), 2) + for plays in inter: + # Check that have the expected number of repetitions + self.assertEqual(len(plays), self.test_repetitions) + for repetition in plays: + actions, results = repetition + self.assertEqual(len(actions), turns) + self.assertEqual(len(results), 10) + + # Check that matches no longer exist + self.assertEqual((len(list(chunk_generator))), 0) + + def test_match_cache_is_used(self): + """ + Create two Random players that are classified as deterministic. + As they are deterministic the cache will be used. 
+ """ + FakeRandom = axl.Random + FakeRandom.classifier["stochastic"] = False + p1 = FakeRandom() + p2 = FakeRandom() + tournament = axl.Tournament((p1, p2), turns=5, repetitions=2) + results = tournament.play(progress_bar=False) + for player_scores in results.scores: + self.assertEqual(player_scores[0], player_scores[1]) + + def test_write_interactions(self): + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=2, + repetitions=2, + ) + tournament._tournament._write_interactions_to_file = MagicMock( + name="_write_interactions_to_file" + ) + # Mocking this as it is called by play + self.assertIsNone( + tournament.play( + filename=self.filename, progress_bar=False, build_results=False + ) + ) + + # Get the calls made to write_interactions + calls = ( + tournament._tournament._write_interactions_to_file.call_args_list + ) + self.assertEqual(len(calls), 15) + + def test_write_to_csv_with_results(self): + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=2, + repetitions=2, + ) + tournament.play(filename=self.filename, progress_bar=False) + df = pd.read_csv(self.filename) + path = pathlib.Path("test_outputs/expected_test_tournament.csv") + expected_df = pd.read_csv(axl_filename(path)) + self.assertTrue(df.equals(expected_df)) + + def test_write_to_csv_without_results(self): + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=2, + repetitions=2, + ) + tournament.play( + filename=self.filename, progress_bar=False, build_results=False + ) + df = pd.read_csv(self.filename) + path = pathlib.Path( + "test_outputs/expected_test_tournament_no_results.csv" + ) + expected_df = pd.read_csv(axl_filename(path)) + self.assertTrue(df.equals(expected_df)) + + def test_players_setter(self): + expected_players = [axl.Cooperator(), axl.Defector()] + self.test_tournament.players = expected_players + 
self.assertListEqual(self.test_tournament.players, expected_players) + + def test_game(self): + expected_game = axl.Game(1, 2, 3, 4) + self.test_tournament.players = expected_game + self.assertEqual(self.test_tournament.players, expected_game) + + def test_turns_setter(self): + expected_turns = 123 + self.test_tournament.turns = expected_turns + self.assertEqual(self.test_tournament.turns, expected_turns) + + def test_repetitions_setter(self): + expected_repetitions = 123 + self.test_tournament.repetitions = expected_repetitions + self.assertEqual(self.test_tournament.repetitions, expected_repetitions) + + def test_name_setter(self): + expected_name = "name_to_set" + self.test_tournament.name = expected_name + self.assertEqual(self.test_tournament.name, expected_name) + + def test_noise_setter(self): + expected_noise = 0.123 + self.test_tournament.noise = expected_noise + self.assertAlmostEqual(self.test_tournament.noise, expected_noise) + + def test_match_generator_setter(self): + expected_match_generator_turns = 123 + self.test_tournament.match_generator.turns = ( + expected_match_generator_turns + ) + self.assertEqual( + self.test_tournament.match_generator.turns, + expected_match_generator_turns, + ) + + def test_num_interactions_setter(self): + expected_num_interactions = 123 + self.test_tournament.num_interactions = expected_num_interactions + self.assertEqual( + self.test_tournament.num_interactions, expected_num_interactions + ) + + def test_use_progress_bar_setter(self): + expected_use_progress_bar = False + self.test_tournament.use_progress_bar = expected_use_progress_bar + self.assertFalse(self.test_tournament.use_progress_bar) + + def test_filename_setter(self): + expected_filename = "fn.txt" + self.test_tournament.filename = expected_filename + self.assertEqual(self.test_tournament.filename, expected_filename) + + +class TestProbEndTournament(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.game = axl.Game() + cls.players = [s() for s in 
test_strategies] + cls.test_name = "test" + cls.test_repetitions = test_repetitions + cls.test_prob_end = test_prob_end + + def test_init(self): + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + prob_end=self.test_prob_end, + noise=0.2, + ) + self.assertEqual( + tournament.match_generator.prob_end, tournament.prob_end + ) + self.assertEqual(len(tournament.players), len(test_strategies)) + self.assertEqual(tournament.game.score((C, C)), (3, 3)) + self.assertIsNone(tournament.turns) + self.assertEqual(tournament.repetitions, 10) + self.assertEqual(tournament.name, "test") + self.assertIsInstance(tournament._logger, logging.Logger) + self.assertEqual(tournament.noise, 0.2) + anonymous_tournament = axl.Tournament(players=self.players) + self.assertEqual(anonymous_tournament.name, "axelrod") + + @given( + tournament=prob_end_tournaments( + min_size=2, + max_size=5, + min_prob_end=0.1, + max_prob_end=0.9, + min_repetitions=2, + max_repetitions=4, + ) + ) + @settings(max_examples=5) + @example( + tournament=axl.Tournament( + players=[s() for s in test_strategies], + prob_end=0.2, + repetitions=test_repetitions, + ) + ) + # These two examples are to make sure #465 is fixed. + # As explained there: https://github.com/Axelrod-Python/Axelrod/issues/465, + # these two examples were identified by hypothesis. 
+ @example( + tournament=axl.Tournament( + players=[axl.BackStabber(), axl.MindReader()], + prob_end=0.2, + repetitions=1, + ) + ) + @example( + tournament=axl.Tournament( + players=[axl.ThueMorse(), axl.MindReader()], + prob_end=0.2, + repetitions=1, + ) + ) + def test_property_serial_play(self, tournament): + """Test serial play using hypothesis""" + # Test that we get an instance of ResultSet + results = tournament.play(progress_bar=False) + self.assertIsInstance(results, axl.ResultSet) + self.assertEqual(results.num_players, len(tournament.players)) + self.assertEqual(results.players, [str(p) for p in tournament.players]) + + def test_prob_end_setter(self): + # create a round robin tournament + players = [axl.Cooperator(), axl.Defector()] + tournament = axl.Tournament(players) + + expected_prob_end = 0.123 + tournament.prob_end = expected_prob_end + self.assertAlmostEqual(tournament.prob_end, expected_prob_end) + + +class TestSpatialTournament(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.game = axl.Game() + cls.players = [s() for s in test_strategies] + cls.test_name = "test" + cls.test_repetitions = test_repetitions + cls.test_turns = test_turns + cls.test_edges = test_edges + + def test_init(self): + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=self.test_turns, + edges=self.test_edges, + noise=0.2, + ) + self.assertEqual(tournament.match_generator.edges, tournament.edges) + self.assertEqual(len(tournament.players), len(test_strategies)) + self.assertEqual(tournament.game.score((C, C)), (3, 3)) + self.assertEqual(tournament.turns, 100) + self.assertEqual(tournament.repetitions, 10) + self.assertEqual(tournament.name, "test") + self.assertIsInstance(tournament._logger, logging.Logger) + self.assertEqual(tournament.noise, 0.2) + self.assertEqual(tournament.match_generator.noise, 0.2) + anonymous_tournament = axl.Tournament(players=self.players) + 
self.assertEqual(anonymous_tournament.name, "axelrod") + + @given( + strategies=strategy_lists( + strategies=deterministic_strategies, min_size=2, max_size=2 + ), + turns=integers(min_value=1, max_value=20), + repetitions=integers(min_value=1, max_value=5), + noise=floats(min_value=0, max_value=1), + seed=integers(min_value=0, max_value=4294967295), + ) + @settings(max_examples=5) + def test_complete_tournament( + self, strategies, turns, repetitions, noise, seed + ): + """ + A test to check that a spatial tournament on the complete multigraph + gives the same results as the round robin. + """ + + players = [s() for s in strategies] + # edges + edges = [] + for i in range(0, len(players)): + for j in range(i, len(players)): + edges.append((i, j)) + + # create a round robin tournament + tournament = axl.Tournament( + players, repetitions=repetitions, turns=turns, noise=noise + ) + # create a complete spatial tournament + spatial_tournament = axl.Tournament( + players, + repetitions=repetitions, + turns=turns, + noise=noise, + edges=edges, + ) + + axl.seed(seed) + results = tournament.play(progress_bar=False) + axl.seed(seed) + spatial_results = spatial_tournament.play(progress_bar=False) + + self.assertEqual(results.ranked_names, spatial_results.ranked_names) + self.assertEqual(results.num_players, spatial_results.num_players) + self.assertEqual(results.repetitions, spatial_results.repetitions) + self.assertEqual( + results.payoff_diffs_means, spatial_results.payoff_diffs_means + ) + self.assertEqual(results.payoff_matrix, spatial_results.payoff_matrix) + self.assertEqual(results.payoff_stddevs, spatial_results.payoff_stddevs) + self.assertEqual(results.payoffs, spatial_results.payoffs) + self.assertEqual( + results.cooperating_rating, spatial_results.cooperating_rating + ) + self.assertEqual(results.cooperation, spatial_results.cooperation) + self.assertEqual( + results.normalised_cooperation, + spatial_results.normalised_cooperation, + ) + self.assertEqual( + 
results.normalised_scores, spatial_results.normalised_scores + ) + self.assertEqual( + results.good_partner_matrix, spatial_results.good_partner_matrix + ) + self.assertEqual( + results.good_partner_rating, spatial_results.good_partner_rating + ) + + def test_particular_tournament(self): + """A test for a tournament that has caused failures during some bug + fixing""" + players = [ + axl.Cooperator(), + axl.Defector(), + axl.TitForTat(), + axl.Grudger(), + ] + edges = [(0, 2), (0, 3), (1, 2), (1, 3)] + tournament = axl.Tournament(players, edges=edges) + results = tournament.play(progress_bar=False) + expected_ranked_names = [ + "Cooperator", + "Tit For Tat", + "Grudger", + "Defector", + ] + self.assertEqual(results.ranked_names, expected_ranked_names) + + # Check that this tournament runs with noise + tournament = axl.Tournament(players, edges=edges, noise=0.5) + results = tournament.play(progress_bar=False) + self.assertIsInstance(results, axl.ResultSet) + + def test_edges_setter(self): + # create a round robin tournament + players = [axl.Cooperator(), axl.Defector()] + tournament = axl.Tournament(players) + + expected_edges = [(1, 2), (3, 4)] + tournament.edges = expected_edges + self.assertListEqual(tournament.edges, expected_edges) + + +class TestProbEndingSpatialTournament(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.game = axl.Game() + cls.players = [s() for s in test_strategies] + cls.test_name = "test" + cls.test_repetitions = test_repetitions + cls.test_prob_end = test_prob_end + cls.test_edges = test_edges + + def test_init(self): + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + prob_end=self.test_prob_end, + edges=self.test_edges, + noise=0.2, + ) + self.assertEqual(tournament.match_generator.edges, tournament.edges) + self.assertEqual(len(tournament.players), len(test_strategies)) + self.assertEqual(tournament.game.score((C, C)), (3, 3)) + self.assertIsNone(tournament.turns) + 
self.assertEqual(tournament.repetitions, 10) + self.assertEqual(tournament.name, "test") + self.assertIsInstance(tournament._logger, logging.Logger) + self.assertEqual(tournament.noise, 0.2) + self.assertEqual(tournament.match_generator.noise, 0.2) + self.assertEqual(tournament.prob_end, self.test_prob_end) + + @given( + strategies=strategy_lists( + strategies=deterministic_strategies, min_size=2, max_size=2 + ), + prob_end=floats(min_value=0.1, max_value=0.9), + reps=integers(min_value=1, max_value=3), + seed=integers(min_value=0, max_value=4294967295), + ) + @settings(max_examples=5) + def test_complete_tournament(self, strategies, prob_end, seed, reps): + """ + A test to check that a spatial tournament on the complete graph + gives the same results as the round robin. + """ + players = [s() for s in strategies] + + # create a prob end round robin tournament + tournament = axl.Tournament( + players, prob_end=prob_end, repetitions=reps + ) + axl.seed(seed) + results = tournament.play(progress_bar=False) + + # create a complete spatial tournament + # edges + edges = [ + (i, j) for i in range(len(players)) for j in range(i, len(players)) + ] + + spatial_tournament = axl.Tournament( + players, prob_end=prob_end, repetitions=reps, edges=edges + ) + axl.seed(seed) + spatial_results = spatial_tournament.play(progress_bar=False) + self.assertEqual(results.match_lengths, spatial_results.match_lengths) + self.assertEqual(results.ranked_names, spatial_results.ranked_names) + self.assertEqual(results.wins, spatial_results.wins) + self.assertEqual(results.scores, spatial_results.scores) + self.assertEqual(results.cooperation, spatial_results.cooperation) + + @given( + tournament=spatial_tournaments( + strategies=axl.basic_strategies, + max_turns=1, + max_noise=0, + max_repetitions=3, + ), + seed=integers(min_value=0, max_value=4294967295), + ) + @settings(max_examples=5) + def test_one_turn_tournament(self, tournament, seed): + """ + Tests that gives same result as the 
corresponding spatial round robin + spatial tournament + """ + prob_end_tour = axl.Tournament( + tournament.players, + prob_end=1, + edges=tournament.edges, + repetitions=tournament.repetitions, + ) + axl.seed(seed) + prob_end_results = prob_end_tour.play(progress_bar=False) + axl.seed(seed) + one_turn_results = tournament.play(progress_bar=False) + self.assertEqual(prob_end_results.scores, one_turn_results.scores) + self.assertEqual(prob_end_results.wins, one_turn_results.wins) + self.assertEqual( + prob_end_results.cooperation, one_turn_results.cooperation + ) + + +class TestHelperFunctions(unittest.TestCase): + def test_close_objects_with_none(self): + self.assertIsNone(_close_objects(None, None)) + + def test_close_objects_with_file_objs(self): + f1 = open("to_delete_1", "w") + f2 = open("to_delete_2", "w") + f2.close() + f2 = open("to_delete_2", "r") + + self.assertFalse(f1.closed) + self.assertFalse(f2.closed) + + _close_objects(f1, f2) + + self.assertTrue(f1.closed) + self.assertTrue(f2.closed) + + os.remove("to_delete_1") + os.remove("to_delete_2") + + def test_close_objects_with_tqdm(self): + pbar_1 = tqdm(range(5)) + pbar_2 = tqdm(total=10, desc="hi", file=io.StringIO()) + + self.assertFalse(pbar_1.disable) + self.assertFalse(pbar_2.disable) + + _close_objects(pbar_1, pbar_2) + + self.assertTrue(pbar_1.disable) + self.assertTrue(pbar_2.disable) + + def test_close_objects_with_different_objects(self): + file = open("to_delete_1", "w") + pbar = tqdm(range(5)) + num = 5 + empty = None + word = "hi" + + _close_objects(file, pbar, num, empty, word) + + self.assertTrue(pbar.disable) + self.assertTrue(file.closed) + + os.remove("to_delete_1") + + +class TestAdapterTitForTat(axl.Player): + name = "Tit For Tat" + classifier = _test_classifier + + def strategy(self, opponent) -> axl.Action: + """This is the actual strategy""" + # First move + if not self.history: + return C + # React to the opponent's last move + if opponent.history[-1] == D: + return D + return C 
+ + +def test_memory(player, opponent, memory_length, seed=0, turns=10): + """ + Checks if a player reacts to the plays of an opponent in the same way if + only the given amount of memory is used. + """ + # Play the match normally. + axl.seed(seed) + match = axl.IpdMatch((player, opponent), turns=turns) + plays = [p[0] for p in match.play()] + + # Play with limited history. + player.reset() + opponent.reset() + player._history = axl.LimitedHistory(memory_length) + opponent._history = axl.LimitedHistory(memory_length) + axl.seed(seed) + match = axl.IpdMatch((player, opponent), turns=turns, reset=False) + limited_plays = [p[0] for p in match.play()] + + return plays == limited_plays + + +class TestPlayer(unittest.TestCase): + """Test Player on TestAdapterTitForTat.""" + + player = TestAdapterTitForTat + name = "TestAdapterTitForTat" + expected_class_classifier = _test_classifier + + def test_initialisation(self): + """Test that the player initiates correctly.""" + if self.__class__ != TestPlayer: + player = self.player() + self.assertEqual(len(player.history), 0) + self.assertEqual( + player.match_attributes, + {"length": -1, "game": axl.DefaultGame, "noise": 0}, + ) + self.assertEqual(player.cooperations, 0) + self.assertEqual(player.defections, 0) + # self.classifier_test(self.expected_class_classifier) + + def test_repr(self): + """Test that the representation is correct.""" + if self.__class__ != TestPlayer: + self.assertEqual(str(self.player()), self.name) + + def test_match_attributes(self): + player = self.player() + # Default + player.set_match_attributes() + t_attrs = player.match_attributes + self.assertEqual(t_attrs["length"], -1) + self.assertEqual(t_attrs["noise"], 0) + self.assertEqual(t_attrs["game"].RPST(), (3, 1, 0, 5)) + + # Common + player.set_match_attributes(length=200) + t_attrs = player.match_attributes + self.assertEqual(t_attrs["length"], 200) + self.assertEqual(t_attrs["noise"], 0) + self.assertEqual(t_attrs["game"].RPST(), (3, 1, 0, 5)) + + 
# Noisy + player.set_match_attributes(length=200, noise=0.5) + t_attrs = player.match_attributes + self.assertEqual(t_attrs["noise"], 0.5) + + def equality_of_players_test(self, p1, p2, seed, opponent): + a1 = opponent() + a2 = opponent() + self.assertEqual(p1, p2) + for player, op in [(p1, a1), (p2, a2)]: + axl.seed(seed) + for _ in range(10): + simultaneous_play(player, op) + self.assertEqual(p1, p2) + p1 = pickle.loads(pickle.dumps(p1)) + p2 = pickle.loads(pickle.dumps(p2)) + self.assertEqual(p1, p2) + + @given( + opponent=sampled_from(short_run_time_short_mem), + seed=integers(min_value=1, max_value=200), + ) + @settings(max_examples=1) + def test_equality_of_clone(self, seed, opponent): + p1 = self.player() + p2 = p1.clone() + self.equality_of_players_test(p1, p2, seed, opponent) + + @given( + opponent=sampled_from(axl.short_run_time_strategies), + seed=integers(min_value=1, max_value=200), + ) + @settings(max_examples=1) + def test_equality_of_pickle_clone(self, seed, opponent): + p1 = self.player() + p2 = pickle.loads(pickle.dumps(p1)) + self.equality_of_players_test(p1, p2, seed, opponent) + + def test_reset_history_and_attributes(self): + """Make sure resetting works correctly.""" + for opponent in [ + axl.Defector(), + axl.Random(), + axl.Alternator(), + axl.Cooperator(), + ]: + + player = self.player() + clone = player.clone() + for seed in range(10): + axl.seed(seed) + player.play(opponent) + + player.reset() + self.assertEqual(player, clone) + + def test_reset_clone(self): + """Make sure history resetting with cloning works correctly, regardless + if self.test_reset() is overwritten.""" + player = self.player() + clone = player.clone() + self.assertEqual(player, clone) + + @given(seed=integers(min_value=1, max_value=20000000)) + @settings(max_examples=1) + def test_clone(self, seed): + # Test that the cloned player produces identical play + player1 = self.player() + if player1.name in ["Darwin", "Human"]: + # Known exceptions + return + player2 = 
player1.clone() + self.assertEqual(len(player2.history), 0) + self.assertEqual(player2.cooperations, 0) + self.assertEqual(player2.defections, 0) + self.assertEqual(player2.state_distribution, {}) + self.assertEqual(player2.classifier, player1.classifier) + self.assertEqual(player2.match_attributes, player1.match_attributes) + + turns = 50 + r = random.random() + for op in [ + axl.Cooperator(), + axl.Defector(), + axl.TitForTat(), + axl.Random(p=r), + ]: + player1.reset() + player2.reset() + for p in [player1, player2]: + axl.seed(seed) + m = axl.IpdMatch((p, op), turns=turns) + m.play() + self.assertEqual(len(player1.history), turns) + self.assertEqual(player1.history, player2.history) + + @given( + strategies=strategy_lists( + max_size=5, strategies=short_run_time_short_mem + ), + seed=integers(min_value=1, max_value=200), + turns=integers(min_value=1, max_value=200), + ) + @settings(max_examples=1) + def test_memory_depth_upper_bound(self, strategies, seed, turns): + """ + Test that the memory depth is indeed an upper bound. + """ + + def get_memory_depth_or_zero(player): + # Some of the test strategies have no entry in the classifiers + # table, so there isn't logic to load default value of zero. 
+ memory = axl.Classifiers["memory_depth"](player) + return memory if memory else 0 + + player = self.player() + memory = get_memory_depth_or_zero(player) + if memory < float("inf"): + for strategy in strategies: + player.reset() + opponent = strategy() + max_memory = max(memory, get_memory_depth_or_zero(opponent)) + self.assertTrue( + test_memory( + player=player, + opponent=opponent, + seed=seed, + turns=turns, + memory_length=max_memory, + ), + msg="{} failed for seed={} and opponent={}".format( + player.name, seed, opponent + ), + ) diff --git a/axelrod/tests/unit/test_load_data.py b/axelrod/tests/unit/test_load_data.py index 6dd880335..d4f92925e 100644 --- a/axelrod/tests/unit/test_load_data.py +++ b/axelrod/tests/unit/test_load_data.py @@ -7,7 +7,7 @@ class TestLoadData(unittest.TestCase): def test_axl_filename(self): - path = pathlib.Path("axelrod/strategies/titfortat.py") + path = pathlib.Path("ipd/strategies/titfortat.py") actual_fn = axl_filename(path) # First go from "unit" up to "tests", then up to "axelrod" diff --git a/axelrod/tests/unit/test_match.py b/axelrod/tests/unit/test_match.py index 71913d103..92d226242 100644 --- a/axelrod/tests/unit/test_match.py +++ b/axelrod/tests/unit/test_match.py @@ -17,7 +17,7 @@ class TestMatch(unittest.TestCase): @example(turns=5, game=axl.DefaultGame) def test_init(self, turns, game): p1, p2 = axl.Cooperator(), axl.Cooperator() - match = axl.Match((p1, p2), turns, game=game) + match = axl.IpdMatch((p1, p2), turns, game=game) self.assertEqual(match.result, []) self.assertEqual(match.players, [p1, p2]) self.assertEqual(match.turns, turns) @@ -31,7 +31,7 @@ def test_init(self, turns, game): @given(prob_end=floats(min_value=0, max_value=1), game=games()) def test_init_with_prob_end(self, prob_end, game): p1, p2 = axl.Cooperator(), axl.Cooperator() - match = axl.Match((p1, p2), prob_end=prob_end, game=game) + match = axl.IpdMatch((p1, p2), prob_end=prob_end, game=game) self.assertEqual(match.result, []) 
self.assertEqual(match.players, [p1, p2]) self.assertEqual(match.turns, float("inf")) @@ -49,7 +49,7 @@ def test_init_with_prob_end(self, prob_end, game): ) def test_init_with_prob_end_and_turns(self, turns, prob_end, game): p1, p2 = axl.Cooperator(), axl.Cooperator() - match = axl.Match((p1, p2), turns=turns, prob_end=prob_end, game=game) + match = axl.IpdMatch((p1, p2), turns=turns, prob_end=prob_end, game=game) self.assertEqual(match.result, []) self.assertEqual(match.players, [p1, p2]) self.assertEqual(match.turns, turns) @@ -62,7 +62,7 @@ def test_init_with_prob_end_and_turns(self, turns, prob_end, game): def test_default_init(self): p1, p2 = axl.Cooperator(), axl.Cooperator() - match = axl.Match((p1, p2)) + match = axl.IpdMatch((p1, p2)) self.assertEqual(match.result, []) self.assertEqual(match.players, [p1, p2]) self.assertEqual(match.turns, axl.DEFAULT_TURNS) @@ -81,7 +81,7 @@ def test_example_prob_end(self): outcomes """ p1, p2 = axl.Cooperator(), axl.Cooperator() - match = axl.Match((p1, p2), prob_end=0.5) + match = axl.IpdMatch((p1, p2), prob_end=0.5) expected_lengths = [3, 1, 5] for seed, expected_length in zip(range(3), expected_lengths): axl.seed(seed) @@ -97,7 +97,7 @@ def test_example_prob_end(self): def test_non_default_attributes(self, turns, game): p1, p2 = axl.Cooperator(), axl.Cooperator() match_attributes = {"length": 500, "game": game, "noise": 0.5} - match = axl.Match( + match = axl.IpdMatch( (p1, p2), turns, game=game, match_attributes=match_attributes ) self.assertEqual(match.players[0].match_attributes["length"], 500) @@ -107,7 +107,7 @@ def test_non_default_attributes(self, turns, game): @example(turns=5) def test_len(self, turns): p1, p2 = axl.Cooperator(), axl.Cooperator() - match = axl.Match((p1, p2), turns) + match = axl.IpdMatch((p1, p2), turns) self.assertEqual(len(match), turns) def test_len_error(self): @@ -115,7 +115,7 @@ def test_len_error(self): Length is not defined if it is infinite. 
""" p1, p2 = axl.Cooperator(), axl.Cooperator() - match = axl.Match((p1, p2), prob_end=0.5) + match = axl.IpdMatch((p1, p2), prob_end=0.5) with self.assertRaises(TypeError): len(match) @@ -125,14 +125,14 @@ def test_stochastic(self, p): assume(0 < p < 1) p1, p2 = axl.Cooperator(), axl.Cooperator() - match = axl.Match((p1, p2), 5) + match = axl.IpdMatch((p1, p2), 5) self.assertFalse(match._stochastic) - match = axl.Match((p1, p2), 5, noise=p) + match = axl.IpdMatch((p1, p2), 5, noise=p) self.assertTrue(match._stochastic) p1 = axl.Random() - match = axl.Match((p1, p2), 5) + match = axl.IpdMatch((p1, p2), 5) self.assertTrue(match._stochastic) @given(p=floats(min_value=0, max_value=1)) @@ -141,25 +141,25 @@ def test_cache_update_required(self, p): assume(0 < p < 1) p1, p2 = axl.Cooperator(), axl.Cooperator() - match = axl.Match((p1, p2), 5, noise=p) + match = axl.IpdMatch((p1, p2), 5, noise=p) self.assertFalse(match._cache_update_required) cache = DeterministicCache() cache.mutable = False - match = axl.Match((p1, p2), 5, deterministic_cache=cache) + match = axl.IpdMatch((p1, p2), 5, deterministic_cache=cache) self.assertFalse(match._cache_update_required) - match = axl.Match((p1, p2), 5) + match = axl.IpdMatch((p1, p2), 5) self.assertTrue(match._cache_update_required) p1 = axl.Random() - match = axl.Match((p1, p2), 5) + match = axl.IpdMatch((p1, p2), 5) self.assertFalse(match._cache_update_required) def test_play(self): cache = DeterministicCache() players = (axl.Cooperator(), axl.Defector()) - match = axl.Match(players, 3, deterministic_cache=cache) + match = axl.IpdMatch(players, 3, deterministic_cache=cache) expected_result = [(C, D), (C, D), (C, D)] self.assertEqual(match.play(), expected_result) self.assertEqual( @@ -169,7 +169,7 @@ def test_play(self): # a deliberately incorrect result so we can tell it came from the cache expected_result = [(C, C), (D, D), (D, C), (C, C), (C, D)] cache[(axl.Cooperator(), axl.Defector())] = expected_result - match = 
axl.Match(players, 3, deterministic_cache=cache) + match = axl.IpdMatch(players, 3, deterministic_cache=cache) self.assertEqual(match.play(), expected_result[:3]) def test_cache_grows(self): @@ -180,7 +180,7 @@ def test_cache_grows(self): """ cache = DeterministicCache() players = (axl.Cooperator(), axl.Defector()) - match = axl.Match(players, 3, deterministic_cache=cache) + match = axl.IpdMatch(players, 3, deterministic_cache=cache) expected_result_5_turn = [(C, D), (C, D), (C, D), (C, D), (C, D)] expected_result_3_turn = [(C, D), (C, D), (C, D)] self.assertEqual(match.play(), expected_result_3_turn) @@ -200,7 +200,7 @@ def test_cache_doesnt_shrink(self): """ cache = DeterministicCache() players = (axl.Cooperator(), axl.Defector()) - match = axl.Match(players, 5, deterministic_cache=cache) + match = axl.IpdMatch(players, 5, deterministic_cache=cache) expected_result_5_turn = [(C, D), (C, D), (C, D), (C, D), (C, D)] expected_result_3_turn = [(C, D), (C, D), (C, D)] self.assertEqual(match.play(), expected_result_5_turn) @@ -215,7 +215,7 @@ def test_cache_doesnt_shrink(self): def test_scores(self): player1 = axl.TitForTat() player2 = axl.Defector() - match = axl.Match((player1, player2), 3) + match = axl.IpdMatch((player1, player2), 3) self.assertEqual(match.scores(), []) match.play() self.assertEqual(match.scores(), [(0, 5), (1, 1), (1, 1)]) @@ -224,12 +224,12 @@ def test_final_score(self): player1 = axl.TitForTat() player2 = axl.Defector() - match = axl.Match((player1, player2), 3) + match = axl.IpdMatch((player1, player2), 3) self.assertEqual(match.final_score(), None) match.play() self.assertEqual(match.final_score(), (2, 7)) - match = axl.Match((player2, player1), 3) + match = axl.IpdMatch((player2, player1), 3) self.assertEqual(match.final_score(), None) match.play() self.assertEqual(match.final_score(), (7, 2)) @@ -239,12 +239,12 @@ def test_final_score_per_turn(self): player1 = axl.TitForTat() player2 = axl.Defector() - match = axl.Match((player1, player2), 
turns) + match = axl.IpdMatch((player1, player2), turns) self.assertEqual(match.final_score_per_turn(), None) match.play() self.assertEqual(match.final_score_per_turn(), (2 / turns, 7 / turns)) - match = axl.Match((player2, player1), turns) + match = axl.IpdMatch((player2, player1), turns) self.assertEqual(match.final_score_per_turn(), None) match.play() self.assertEqual(match.final_score_per_turn(), (7 / turns, 2 / turns)) @@ -253,18 +253,18 @@ def test_winner(self): player1 = axl.TitForTat() player2 = axl.Defector() - match = axl.Match((player1, player2), 3) + match = axl.IpdMatch((player1, player2), 3) self.assertEqual(match.winner(), None) match.play() self.assertEqual(match.winner(), player2) - match = axl.Match((player2, player1), 3) + match = axl.IpdMatch((player2, player1), 3) self.assertEqual(match.winner(), None) match.play() self.assertEqual(match.winner(), player2) player1 = axl.Defector() - match = axl.Match((player1, player2), 3) + match = axl.IpdMatch((player1, player2), 3) self.assertEqual(match.winner(), None) match.play() self.assertEqual(match.winner(), False) @@ -274,7 +274,7 @@ def test_cooperation(self): player1 = axl.Cooperator() player2 = axl.Alternator() - match = axl.Match((player1, player2), turns) + match = axl.IpdMatch((player1, player2), turns) self.assertEqual(match.cooperation(), None) match.play() self.assertEqual(match.cooperation(), (3, 2)) @@ -282,7 +282,7 @@ def test_cooperation(self): player1 = axl.Alternator() player2 = axl.Defector() - match = axl.Match((player1, player2), turns) + match = axl.IpdMatch((player1, player2), turns) self.assertEqual(match.cooperation(), None) match.play() self.assertEqual(match.cooperation(), (2, 0)) @@ -292,7 +292,7 @@ def test_normalised_cooperation(self): player1 = axl.Cooperator() player2 = axl.Alternator() - match = axl.Match((player1, player2), turns) + match = axl.IpdMatch((player1, player2), turns) self.assertEqual(match.normalised_cooperation(), None) match.play() 
self.assertEqual(match.normalised_cooperation(), (3 / turns, 2 / turns)) @@ -300,7 +300,7 @@ def test_normalised_cooperation(self): player1 = axl.Alternator() player2 = axl.Defector() - match = axl.Match((player1, player2), turns) + match = axl.IpdMatch((player1, player2), turns) self.assertEqual(match.normalised_cooperation(), None) match.play() self.assertEqual(match.normalised_cooperation(), (2 / turns, 0 / turns)) @@ -310,7 +310,7 @@ def test_state_distribution(self): player1 = axl.Cooperator() player2 = axl.Alternator() - match = axl.Match((player1, player2), turns) + match = axl.IpdMatch((player1, player2), turns) self.assertEqual(match.state_distribution(), None) match.play() @@ -320,7 +320,7 @@ def test_state_distribution(self): player1 = axl.Alternator() player2 = axl.Defector() - match = axl.Match((player1, player2), turns) + match = axl.IpdMatch((player1, player2), turns) self.assertEqual(match.state_distribution(), None) match.play() @@ -332,7 +332,7 @@ def test_normalised_state_distribution(self): player1 = axl.Cooperator() player2 = axl.Alternator() - match = axl.Match((player1, player2), turns) + match = axl.IpdMatch((player1, player2), turns) self.assertEqual(match.normalised_state_distribution(), None) match.play() @@ -342,7 +342,7 @@ def test_normalised_state_distribution(self): player1 = axl.Alternator() player2 = axl.Defector() - match = axl.Match((player1, player2), turns) + match = axl.IpdMatch((player1, player2), turns) self.assertEqual(match.normalised_state_distribution(), None) match.play() @@ -351,7 +351,7 @@ def test_normalised_state_distribution(self): def test_sparklines(self): players = (axl.Cooperator(), axl.Alternator()) - match = axl.Match(players, 4) + match = axl.IpdMatch(players, 4) match.play() expected_sparklines = "████\n█ █ " self.assertEqual(match.sparklines(), expected_sparklines) @@ -368,10 +368,10 @@ def test_sample_length(self): (3, 0.4, 1), ]: axl.seed(seed) - self.assertEqual(axl.match.sample_length(prob_end), 
expected_length) + self.assertEqual(axl.ipd.match.sample_length(prob_end), expected_length) def test_sample_with_0_prob(self): - self.assertEqual(axl.match.sample_length(0), float("inf")) + self.assertEqual(axl.ipd.match.sample_length(0), float("inf")) def test_sample_with_1_prob(self): - self.assertEqual(axl.match.sample_length(1), 1) + self.assertEqual(axl.ipd.match.sample_length(1), 1) diff --git a/axelrod/tests/unit/test_match_generator.py b/axelrod/tests/unit/test_match_generator.py index 27faa78c0..c4c351eaa 100644 --- a/axelrod/tests/unit/test_match_generator.py +++ b/axelrod/tests/unit/test_match_generator.py @@ -15,7 +15,7 @@ ] test_turns = 100 test_repetitions = 20 -test_game = axl.Game() +test_game = axl.IpdGame() class TestMatchGenerator(unittest.TestCase): @@ -40,8 +40,8 @@ def test_build_single_match_params(self): # Check that can build a match players = [axl.Cooperator(), axl.Defector()] match_params["players"] = players - match = axl.Match(**match_params) - self.assertIsInstance(match, axl.Match) + match = axl.IpdMatch(**match_params) + self.assertIsInstance(match, axl.IpdMatch) self.assertEqual(len(match), test_turns) def test_build_single_match_params_with_noise(self): @@ -62,8 +62,8 @@ def test_build_single_match_params_with_noise(self): # Check that can build a match players = [axl.Cooperator(), axl.Defector()] match_params["players"] = players - match = axl.Match(**match_params) - self.assertIsInstance(match, axl.Match) + match = axl.IpdMatch(**match_params) + self.assertIsInstance(match, axl.IpdMatch) self.assertEqual(len(match), test_turns) def test_build_single_match_params_with_prob_end(self): @@ -83,8 +83,8 @@ def test_build_single_match_params_with_prob_end(self): # Check that can build a match players = [axl.Cooperator(), axl.Defector()] match_params["players"] = players - match = axl.Match(**match_params) - self.assertIsInstance(match, axl.Match) + match = axl.IpdMatch(**match_params) + self.assertIsInstance(match, axl.IpdMatch) with 
self.assertRaises(TypeError): len(match) @@ -106,8 +106,8 @@ def test_build_single_match_params_with_prob_end_and_noise(self): # Check that can build a match players = [axl.Cooperator(), axl.Defector()] match_params["players"] = players - match = axl.Match(**match_params) - self.assertIsInstance(match, axl.Match) + match = axl.IpdMatch(**match_params) + self.assertIsInstance(match, axl.IpdMatch) with self.assertRaises(TypeError): len(match) @@ -129,8 +129,8 @@ def test_build_single_match_params_with_prob_end_and_turns(self): # Check that can build a match players = [axl.Cooperator(), axl.Defector()] match_params["players"] = players - match = axl.Match(**match_params) - self.assertIsInstance(match, axl.Match) + match = axl.IpdMatch(**match_params) + self.assertIsInstance(match, axl.IpdMatch) self.assertIsInstance(len(match), int) self.assertGreater(len(match), 0) self.assertLessEqual(len(match), 10) @@ -154,8 +154,8 @@ def test_build_single_match_params_with_fixed_length_unknown(self): # Check that can build a match players = [axl.Cooperator(), axl.Defector()] match_params["players"] = players - match = axl.Match(**match_params) - self.assertIsInstance(match, axl.Match) + match = axl.IpdMatch(**match_params) + self.assertIsInstance(match, axl.IpdMatch) self.assertEqual(len(match), 5) self.assertEqual(match.match_attributes, {"length": float("inf")}) diff --git a/axelrod/tests/unit/test_mock_player.py b/axelrod/tests/unit/test_mock_player.py index 457e77711..a089d5d6c 100644 --- a/axelrod/tests/unit/test_mock_player.py +++ b/axelrod/tests/unit/test_mock_player.py @@ -9,12 +9,12 @@ class TestMockPlayer(unittest.TestCase): def test_strategy(self): for action in [C, D]: m = axl.MockPlayer(actions=[action]) - p2 = axl.Player() + p2 = axl.IpdPlayer() self.assertEqual(action, m.strategy(p2)) actions = [C, C, D, D, C, C] m = axl.MockPlayer(actions=actions) - p2 = axl.Player() + p2 = axl.IpdPlayer() for action in actions: self.assertEqual(action, m.strategy(p2)) diff --git 
a/axelrod/tests/unit/test_moran.py b/axelrod/tests/unit/test_moran.py index d972b288f..58b83c0e1 100644 --- a/axelrod/tests/unit/test_moran.py +++ b/axelrod/tests/unit/test_moran.py @@ -169,7 +169,7 @@ def test_different_game(self): # Possible for Cooperator to become fixed when using a different game p1, p2 = axl.Cooperator(), axl.Defector() axl.seed(0) - game = axl.Game(r=4, p=2, s=1, t=6) + game = axl.IpdGame(r=4, p=2, s=1, t=6) mp = axl.MoranProcess((p1, p2), turns=5, game=game) populations = mp.play() self.assertEqual(mp.winning_strategy_name, str(p1)) diff --git a/axelrod/tests/unit/test_pickling.py b/axelrod/tests/unit/test_pickling.py index 1cb14101d..b588c1b81 100644 --- a/axelrod/tests/unit/test_pickling.py +++ b/axelrod/tests/unit/test_pickling.py @@ -11,10 +11,10 @@ # First set: special cases -PointerToWrappedStrategy = axl.strategy_transformers.FlipTransformer()(axl.strategy_transformers.FlipTransformer()(axl.Cooperator)) +PointerToWrappedStrategy = axl.ipd.strategy_transformers.FlipTransformer()(axl.ipd.strategy_transformers.FlipTransformer()(axl.Cooperator)) -class MyDefector(axl.Player): +class MyDefector(axl.IpdPlayer): def __init__(self): super(MyDefector, self).__init__() @@ -22,40 +22,40 @@ def strategy(self, opponent): return D -PointerToWrappedClassNotInStrategies = axl.strategy_transformers.FlipTransformer()( - axl.strategy_transformers.FlipTransformer()(MyDefector) +PointerToWrappedClassNotInStrategies = axl.ipd.strategy_transformers.FlipTransformer()( + axl.ipd.strategy_transformers.FlipTransformer()(MyDefector) ) -@axl.strategy_transformers.InitialTransformer((D, C, D), name_prefix=None) -@axl.strategy_transformers.DualTransformer(name_prefix=None) -@axl.strategy_transformers.FlipTransformer(name_prefix=None) -@axl.strategy_transformers.DualTransformer(name_prefix=None) +@axl.ipd.strategy_transformers.InitialTransformer((D, C, D), name_prefix=None) +@axl.ipd.strategy_transformers.DualTransformer(name_prefix=None) 
+@axl.ipd.strategy_transformers.FlipTransformer(name_prefix=None) +@axl.ipd.strategy_transformers.DualTransformer(name_prefix=None) class InterspersedDualTransformersNamePrefixAbsent(axl.Cooperator): pass -@axl.strategy_transformers.IdentityTransformer((D, D, C)) -@axl.strategy_transformers.DualTransformer() -@axl.strategy_transformers.FlipTransformer() -@axl.strategy_transformers.DualTransformer() +@axl.ipd.strategy_transformers.IdentityTransformer((D, D, C)) +@axl.ipd.strategy_transformers.DualTransformer() +@axl.ipd.strategy_transformers.FlipTransformer() +@axl.ipd.strategy_transformers.DualTransformer() class InterspersedDualTransformersNamePrefixPresent(axl.Cooperator): pass -@axl.strategy_transformers.FlipTransformer() -class MyCooperator(axl.Player): +@axl.ipd.strategy_transformers.FlipTransformer() +class MyCooperator(axl.IpdPlayer): def strategy(self, opponent): return C -@axl.strategy_transformers.FlipTransformer() -@axl.strategy_transformers.FlipTransformer() +@axl.ipd.strategy_transformers.FlipTransformer() +@axl.ipd.strategy_transformers.FlipTransformer() class DoubleFlip(axl.Cooperator): pass -@axl.strategy_transformers.FlipTransformer() +@axl.ipd.strategy_transformers.FlipTransformer() class SingleFlip(axl.Cooperator): pass @@ -63,47 +63,47 @@ class SingleFlip(axl.Cooperator): # Second set: All the transformers -@axl.strategy_transformers.ApologyTransformer([D], [C], name_prefix=None) +@axl.ipd.strategy_transformers.ApologyTransformer([D], [C], name_prefix=None) class Apology(axl.Cooperator): pass -@axl.strategy_transformers.DeadlockBreakingTransformer(name_prefix=None) +@axl.ipd.strategy_transformers.DeadlockBreakingTransformer(name_prefix=None) class DeadlockBreaking(axl.Cooperator): pass -@axl.strategy_transformers.DualTransformer(name_prefix=None) +@axl.ipd.strategy_transformers.DualTransformer(name_prefix=None) class Dual(axl.Cooperator): pass -@axl.strategy_transformers.FlipTransformer(name_prefix=None) 
+@axl.ipd.strategy_transformers.FlipTransformer(name_prefix=None) class Flip(axl.Cooperator): pass -@axl.strategy_transformers.FinalTransformer((D, D), name_prefix=None) +@axl.ipd.strategy_transformers.FinalTransformer((D, D), name_prefix=None) class Final(axl.Cooperator): pass -@axl.strategy_transformers.ForgiverTransformer(0.2, name_prefix=None) +@axl.ipd.strategy_transformers.ForgiverTransformer(0.2, name_prefix=None) class Forgiver(axl.Cooperator): pass -@axl.strategy_transformers.GrudgeTransformer(3, name_prefix=None) +@axl.ipd.strategy_transformers.GrudgeTransformer(3, name_prefix=None) class Grudge(axl.Cooperator): pass -@axl.strategy_transformers.InitialTransformer((C, D), name_prefix=None) +@axl.ipd.strategy_transformers.InitialTransformer((C, D), name_prefix=None) class Initial(axl.Cooperator): pass -@axl.strategy_transformers.JossAnnTransformer((0.2, 0.2), name_prefix=None) +@axl.ipd.strategy_transformers.JossAnnTransformer((0.2, 0.2), name_prefix=None) class JossAnn(axl.Cooperator): pass @@ -112,42 +112,42 @@ class JossAnn(axl.Cooperator): probability = [0.2, 0.3] -@axl.strategy_transformers.MixedTransformer(probability, strategies, name_prefix=None) +@axl.ipd.strategy_transformers.MixedTransformer(probability, strategies, name_prefix=None) class Mixed(axl.Cooperator): pass -@axl.strategy_transformers.NiceTransformer(name_prefix=None) +@axl.ipd.strategy_transformers.NiceTransformer(name_prefix=None) class Nice(axl.Cooperator): pass -@axl.strategy_transformers.NoisyTransformer(0.2, name_prefix=None) +@axl.ipd.strategy_transformers.NoisyTransformer(0.2, name_prefix=None) class Noisy(axl.Cooperator): pass -@axl.strategy_transformers.RetaliationTransformer(3, name_prefix=None) +@axl.ipd.strategy_transformers.RetaliationTransformer(3, name_prefix=None) class Retaliation(axl.Cooperator): pass -@axl.strategy_transformers.RetaliateUntilApologyTransformer(name_prefix=None) +@axl.ipd.strategy_transformers.RetaliateUntilApologyTransformer(name_prefix=None) class 
RetaliateUntilApology(axl.Cooperator): pass -@axl.strategy_transformers.TrackHistoryTransformer(name_prefix=None) +@axl.ipd.strategy_transformers.TrackHistoryTransformer(name_prefix=None) class TrackHistory(axl.Cooperator): pass -@axl.strategy_transformers.IdentityTransformer() +@axl.ipd.strategy_transformers.IdentityTransformer() class Identity(axl.Cooperator): pass -@axl.strategy_transformers.IdentityTransformer(name_prefix=None) +@axl.ipd.strategy_transformers.IdentityTransformer(name_prefix=None) class TransformedThue(axl.ThueMorse): pass @@ -160,7 +160,7 @@ def __init__(self): super().__init__(team=team) -TransformedMetaThue = axl.strategy_transformers.IdentityTransformer(name_prefix=None)(MetaThue) +TransformedMetaThue = axl.ipd.strategy_transformers.IdentityTransformer(name_prefix=None)(MetaThue) transformed_no_prefix = [ @@ -183,22 +183,22 @@ def __init__(self): ] transformer_instances = [ - axl.strategy_transformers.ApologyTransformer([D], [C]), - axl.strategy_transformers.DeadlockBreakingTransformer(), - axl.strategy_transformers.DualTransformer(), - axl.strategy_transformers.FlipTransformer(), - axl.strategy_transformers.FinalTransformer((D, D)), - axl.strategy_transformers.ForgiverTransformer(0.2), - axl.strategy_transformers.GrudgeTransformer(3), - axl.strategy_transformers.InitialTransformer((C, D)), - axl.strategy_transformers.JossAnnTransformer((0.2, 0.6)), - axl.strategy_transformers.MixedTransformer(probability, strategies), - axl.strategy_transformers.NiceTransformer(), - axl.strategy_transformers.NoisyTransformer(0.2), - axl.strategy_transformers.RetaliationTransformer(3), - axl.strategy_transformers.RetaliateUntilApologyTransformer(), - axl.strategy_transformers.TrackHistoryTransformer(), - axl.strategy_transformers.IdentityTransformer(), + axl.ipd.strategy_transformers.ApologyTransformer([D], [C]), + axl.ipd.strategy_transformers.DeadlockBreakingTransformer(), + axl.ipd.strategy_transformers.DualTransformer(), + 
axl.ipd.strategy_transformers.FlipTransformer(), + axl.ipd.strategy_transformers.FinalTransformer((D, D)), + axl.ipd.strategy_transformers.ForgiverTransformer(0.2), + axl.ipd.strategy_transformers.GrudgeTransformer(3), + axl.ipd.strategy_transformers.InitialTransformer((C, D)), + axl.ipd.strategy_transformers.JossAnnTransformer((0.2, 0.6)), + axl.ipd.strategy_transformers.MixedTransformer(probability, strategies), + axl.ipd.strategy_transformers.NiceTransformer(), + axl.ipd.strategy_transformers.NoisyTransformer(0.2), + axl.ipd.strategy_transformers.RetaliationTransformer(3), + axl.ipd.strategy_transformers.RetaliateUntilApologyTransformer(), + axl.ipd.strategy_transformers.TrackHistoryTransformer(), + axl.ipd.strategy_transformers.IdentityTransformer(), ] @@ -219,11 +219,11 @@ def assert_original_equals_pickled(self, player_, turns=10): opponent_2 = opponent_class() axl.seed(0) - match_1 = axl.Match((player, opponent_1), turns=turns) + match_1 = axl.IpdMatch((player, opponent_1), turns=turns) result_1 = match_1.play() axl.seed(0) - match_2 = axl.Match((clone, opponent_2), turns=turns) + match_2 = axl.IpdMatch((clone, opponent_2), turns=turns) result_2 = match_2.play() self.assertEqual(result_1, result_2) @@ -236,7 +236,7 @@ def test_parameterized_player(self): self.assert_original_equals_pickled(player) def test_sequence_player(self): - inline_transformed_thue = axl.strategy_transformers.IdentityTransformer(name_prefix="Transformed")(axl.ThueMorse)() + inline_transformed_thue = axl.ipd.strategy_transformers.IdentityTransformer(name_prefix="Transformed")(axl.ThueMorse)() for player in [axl.ThueMorse(), axl.ThueMorseInverse(), MetaThue(), TransformedMetaThue(), inline_transformed_thue, TransformedThue(), ]: @@ -246,14 +246,14 @@ def test_sequence_player(self): axl.seed(10) player.reset() opponent = opponent_class() - match_1 = axl.Match((player, opponent), turns=20) + match_1 = axl.IpdMatch((player, opponent), turns=20) _ = match_1.play() 
self.assert_equals_instance_from_pickling(player) def test_final_transformer_called(self): player = axl.Alexei() copy = pickle.loads(pickle.dumps(player)) - match = axl.Match((player, copy), turns=3) + match = axl.IpdMatch((player, copy), turns=3) results = match.play() self.assertEqual(results, [(C, C), (C, C), (D, D)]) @@ -275,9 +275,9 @@ def test_pickling_all_transformers_as_instance_called_on_a_class(self): self.assert_original_equals_pickled(player) def test_created_on_the_spot_multiple_transformers(self): - player_class = axl.strategy_transformers.FlipTransformer()(axl.Cooperator) - player_class = axl.strategy_transformers.DualTransformer()(player_class) - player = axl.strategy_transformers.FinalTransformer((C, D))(player_class)() + player_class = axl.ipd.strategy_transformers.FlipTransformer()(axl.Cooperator) + player_class = axl.ipd.strategy_transformers.DualTransformer()(player_class) + player = axl.ipd.strategy_transformers.FinalTransformer((C, D))(player_class)() self.assert_original_equals_pickled(player) @@ -293,10 +293,10 @@ def test_dual_transformer_regression_test(self): self.assert_original_equals_pickled(player) player_class = axl.WinStayLoseShift - player_class = axl.strategy_transformers.DualTransformer()(player_class) - player_class = axl.strategy_transformers.InitialTransformer((C, D))(player_class) - player_class = axl.strategy_transformers.DualTransformer()(player_class) - player_class = axl.strategy_transformers.TrackHistoryTransformer()(player_class) + player_class = axl.ipd.strategy_transformers.DualTransformer()(player_class) + player_class = axl.ipd.strategy_transformers.InitialTransformer((C, D))(player_class) + player_class = axl.ipd.strategy_transformers.DualTransformer()(player_class) + player_class = axl.ipd.strategy_transformers.TrackHistoryTransformer()(player_class) interspersed_dual_transformers = player_class() @@ -318,7 +318,7 @@ def test_class_and_instance_name_different_built_from_player_class(self): player = MyCooperator() 
class_names = [class_.__name__ for class_ in MyCooperator.mro()] self.assertEqual( - class_names, ["FlippedMyCooperator", "MyCooperator", "Player", "object"] + class_names, ["FlippedMyCooperator", "MyCooperator", "IpdPlayer", "object"] ) self.assert_original_equals_pickled(player) @@ -333,14 +333,14 @@ def test_pointer_to_class_derived_from_strategy(self): "FlippedFlippedCooperator", "FlippedCooperator", "Cooperator", - "Player", + "IpdPlayer", "object", ], ) self.assert_original_equals_pickled(player) - def test_pointer_to_class_derived_from_Player(self): + def test_pointer_to_class_derived_from_IpdPlayer(self): player = PointerToWrappedClassNotInStrategies() class_names = [class_.__name__ for class_ in player.__class__.mro()] @@ -350,7 +350,7 @@ def test_pointer_to_class_derived_from_Player(self): "FlippedFlippedMyDefector", "FlippedMyDefector", "MyDefector", - "Player", + "IpdPlayer", "object", ], ) @@ -369,7 +369,7 @@ class LocalCooperator(axl.Cooperator): self.assertRaises(AttributeError, pickle.dumps, un_transformed) - player = axl.strategy_transformers.FlipTransformer()(LocalCooperator)() + player = axl.ipd.strategy_transformers.FlipTransformer()(LocalCooperator)() pickled = pickle.dumps(player) self.assertRaises(AttributeError, pickle.loads, pickled) @@ -378,17 +378,17 @@ def test_with_various_name_prefixes(self): self.assertEqual(no_prefix.__class__.__name__, "Flip") self.assert_original_equals_pickled(no_prefix) - default_prefix = axl.strategy_transformers.FlipTransformer()(axl.Cooperator)() + default_prefix = axl.ipd.strategy_transformers.FlipTransformer()(axl.Cooperator)() self.assertEqual(default_prefix.__class__.__name__, "FlippedCooperator") self.assert_original_equals_pickled(default_prefix) - fliptastic = axl.strategy_transformers.FlipTransformer(name_prefix="Fliptastic") + fliptastic = axl.ipd.strategy_transformers.FlipTransformer(name_prefix="Fliptastic") new_prefix = fliptastic(axl.Cooperator)() self.assertEqual(new_prefix.__class__.__name__, 
"FliptasticCooperator") self.assert_original_equals_pickled(new_prefix) def test_dynamic_class_no_name_prefix(self): - player = axl.strategy_transformers.FlipTransformer(name_prefix=None)(axl.Cooperator)() + player = axl.ipd.strategy_transformers.FlipTransformer(name_prefix=None)(axl.Cooperator)() self.assertEqual(player.__class__.__name__, "Cooperator") self.assert_original_equals_pickled(player) diff --git a/axelrod/tests/unit/test_plot.py b/axelrod/tests/unit/test_plot.py index 89d40d8d9..d5bdf29a1 100644 --- a/axelrod/tests/unit/test_plot.py +++ b/axelrod/tests/unit/test_plot.py @@ -62,31 +62,31 @@ def setUpClass(cls): ) def test_default_cmap(self): - cmap = axl.plot.default_cmap("0.0") + cmap = axl.ipd.plot.default_cmap("0.0") self.assertEqual(cmap, "YlGnBu") - cmap = axl.plot.default_cmap("1.3alpha") + cmap = axl.ipd.plot.default_cmap("1.3alpha") self.assertEqual(cmap, "YlGnBu") - cmap = axl.plot.default_cmap("1.4.99") + cmap = axl.ipd.plot.default_cmap("1.4.99") self.assertEqual(cmap, "YlGnBu") - cmap = axl.plot.default_cmap("1.4") + cmap = axl.ipd.plot.default_cmap("1.4") self.assertEqual(cmap, "YlGnBu") - cmap = axl.plot.default_cmap() + cmap = axl.ipd.plot.default_cmap() self.assertEqual(cmap, "viridis") - cmap = axl.plot.default_cmap("1.5") + cmap = axl.ipd.plot.default_cmap("1.5") self.assertEqual(cmap, "viridis") - cmap = axl.plot.default_cmap("1.5beta") + cmap = axl.ipd.plot.default_cmap("1.5beta") self.assertEqual(cmap, "viridis") - cmap = axl.plot.default_cmap("1.7") + cmap = axl.ipd.plot.default_cmap("1.7") self.assertEqual(cmap, "viridis") - cmap = axl.plot.default_cmap("2.0") + cmap = axl.ipd.plot.default_cmap("2.0") self.assertEqual(cmap, "viridis") def test_init(self): @@ -96,7 +96,7 @@ def test_init(self): def test_init_from_resulsetfromfile(self): tmp_file = tempfile.NamedTemporaryFile(mode="w", delete=False) players = [axl.Cooperator(), axl.TitForTat(), axl.Defector()] - tournament = axl.Tournament(players=players, turns=2, repetitions=2) + 
tournament = axl.IpdTournament(players=players, turns=2, repetitions=2) tournament.play(filename=tmp_file.name, progress_bar=False) tmp_file.close() rs = axl.ResultSet(tmp_file.name, players, 2, progress_bar=False) diff --git a/axelrod/tests/unit/test_property.py b/axelrod/tests/unit/test_property.py index fbf89cca2..7534ef513 100644 --- a/axelrod/tests/unit/test_property.py +++ b/axelrod/tests/unit/test_property.py @@ -21,7 +21,7 @@ def test_call(self): strategies = strategy_lists().example() self.assertIsInstance(strategies, list) for p in strategies: - self.assertIsInstance(p(), axl.Player) + self.assertIsInstance(p(), axl.IpdPlayer) @given(strategies=strategy_lists(min_size=1, max_size=50)) @settings(max_examples=5) @@ -30,7 +30,7 @@ def test_decorator(self, strategies): self.assertGreaterEqual(len(strategies), 1) self.assertLessEqual(len(strategies), 50) for strategy in strategies: - self.assertIsInstance(strategy(), axl.Player) + self.assertIsInstance(strategy(), axl.IpdPlayer) @given(strategies=strategy_lists(strategies=axl.basic_strategies)) @settings(max_examples=5) @@ -39,7 +39,7 @@ def test_decorator_with_given_strategies(self, strategies): basic_player_names = [str(s()) for s in axl.basic_strategies] for strategy in strategies: player = strategy() - self.assertIsInstance(player, axl.Player) + self.assertIsInstance(player, axl.IpdPlayer) self.assertIn(str(player), basic_player_names) @@ -50,12 +50,12 @@ class TestMatch(unittest.TestCase): def test_call(self): match = matches().example() - self.assertIsInstance(match, axl.Match) + self.assertIsInstance(match, axl.IpdMatch) @given(match=matches(min_turns=10, max_turns=50, min_noise=0, max_noise=1)) @settings(max_examples=5) def test_decorator(self, match): - self.assertIsInstance(match, axl.Match) + self.assertIsInstance(match, axl.IpdMatch) self.assertGreaterEqual(len(match), 10) self.assertLessEqual(len(match), 50) self.assertGreaterEqual(match.noise, 0) @@ -64,7 +64,7 @@ def test_decorator(self, match): 
@given(match=matches(min_turns=10, max_turns=50, min_noise=0, max_noise=0)) @settings(max_examples=5) def test_decorator_with_no_noise(self, match): - self.assertIsInstance(match, axl.Match) + self.assertIsInstance(match, axl.IpdMatch) self.assertGreaterEqual(len(match), 10) self.assertLessEqual(len(match), 50) self.assertEqual(match.noise, 0) @@ -73,7 +73,7 @@ def test_decorator_with_no_noise(self, match): class TestTournament(unittest.TestCase): def test_call(self): tournament = tournaments().example() - self.assertIsInstance(tournament, axl.Tournament) + self.assertIsInstance(tournament, axl.IpdTournament) @given( tournament=tournaments( @@ -88,7 +88,7 @@ def test_call(self): ) @settings(max_examples=5) def test_decorator(self, tournament): - self.assertIsInstance(tournament, axl.Tournament) + self.assertIsInstance(tournament, axl.IpdTournament) self.assertLessEqual(tournament.turns, 50) self.assertGreaterEqual(tournament.turns, 2) self.assertLessEqual(tournament.noise, 1) @@ -99,7 +99,7 @@ def test_decorator(self, tournament): @given(tournament=tournaments(strategies=axl.basic_strategies, max_size=3)) @settings(max_examples=5) def test_decorator_with_given_strategies(self, tournament): - self.assertIsInstance(tournament, axl.Tournament) + self.assertIsInstance(tournament, axl.IpdTournament) basic_player_names = [str(s()) for s in axl.basic_strategies] for p in tournament.players: self.assertIn(str(p), basic_player_names) @@ -108,7 +108,7 @@ def test_decorator_with_given_strategies(self, tournament): class TestProbEndTournament(unittest.TestCase): def test_call(self): tournament = tournaments().example() - self.assertIsInstance(tournament, axl.Tournament) + self.assertIsInstance(tournament, axl.IpdTournament) @given( tournament=prob_end_tournaments( @@ -123,7 +123,7 @@ def test_call(self): ) @settings(max_examples=5) def test_decorator(self, tournament): - self.assertIsInstance(tournament, axl.Tournament) + self.assertIsInstance(tournament, axl.IpdTournament) 
self.assertLessEqual(tournament.prob_end, 1) self.assertGreaterEqual(tournament.prob_end, 0) self.assertLessEqual(tournament.noise, 1) @@ -134,7 +134,7 @@ def test_decorator(self, tournament): @given(tournament=prob_end_tournaments(strategies=axl.basic_strategies, max_size=3)) @settings(max_examples=5) def test_decorator_with_given_strategies(self, tournament): - self.assertIsInstance(tournament, axl.Tournament) + self.assertIsInstance(tournament, axl.IpdTournament) basic_player_names = [str(s()) for s in axl.basic_strategies] for p in tournament.players: self.assertIn(str(p), basic_player_names) @@ -143,7 +143,7 @@ def test_decorator_with_given_strategies(self, tournament): class TestSpatialTournament(unittest.TestCase): def test_call(self): tournament = spatial_tournaments().example() - self.assertIsInstance(tournament, axl.Tournament) + self.assertIsInstance(tournament, axl.IpdTournament) @given( tournament=spatial_tournaments( @@ -158,7 +158,7 @@ def test_call(self): ) @settings(max_examples=5) def test_decorator(self, tournament): - self.assertIsInstance(tournament, axl.Tournament) + self.assertIsInstance(tournament, axl.IpdTournament) self.assertLessEqual(tournament.turns, 50) self.assertGreaterEqual(tournament.turns, 2) self.assertLessEqual(tournament.noise, 1) @@ -169,7 +169,7 @@ def test_decorator(self, tournament): @given(tournament=spatial_tournaments(strategies=axl.basic_strategies, max_size=3)) @settings(max_examples=5) def test_decorator_with_given_strategies(self, tournament): - self.assertIsInstance(tournament, axl.Tournament) + self.assertIsInstance(tournament, axl.IpdTournament) basic_player_names = [str(s()) for s in axl.basic_strategies] for p in tournament.players: self.assertIn(str(p), basic_player_names) @@ -178,7 +178,7 @@ def test_decorator_with_given_strategies(self, tournament): class TestProbEndSpatialTournament(unittest.TestCase): def test_call(self): tournament = prob_end_spatial_tournaments().example() - 
self.assertIsInstance(tournament, axl.Tournament) + self.assertIsInstance(tournament, axl.IpdTournament) @given( tournament=prob_end_spatial_tournaments( @@ -193,7 +193,7 @@ def test_call(self): ) @settings(max_examples=5) def test_decorator(self, tournament): - self.assertIsInstance(tournament, axl.Tournament) + self.assertIsInstance(tournament, axl.IpdTournament) self.assertLessEqual(tournament.prob_end, 1) self.assertGreaterEqual(tournament.prob_end, 0) self.assertLessEqual(tournament.noise, 1) @@ -208,7 +208,7 @@ def test_decorator(self, tournament): ) @settings(max_examples=5) def test_decorator_with_given_strategies(self, tournament): - self.assertIsInstance(tournament, axl.Tournament) + self.assertIsInstance(tournament, axl.IpdTournament) basic_player_names = [str(s()) for s in axl.basic_strategies] for p in tournament.players: self.assertIn(str(p), basic_player_names) @@ -217,16 +217,16 @@ def test_decorator_with_given_strategies(self, tournament): class TestGame(unittest.TestCase): def test_call(self): game = games().example() - self.assertIsInstance(game, axl.Game) + self.assertIsInstance(game, axl.IpdGame) @given(game=games()) @settings(max_examples=5) def test_decorator(self, game): - self.assertIsInstance(game, axl.Game) + self.assertIsInstance(game, axl.IpdGame) r, p, s, t = game.RPST() self.assertTrue((2 * r) > (t + s) and (t > r > p > s)) @given(game=games(prisoners_dilemma=False)) @settings(max_examples=5) def test_decorator_unconstrained(self, game): - self.assertIsInstance(game, axl.Game) + self.assertIsInstance(game, axl.IpdGame) diff --git a/axelrod/tests/unit/test_resultset.py b/axelrod/tests/unit/test_resultset.py index 8bd0be3af..fc635c6b4 100644 --- a/axelrod/tests/unit/test_resultset.py +++ b/axelrod/tests/unit/test_resultset.py @@ -2,14 +2,13 @@ import csv from collections import Counter import pandas as pd -from dask.dataframe.core import DataFrame from numpy import mean, nanmedian, std import pathlib import axelrod as axl from 
axelrod.load_data_ import axl_filename from axelrod.result_set import create_counter_dict -from axelrod.tests.property import prob_end_tournaments, tournaments +from axelrod.tests.property import tournaments from hypothesis import given, settings @@ -497,7 +496,7 @@ def test_self_interaction_for_random_strategies(self): # the copies of the strategy. axl.seed(0) players = [s() for s in axl.demo_strategies] - tournament = axl.Tournament(players, repetitions=2, turns=5) + tournament = axl.IpdTournament(players, repetitions=2, turns=5) results = tournament.play(progress_bar=False) self.assertEqual(results.payoff_diffs_means[-1][-1], 0.0) @@ -511,7 +510,7 @@ def test_equality(self): self.assertEqual(rs_sets[0], rs_sets[1]) players = [s() for s in axl.demo_strategies] - tournament = axl.Tournament(players, repetitions=2, turns=5) + tournament = axl.IpdTournament(players, repetitions=2, turns=5) results = tournament.play(progress_bar=False) self.assertNotEqual(results, rs_sets[0]) @@ -572,7 +571,7 @@ def test_summarise_regression_test(self): axl.TitForTat(), axl.Grudger(), ] - tournament = axl.Tournament(players, turns=10, repetitions=3) + tournament = axl.IpdTournament(players, turns=10, repetitions=3) results = tournament.play() summary = [ @@ -668,7 +667,7 @@ def test_write_summary(self): class TestDecorator(unittest.TestCase): def test_update_progress_bar(self): method = lambda x: None - self.assertEqual(axl.result_set.update_progress_bar(method)(1), None) + self.assertEqual(axl.ipd.result_set.update_progress_bar(method)(1), None) class TestResultSetSpatialStructure(TestResultSet): diff --git a/axelrod/tests/unit/test_strategy_transformers.py b/axelrod/tests/unit/test_strategy_transformers.py index 7b909ab22..dac8b66ac 100644 --- a/axelrod/tests/unit/test_strategy_transformers.py +++ b/axelrod/tests/unit/test_strategy_transformers.py @@ -171,12 +171,12 @@ def test_doc(self): self.assertEqual(player.__doc__, transformer.__doc__) def test_cloning(self): - """Tests that 
Player.clone preserves the application of transformations. + """Tests that IpdPlayer.clone preserves the application of transformations. """ p1 = axl.Cooperator() p2 = FlipTransformer()(axl.Cooperator)() # Defector p3 = p2.clone() - match = axl.Match((p1, p3), turns=2) + match = axl.IpdMatch((p1, p3), turns=2) results = match.play() self.assertEqual(results, [(C, D), (C, D)]) @@ -187,7 +187,7 @@ def test_generic(self): Cooperator2 = transformer(axl.Cooperator) p1 = Cooperator2() p2 = axl.Cooperator() - match = axl.Match((p1, p2), turns=2) + match = axl.IpdMatch((p1, p2), turns=2) results = match.play() self.assertEqual(results, [(C, C), (C, C)]) @@ -195,7 +195,7 @@ def test_flip_transformer(self): """Tests that FlipTransformer(Cooperator) == Defector.""" p1 = axl.Cooperator() p2 = FlipTransformer()(axl.Cooperator)() # Defector - match = axl.Match((p1, p2), turns=3) + match = axl.IpdMatch((p1, p2), turns=3) results = match.play() self.assertEqual(results, [(C, D), (C, D), (C, D)]) diff --git a/axelrod/tests/unit/test_strategy_utils.py b/axelrod/tests/unit/test_strategy_utils.py index fef7e2af6..e93710a70 100644 --- a/axelrod/tests/unit/test_strategy_utils.py +++ b/axelrod/tests/unit/test_strategy_utils.py @@ -105,8 +105,8 @@ def test_tft_reacts_to_defection(self): class TestLookAhead(unittest.TestCase): def setUp(self): - self.inspector = axl.Player() - self.game = axl.Game() + self.inspector = axl.IpdPlayer() + self.game = axl.IpdGame() def test_cooperator(self): tft = axl.Cooperator() diff --git a/axelrod/tests/unit/test_tournament.py b/axelrod/tests/unit/test_tournament.py index c91abcf46..a3d50db24 100644 --- a/axelrod/tests/unit/test_tournament.py +++ b/axelrod/tests/unit/test_tournament.py @@ -67,7 +67,7 @@ def reset_record(cls): class TestTournament(unittest.TestCase): @classmethod def setUpClass(cls): - cls.game = axl.Game() + cls.game = axl.IpdGame() cls.players = [s() for s in test_strategies] cls.test_name = "test" cls.test_repetitions = test_repetitions 
@@ -93,7 +93,7 @@ def setUpClass(cls): cls.filename = axl_filename(path) def setUp(self): - self.test_tournament = axl.Tournament( + self.test_tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -102,7 +102,7 @@ def setUp(self): ) def test_init(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -110,18 +110,18 @@ def test_init(self): noise=0.2, ) self.assertEqual(len(tournament.players), len(test_strategies)) - self.assertIsInstance(tournament.players[0].match_attributes["game"], axl.Game) + self.assertIsInstance(tournament.players[0].match_attributes["game"], axl.IpdGame) self.assertEqual(tournament.game.score((C, C)), (3, 3)) self.assertEqual(tournament.turns, self.test_turns) self.assertEqual(tournament.repetitions, 10) self.assertEqual(tournament.name, "test") self.assertIsInstance(tournament._logger, logging.Logger) self.assertEqual(tournament.noise, 0.2) - anonymous_tournament = axl.Tournament(players=self.players) + anonymous_tournament = axl.IpdTournament(players=self.players) self.assertEqual(anonymous_tournament.name, "axelrod") def test_init_with_match_attributes(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( players=self.players, match_attributes={"length": float("inf")} ) mg = tournament.match_generator @@ -129,7 +129,7 @@ def test_init_with_match_attributes(self): self.assertEqual(match_params["match_attributes"], {"length": float("inf")}) def test_warning(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -244,7 +244,7 @@ def test_get_progress_bar(self): self.assertEqual(pbar.total, self.test_tournament.match_generator.size) new_edges = [(0, 1), (1, 2), (2, 3), (3, 4)] - new_tournament = axl.Tournament(players=self.players, edges=new_edges) + new_tournament = axl.IpdTournament(players=self.players, edges=new_edges) 
new_tournament.use_progress_bar = True pbar = new_tournament._get_progress_bar() self.assertEqual(pbar.desc, "Playing matches") @@ -253,7 +253,7 @@ def test_get_progress_bar(self): def test_serial_play(self): # Test that we get an instance of ResultSet - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -264,7 +264,7 @@ def test_serial_play(self): self.assertIsInstance(results, axl.ResultSet) # Test that _run_serial_repetitions is called with empty matches list - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -276,8 +276,8 @@ def test_serial_play(self): def test_serial_play_with_different_game(self): # Test that a non default game is passed to the result set - game = axl.Game(p=-1, r=-1, s=-1, t=-1) - tournament = axl.Tournament( + game = axl.IpdGame(p=-1, r=-1, s=-1, t=-1) + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=game, turns=1, repetitions=1 ) results = tournament.play(progress_bar=False) @@ -286,7 +286,7 @@ def test_serial_play_with_different_game(self): @patch("tqdm.tqdm", RecordedTQDM) def test_no_progress_bar_play(self): """Test that progress bar is not created for progress_bar=False""" - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -318,7 +318,7 @@ def assert_play_pbar_correct_total_and_finished(self, pbar, total): @patch("tqdm.tqdm", RecordedTQDM) def test_progress_bar_play(self): """Test that progress bar is created by default and with True argument""" - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -357,7 +357,7 @@ def test_progress_bar_play(self): def test_progress_bar_play_parallel(self): """Test that tournament plays when asking for progress bar for parallel tournament and that progress bar is created.""" - 
tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -401,7 +401,7 @@ def test_progress_bar_play_parallel(self): ) @settings(max_examples=50) @example( - tournament=axl.Tournament( + tournament=axl.IpdTournament( players=[s() for s in test_strategies], turns=test_turns, repetitions=test_repetitions, @@ -411,12 +411,12 @@ def test_progress_bar_play_parallel(self): # As explained there: https://github.com/Axelrod-Python/Axelrod/issues/465, # these two examples were identified by hypothesis. @example( - tournament=axl.Tournament( + tournament=axl.IpdTournament( players=[axl.BackStabber(), axl.MindReader()], turns=2, repetitions=1, ) ) @example( - tournament=axl.Tournament( + tournament=axl.IpdTournament( players=[axl.BackStabber(), axl.ThueMorse()], turns=2, repetitions=1 ) ) @@ -430,7 +430,7 @@ def test_property_serial_play(self, tournament): def test_parallel_play(self): # Test that we get an instance of ResultSet - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -450,7 +450,7 @@ def test_parallel_play(self): axl.ThueMorse(), axl.DoubleCrosser(), ] - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=players, game=self.game, @@ -461,7 +461,7 @@ def test_parallel_play(self): self.assertEqual(len(scores), len(players)) def test_parallel_play_with_writing_to_file(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -476,7 +476,7 @@ def test_parallel_play_with_writing_to_file(self): self.assertEqual(tournament.num_interactions, 75) def test_run_serial(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -497,7 +497,7 @@ class PickleableMock(MagicMock): def __reduce__(self): return MagicMock, () - tournament = axl.Tournament( 
+ tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -524,7 +524,7 @@ def __reduce__(self): def test_n_workers(self): max_processes = cpu_count() - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -533,7 +533,7 @@ def test_n_workers(self): ) self.assertEqual(tournament._n_workers(processes=1), max_processes) - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -549,7 +549,7 @@ def test_2_workers(self): # This is a separate test with a skip condition because we # cannot guarantee that the tests will always run on a machine # with more than one processor - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -562,7 +562,7 @@ def test_start_workers(self): workers = 2 work_queue = Queue() done_queue = Queue() - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -582,7 +582,7 @@ def test_start_workers(self): self.assertEqual(stops, workers) def test_worker(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -609,7 +609,7 @@ def test_worker(self): self.assertEqual(queue_stop, "STOP") def test_build_result_set(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -620,7 +620,7 @@ def test_build_result_set(self): self.assertIsInstance(results, axl.ResultSet) def test_no_build_result_set(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -645,7 +645,7 @@ def test_no_build_result_set(self): @example(turns=3) @example(turns=axl.DEFAULT_TURNS) def test_play_matches(self, turns): - tournament = 
axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -693,13 +693,13 @@ def test_match_cache_is_used(self): FakeRandom.classifier["stochastic"] = False p1 = FakeRandom() p2 = FakeRandom() - tournament = axl.Tournament((p1, p2), turns=5, repetitions=2) + tournament = axl.IpdTournament((p1, p2), turns=5, repetitions=2) results = tournament.play(progress_bar=False) for player_scores in results.scores: self.assertEqual(player_scores[0], player_scores[1]) def test_write_interactions(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -721,7 +721,7 @@ def test_write_interactions(self): self.assertEqual(len(calls), 15) def test_write_to_csv_with_results(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -735,7 +735,7 @@ def test_write_to_csv_with_results(self): self.assertTrue(df.equals(expected_df)) def test_write_to_csv_without_results(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -752,14 +752,14 @@ def test_write_to_csv_without_results(self): class TestProbEndTournament(unittest.TestCase): @classmethod def setUpClass(cls): - cls.game = axl.Game() + cls.game = axl.IpdGame() cls.players = [s() for s in test_strategies] cls.test_name = "test" cls.test_repetitions = test_repetitions cls.test_prob_end = test_prob_end def test_init(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -774,7 +774,7 @@ def test_init(self): self.assertEqual(tournament.name, "test") self.assertIsInstance(tournament._logger, logging.Logger) self.assertEqual(tournament.noise, 0.2) - anonymous_tournament = axl.Tournament(players=self.players) + anonymous_tournament = axl.IpdTournament(players=self.players) 
self.assertEqual(anonymous_tournament.name, "axelrod") @given( @@ -789,7 +789,7 @@ def test_init(self): ) @settings(max_examples=5) @example( - tournament=axl.Tournament( + tournament=axl.IpdTournament( players=[s() for s in test_strategies], prob_end=0.2, repetitions=test_repetitions, @@ -799,12 +799,12 @@ def test_init(self): # As explained there: https://github.com/Axelrod-Python/Axelrod/issues/465, # these two examples were identified by hypothesis. @example( - tournament=axl.Tournament( + tournament=axl.IpdTournament( players=[axl.BackStabber(), axl.MindReader()], prob_end=0.2, repetitions=1, ) ) @example( - tournament=axl.Tournament( + tournament=axl.IpdTournament( players=[axl.ThueMorse(), axl.MindReader()], prob_end=0.2, repetitions=1, ) ) @@ -820,7 +820,7 @@ def test_property_serial_play(self, tournament): class TestSpatialTournament(unittest.TestCase): @classmethod def setUpClass(cls): - cls.game = axl.Game() + cls.game = axl.IpdGame() cls.players = [s() for s in test_strategies] cls.test_name = "test" cls.test_repetitions = test_repetitions @@ -828,7 +828,7 @@ def setUpClass(cls): cls.test_edges = test_edges def test_init(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -845,7 +845,7 @@ def test_init(self): self.assertIsInstance(tournament._logger, logging.Logger) self.assertEqual(tournament.noise, 0.2) self.assertEqual(tournament.match_generator.noise, 0.2) - anonymous_tournament = axl.Tournament(players=self.players) + anonymous_tournament = axl.IpdTournament(players=self.players) self.assertEqual(anonymous_tournament.name, "axelrod") @given( @@ -872,11 +872,11 @@ def test_complete_tournament(self, strategies, turns, repetitions, noise, seed): edges.append((i, j)) # create a round robin tournament - tournament = axl.Tournament( + tournament = axl.IpdTournament( players, repetitions=repetitions, turns=turns, noise=noise ) # create a complete spatial tournament - 
spatial_tournament = axl.Tournament( + spatial_tournament = axl.IpdTournament( players, repetitions=repetitions, turns=turns, noise=noise, edges=edges ) @@ -915,13 +915,13 @@ def test_particular_tournament(self): axl.Grudger(), ] edges = [(0, 2), (0, 3), (1, 2), (1, 3)] - tournament = axl.Tournament(players, edges=edges) + tournament = axl.IpdTournament(players, edges=edges) results = tournament.play(progress_bar=False) expected_ranked_names = ["Cooperator", "Tit For Tat", "Grudger", "Defector"] self.assertEqual(results.ranked_names, expected_ranked_names) # Check that this tournament runs with noise - tournament = axl.Tournament(players, edges=edges, noise=0.5) + tournament = axl.IpdTournament(players, edges=edges, noise=0.5) results = tournament.play(progress_bar=False) self.assertIsInstance(results, axl.ResultSet) @@ -929,7 +929,7 @@ def test_particular_tournament(self): class TestProbEndingSpatialTournament(unittest.TestCase): @classmethod def setUpClass(cls): - cls.game = axl.Game() + cls.game = axl.IpdGame() cls.players = [s() for s in test_strategies] cls.test_name = "test" cls.test_repetitions = test_repetitions @@ -937,7 +937,7 @@ def setUpClass(cls): cls.test_edges = test_edges def test_init(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -973,7 +973,7 @@ def test_complete_tournament(self, strategies, prob_end, seed, reps): players = [s() for s in strategies] # create a prob end round robin tournament - tournament = axl.Tournament(players, prob_end=prob_end, repetitions=reps) + tournament = axl.IpdTournament(players, prob_end=prob_end, repetitions=reps) axl.seed(seed) results = tournament.play(progress_bar=False) @@ -981,7 +981,7 @@ def test_complete_tournament(self, strategies, prob_end, seed, reps): # edges edges = [(i, j) for i in range(len(players)) for j in range(i, len(players))] - spatial_tournament = axl.Tournament( + spatial_tournament = axl.IpdTournament( 
players, prob_end=prob_end, repetitions=reps, edges=edges ) axl.seed(seed) @@ -1007,7 +1007,7 @@ def test_one_turn_tournament(self, tournament, seed): """ Tests that gives same result as the corresponding spatial round robin spatial tournament """ - prob_end_tour = axl.Tournament( + prob_end_tour = axl.IpdTournament( tournament.players, prob_end=1, edges=tournament.edges, diff --git a/axelrod/tournament.py b/axelrod/tournament.py index 26144757c..dbe49a2bb 100644 --- a/axelrod/tournament.py +++ b/axelrod/tournament.py @@ -7,26 +7,32 @@ from tempfile import mkstemp from typing import List, Optional, Tuple -import axelrod.interaction_utils as iu import tqdm -from axelrod import DEFAULT_TURNS -from axelrod.action import Action, actions_to_str, str_to_actions -from axelrod.player import Player -from .game import Game -from .match import Match +import axelrod.interaction_utils as iu +from axelrod import DEFAULT_TURNS +from axelrod.action import Action, actions_to_str +from axelrod.base_tournament import BaseTournament +from axelrod.player import IpdPlayer +from .game import IpdGame +from .match import IpdMatch from .match_generator import MatchGenerator from .result_set import ResultSet C, D = Action.C, Action.D -class Tournament(object): +class Tournament(): # pragma: no cover + def __init__(self): + raise DeprecationWarning("Please change path from axelrod.tournament.Tournament to axelrod.Tournament") + + +class IpdTournament(BaseTournament): def __init__( self, - players: List[Player], + players: List[IpdPlayer], name: str = "axelrod", - game: Game = None, + game: IpdGame = None, turns: int = None, prob_end: float = None, repetitions: int = 10, @@ -38,10 +44,10 @@ def __init__( Parameters ---------- players : list - A list of axelrod.Player objects + A list of axelrod.IpdPlayer objects name : string A name for the tournament - game : axelrod.Game + game : axelrod.IpdGame The game object used to score the tournament turns : integer The number of turns per match @@ -61,7 +67,7 
@@ def __init__( but these can be overridden if desired. """ if game is None: - self.game = Game() + self.game = IpdGame() else: self.game = game self.name = name @@ -92,6 +98,18 @@ def __init__( self.filename = None # type: Optional[str] self._temp_file_descriptor = None # type: Optional[int] + super().__init__( + players, + name, + game, + turns, + prob_end, + repetitions, + noise, + edges, + match_attributes + ) + def setup_output(self, filename=None): """assign/create `filename` to `self`. If file should be deleted once `play` is finished, assign a file descriptor. """ @@ -135,7 +153,7 @@ def play( if not build_results and not filename: warnings.warn( - "Tournament results will not be accessible since " + "IpdTournament results will not be accessible since " "build_results=False and no filename was supplied." ) @@ -229,7 +247,8 @@ def _get_file_objects(self, build_results=True): def _get_progress_bar(self): if self.use_progress_bar: - return tqdm.tqdm(total=self.match_generator.size, desc="Playing matches") + return tqdm.tqdm(total=self.match_generator.size, + desc="Playing matches") return None def _write_interactions_to_file(self, results, writer): @@ -253,8 +272,10 @@ def _write_interactions_to_file(self, results, writer): ) = results for index, player_index in enumerate(index_pair): opponent_index = index_pair[index - 1] - row = [self.num_interactions, player_index, opponent_index, repetition, - str(self.players[player_index]), str(self.players[opponent_index])] + row = [self.num_interactions, player_index, opponent_index, + repetition, + str(self.players[player_index]), + str(self.players[opponent_index])] history = actions_to_str([i[index] for i in interaction]) row.append(history) @@ -274,16 +295,20 @@ def _write_interactions_to_file(self, results, writer): for state in states: row.append(state_distribution[state]) for state in states: - row.append(state_to_action_distributions[index][(state, C)]) - row.append(state_to_action_distributions[index][(state, 
D)]) + row.append(state_to_action_distributions[index][ + (state, C)]) + row.append(state_to_action_distributions[index][ + (state, D)]) - row.append(int(cooperations[index] >= cooperations[index - 1])) + row.append( + int(cooperations[index] >= cooperations[index - 1])) writer.writerow(row) repetition += 1 self.num_interactions += 1 - def _run_parallel(self, processes: int = 2, build_results: bool = True) -> bool: + def _run_parallel(self, processes: int = 2, + build_results: bool = True) -> bool: """ Run all matches in parallel @@ -346,7 +371,8 @@ def _start_workers( """ for worker in range(workers): process = Process( - target=self._worker, args=(work_queue, done_queue, build_results) + target=self._worker, + args=(work_queue, done_queue, build_results) ) work_queue.put("STOP") process.start() @@ -384,7 +410,8 @@ def _process_done_queue( _close_objects(out_file, progress_bar) return True - def _worker(self, work_queue: Queue, done_queue: Queue, build_results: bool = True): + def _worker(self, work_queue: Queue, done_queue: Queue, + build_results: bool = True): """ The work for each parallel sub-process to execute. 
@@ -427,7 +454,7 @@ def _play_matches(self, chunk, build_results=True): player1 = self.players[p1_index].clone() player2 = self.players[p2_index].clone() match_params["players"] = (player1, player2) - match = Match(**match_params) + match = IpdMatch(**match_params) for _ in range(repetitions): match.play() @@ -451,13 +478,15 @@ def _calculate_results(self, interactions): turns = len(interactions) results.append(turns) - score_per_turns = iu.compute_final_score_per_turn(interactions, self.game) + score_per_turns = iu.compute_final_score_per_turn(interactions, + self.game) results.append(score_per_turns) score_diffs_per_turns = score_diffs[0] / turns, score_diffs[1] / turns results.append(score_diffs_per_turns) - initial_coops = tuple(map(bool, iu.compute_cooperations(interactions[:1]))) + initial_coops = tuple( + map(bool, iu.compute_cooperations(interactions[:1]))) results.append(initial_coops) cooperations = iu.compute_cooperations(interactions) diff --git a/docs/tutorials/advanced/games.html b/docs/tutorials/advanced/games.html new file mode 100644 index 000000000..54738a1bc --- /dev/null +++ b/docs/tutorials/advanced/games.html @@ -0,0 +1,427 @@ + + + + + + +Using and playing different stage games + + + +
+

Using and playing different stage games

+ +

As described in :ref:`play_contexts` the default game used for the Prisoner's +Dilemma is given by:

+
+

System Message: ERROR/3 (/home/gaffney/Axelrod/docs/tutorials/advanced/games.rst, line 4); backlink

+Unknown interpreted text role "ref".
+
+>>> import axelrod as axl
+>>> pd = axl.game.Game()
+>>> pd
+Axelrod game: (R,P,S,T) = (3, 1, 0, 5)
+>>> pd.RPST()
+(3, 1, 0, 5)
+
+

These Game objects are used to score :ref:`matches <creating_matches>`, +:ref:`tournaments <creating_tournaments>` and :ref:`Moran processes +<moran-process>`:

+
+

System Message: ERROR/3 (/home/gaffney/Axelrod/docs/tutorials/advanced/games.rst, line 14); backlink

+Unknown interpreted text role "ref".
+
+

System Message: ERROR/3 (/home/gaffney/Axelrod/docs/tutorials/advanced/games.rst, line 14); backlink

+Unknown interpreted text role "ref".
+
+

System Message: ERROR/3 (/home/gaffney/Axelrod/docs/tutorials/advanced/games.rst, line 14); backlink

+Unknown interpreted text role "ref".
+
+>>> pd.score((axl.Action.C, axl.Action.C))
+(3, 3)
+>>> pd.score((axl.Action.C, axl.Action.D))
+(0, 5)
+>>> pd.score((axl.Action.D, axl.Action.C))
+(5, 0)
+>>> pd.score((axl.Action.D, axl.Action.D))
+(1, 1)
+
+

It is possible to run matches, tournaments and Moran processes with a +different game.

+
+>>> chicken = axl.game.Game(r=0, s=-1, t=1, p=-10)
+>>> chicken
+Axelrod game: (R,P,S,T) = (0, -10, -1, 1)
+>>> chicken.RPST()
+(0, -10, -1, 1)
+
+

Here is a simple tournament run with this game:

+
+>>> players = [axl.Cooperator(), axl.Defector(), axl.TitForTat()]
+>>> tournament = axl.Tournament(players, game=chicken)
+>>> results = tournament.play()
+>>> results.ranked_names
+['Cooperator', 'Defector', 'Tit For Tat']
+
+

The default Prisoner's dilemma has different results:

+
+>>> tournament = axl.Tournament(players)
+>>> results = tournament.play()
+>>> results.ranked_names
+['Defector', 'Tit For Tat', 'Cooperator']
+
+
+ + diff --git a/docs/tutorials/advanced/games.rst b/docs/tutorials/advanced/games.rst index a2112c35a..a1db4329c 100644 --- a/docs/tutorials/advanced/games.rst +++ b/docs/tutorials/advanced/games.rst @@ -5,7 +5,7 @@ As described in :ref:`play_contexts` the default game used for the Prisoner's Dilemma is given by:: >>> import axelrod as axl - >>> pd = axl.game.Game() + >>> pd = axl.Game() >>> pd Axelrod game: (R,P,S,T) = (3, 1, 0, 5) >>> pd.RPST() @@ -27,7 +27,7 @@ These :code:`Game` objects are used to score :ref:`matches `, It is possible to run a matches, tournaments and Moran processes with a different game. For example here is the game of chicken:: - >>> chicken = axl.game.Game(r=0, s=-1, t=1, p=-10) + >>> chicken = axl.Game(r=0, s=-1, t=1, p=-10) >>> chicken Axelrod game: (R,P,S,T) = (0, -10, -1, 1) >>> chicken.RPST() diff --git a/docs/tutorials/getting_started/_static/running_axelrods_first_tournament/main.py b/docs/tutorials/getting_started/_static/running_axelrods_first_tournament/main.py index 670a9ec7a..ec9643a99 100644 --- a/docs/tutorials/getting_started/_static/running_axelrods_first_tournament/main.py +++ b/docs/tutorials/getting_started/_static/running_axelrods_first_tournament/main.py @@ -12,7 +12,7 @@ first_tournament_participants_ordered_by_reported_rank ) axl.seed(0) -tournament = axl.Tournament( +tournament = axl.IpdTournament( players=first_tournament_participants_ordered_by_reported_rank, turns=200, repetitions=5, diff --git a/docs/tutorials/getting_started/summarising_tournaments.rst b/docs/tutorials/getting_started/summarising_tournaments.rst index ebfb39b08..1bcdf9c4d 100644 --- a/docs/tutorials/getting_started/summarising_tournaments.rst +++ b/docs/tutorials/getting_started/summarising_tournaments.rst @@ -18,10 +18,10 @@ that summarises the results of the tournament:: >>> summary = results.summarise() >>> import pprint >>> pprint.pprint(summary) - [Player(Rank=0, Name='Defector', Median_score=2.6..., Cooperation_rating=0.0, Wins=3.0, 
Initial_C_rate=0.0, CC_rate=...), - Player(Rank=1, Name='Tit For Tat', Median_score=2.3..., Cooperation_rating=0..., Wins=0.0, Initial_C_rate=1.0, CC_rate=...), - Player(Rank=2, Name='Grudger', Median_score=2.3..., Cooperation_rating=0..., Wins=0.0, Initial_C_rate=1.0, CC_rate=...), - Player(Rank=3, Name='Cooperator', Median_score=2.0..., Cooperation_rating=1.0, Wins=0.0, Initial_C_rate=1.0, CC_rate=...)] + [IpdPlayer(Rank=0, Name='Defector', Median_score=2.6..., Cooperation_rating=0.0, Wins=3.0, Initial_C_rate=0.0, CC_rate=...), + IpdPlayer(Rank=1, Name='Tit For Tat', Median_score=2.3..., Cooperation_rating=0..., Wins=0.0, Initial_C_rate=1.0, CC_rate=...), + IpdPlayer(Rank=2, Name='Grudger', Median_score=2.3..., Cooperation_rating=0..., Wins=0.0, Initial_C_rate=1.0, CC_rate=...), + IpdPlayer(Rank=3, Name='Cooperator', Median_score=2.0..., Cooperation_rating=1.0, Wins=0.0, Initial_C_rate=1.0, CC_rate=...)] It is also possible to write this data directly to a csv file using the `write_summary` method:: diff --git a/migrate_ipd.sh b/migrate_ipd.sh new file mode 100755 index 000000000..fdfd6290d --- /dev/null +++ b/migrate_ipd.sh @@ -0,0 +1,48 @@ +#!/bin/sh +# Migrates data for ipd. Will delete later. + +# Manually move most files into ipd. +# Change any axelrod.ipd to axelrod if the IDE tried to change any. + +# Replace Player with IpdPlayer +# find . -type f -name "*.py" -exec sed -i 's/\([^A-Za-z]\)Player\([^A-Za-z]\)/\1IpdPlayer\2/g' {} ';' +# find . -type f -name "*.py" -exec sed -i 's/^Player\([^A-Za-z]\)/IpdPlayer\1/g' {} ';' +# find . -type f -name "*.py" -exec sed -i 's/\([^A-Za-z]\)Player$/\1IpdPlayer/g' {} ';' +# find . -type f -name "*.py" -exec sed -i 's/^Player$/IpdPlayer/g' {} ';' + +# Replace Match with IpdMatch +# find . -type f -name "*.py" -exec sed -i 's/\([^A-Za-z]\)Match\([^A-Za-z]\)/\1IpdMatch\2/g' {} ';' +# find . -type f -name "*.py" -exec sed -i 's/^Match\([^A-Za-z]\)/IpdMatch\1/g' {} ';' +# find . 
-type f -name "*.py" -exec sed -i 's/\([^A-Za-z]\)Match$/\1IpdMatch/g' {} ';' +# find . -type f -name "*.py" -exec sed -i 's/^Match$/IpdMatch/g' {} ';' + +# Replace Game with IpdGame +# find . -type f -name "*.py" -exec sed -i 's/\([^A-Za-z]\)Game\([^A-Za-z]\)/\1IpdGame\2/g' {} ';' +# find . -type f -name "*.py" -exec sed -i 's/^Game\([^A-Za-z]\)/IpdGame\1/g' {} ';' +# find . -type f -name "*.py" -exec sed -i 's/\([^A-Za-z]\)Game$/\1IpdGame/g' {} ';' +# find . -type f -name "*.py" -exec sed -i 's/^Game$/IpdGame/g' {} ';' + +# Replace Tournament with IpdTournament +# find . -type f -name "*.py" -exec sed -i 's/\([^A-Za-z]\)Tournament\([^A-Za-z]\)/\1IpdTournament\2/g' {} ';' +# find . -type f -name "*.py" -exec sed -i 's/^Tournament\([^A-Za-z]\)/IpdTournament\1/g' {} ';' +# find . -type f -name "*.py" -exec sed -i 's/\([^A-Za-z]\)Tournament$/\1IpdTournament/g' {} ';' +# find . -type f -name "*.py" -exec sed -i 's/^Tournament$/IpdTournament/g' {} ';' + +# Undo IpdPlayer for Player in quotes +# find . -type f -name "*.py" -exec sed -i 's/\"IpdPlayer\ /\"Player\ /g' {} ';' +# Manually change remaining "IpdPlayer index" to "Player index" + +# Fix imports +# find . -type f -name "*.py" -exec sed -i 's/from\ axelrod\./from\ axelrod\.ipd\./g' {} ';' +# find . -type f -name "*.py" -exec sed -i 's/import\ axelrod\./import\ axelrod\.ipd\./g' {} ';' + +# A bunch of stuff needs to be added to __init__, so copy the new file. +# Change "from axelrod.ipd.strategies import FSMPlayer" to "from axelrod.ipd.strategies.finite_state_machines import FSMPlayer" in axelrod_second.py + +# find . 
-type f -name "*.py" -exec sed -i 's/test_outputs/\.\.\/test_outputs/g' {} ';' + +# Manually change "axl.match" to "axl.ipd.match" and "axl.plot" to "axl.ipd.match" and "axl.strategy_transformers" to "axl.ipd.strategy_transformers" and "axl.result_set" to "axl.ipd.result_set" +# Manually change path in test_load_data.py +# Manually change "from axelrod.ipd.tests import TestTitForTat" to "from axelrod.ipd.tests.strategies.test_titfortat import TestTitForTat" +# Manually SimpleFSM and SimpleHMM to _strategies. +# Manually fix test_hmm imports diff --git a/test b/test index e96b8edb5..ab682950b 100755 --- a/test +++ b/test @@ -1,3 +1,3 @@ #!/usr/bin/env bash -python -m unittest discover axelrod/tests/ +python -m unittest discover python doctests.py