diff --git a/.DS_Store b/.DS_Store
new file mode 100644
index 0000000..4de3c9b
Binary files /dev/null and b/.DS_Store differ
diff --git a/_modules/ergodicity/agents/agent_pool.html b/_modules/ergodicity/agents/agent_pool.html
index cd9e033..c5eb599 100644
--- a/_modules/ergodicity/agents/agent_pool.html
+++ b/_modules/ergodicity/agents/agent_pool.html
@@ -114,7 +114,7 @@
from scipy import stats
class AgentPool:
"""
AgentPool represents a collection of agents participating in a wealth dynamics simulation.
@@ -175,7 +175,7 @@ Source code for ergodicity.agents.agent_pool
self.history = [self.wealth.copy()]
def simulate(self, dynamic_s=False):
"""
Run the wealth dynamics simulation for the specified time horizon.
@@ -216,7 +216,7 @@ Source code for ergodicity.agents.agent_pool
# return self.history
def save_data(self, filename):
"""
Save the wealth history data to a file.
@@ -230,7 +230,7 @@ Source code for ergodicity.agents.agent_pool
def plot(self):
"""
Visualize the wealth dynamics of all agents over time.
@@ -259,7 +259,7 @@ Source code for ergodicity.agents.agent_pool
def mean_logarithmic_deviation(self):
"""
Compute the Mean Logarithmic Deviation (MLD) for each time step.
@@ -274,7 +274,7 @@ Source code for ergodicity.agents.agent_pool
def coefficient_of_variation(self):
"""
Compute the Coefficient of Variation for each time step.
@@ -288,7 +288,7 @@ Source code for ergodicity.agents.agent_pool
def palma_ratio(self):
"""
Compute the Palma ratio for each time step.
@@ -309,7 +309,7 @@ Source code for ergodicity.agents.agent_pool
def gini_coefficient(self):
"""
Compute the Gini coefficient for each time step.
@@ -329,7 +329,7 @@ Source code for ergodicity.agents.agent_pool
def plot_inequality_measures(self):
"""
Plot all implemented inequality measures over time.
@@ -361,7 +361,7 @@ Source code for ergodicity.agents.agent_pool
def save_and_plot_wealth_distribution(self, filename_prefix):
"""
Save and plot the final wealth distribution in normal and log-log scales.
@@ -386,23 +386,29 @@ Source code for ergodicity.agents.agent_pool
ax1.set_ylabel('Frequency')
# Log-log scale histogram
- wealth_range = np.logspace(np.log10(min(final_wealth)), np.log10(max(final_wealth)), num=50)
- hist, bins = np.histogram(final_wealth, bins=wealth_range)
+ final_wealth_nonzero = final_wealth[final_wealth > 0]
+ wealth_range = np.logspace(np.log10(min(final_wealth_nonzero)), np.log10(max(final_wealth_nonzero)), num=50)
+ hist, bins = np.histogram(final_wealth_nonzero, bins=wealth_range)
center = (bins[:-1] + bins[1:]) / 2
- ax2.loglog(center, hist, 'k.', markersize=10)
- ax2.set_title('Wealth Distribution (Log-Log Scale)')
- ax2.set_xlabel('Wealth (log scale)')
- ax2.set_ylabel('Frequency (log scale)')
+ # Remove zero counts for log-log fit
+ nonzero = hist > 0
+ log_center = np.log10(center[nonzero])
+ log_hist = np.log10(hist[nonzero])
- # Filter out zero values for log-log fit
- nonzero_wealth = final_wealth[final_wealth > 0]
- fit = stats.linregress(np.log10(nonzero_wealth), np.log10(np.arange(1, len(nonzero_wealth) + 1)[::-1]))
+ # Linear regression in log-log space
+ fit = stats.linregress(log_center, log_hist)
- x_fit = np.logspace(np.log10(min(nonzero_wealth)), np.log10(max(nonzero_wealth)), 100)
+ # Compute the fitted power-law line
+ x_fit = np.logspace(np.log10(min(center[nonzero])), np.log10(max(center[nonzero])), 100)
y_fit = 10 ** (fit.intercept + fit.slope * np.log10(x_fit))
- ax2.plot(x_fit, y_fit, 'r-', label=f'Power Law Fit (α ≈ {-fit.slope:.2f})')
+ # Plotting
+ ax2.loglog(center, hist, 'k.', markersize=10)
+ ax2.plot(x_fit, y_fit, 'r-', label=f'Power Law Fit (α ≈ {-fit.slope:.2f})')
+ ax2.set_title('Wealth Distribution (Log-Log Scale)')
+ ax2.set_xlabel('Wealth (log scale)')
+ ax2.set_ylabel('Frequency (log scale)')
ax2.legend()
plt.tight_layout()
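As a sanity check of the histogram-based power-law fit above, the slope convention can be verified on synthetic data (a sketch; all numbers invented). One caveat worth noting: fitting raw counts on logarithmically spaced bins estimates −(α − 1) rather than −α, because bin widths grow proportionally to w; the sketch below therefore uses density=True so the fitted slope is directly −α.

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(42)
alpha = 2.5
# Inverse-CDF sample from the power-law density p(w) = (alpha - 1) * w**(-alpha), w >= 1
w = (1 - rng.random(100_000)) ** (-1 / (alpha - 1))

bins = np.logspace(0, np.log10(w.max()), 50)
hist, edges = np.histogram(w, bins=bins, density=True)  # density, not raw counts
centers = (edges[:-1] + edges[1:]) / 2
mask = hist > 0

fit = stats.linregress(np.log10(centers[mask]), np.log10(hist[mask]))
print(f"fitted slope ~ {fit.slope:.2f}, expected ~ {-alpha}")
```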
@@ -415,11 +421,16 @@ Source code for ergodicity.agents.agent_pool
import numpy as np
+import matplotlib.pyplot as plt
+import plotly.graph_objects as go
+
+
def plot_wealth_3d(process, w, time, simulation_timestep, timestep, s_range, n_range, save_html=False):
"""
- Plot a 3D graph of total wealth as a function of sharing rate s and number of agents n.
+ Plot a 3D graph of average wealth as a function of sharing rate s and number of agents n.
:param process: The stochastic process to use (e.g., GeometricBrownianMotion instance)
:type process: StochasticProcess
@@ -441,24 +452,24 @@ Source code for ergodicity.agents.agent_pool
:rtype: None
"""
s_values, n_values = np.meshgrid(s_range, n_range)
- total_wealth = np.zeros_like(s_values, dtype=float)
+ average_wealth = np.zeros_like(s_values, dtype=float)
for i, n in enumerate(n_range):
for j, s in enumerate(s_range):
pool = AgentPool(process, n, w, s, time, simulation_timestep, timestep)
pool.simulate(dynamic_s=False)
- total_wealth[i, j] = np.sum(pool.wealth)
+ average_wealth[i, j] = np.mean(pool.wealth)
# Create static 3D plot using Matplotlib
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111, projection='3d')
- surf = ax.plot_surface(s_values, n_values, total_wealth, cmap='viridis')
+ surf = ax.plot_surface(s_values, n_values, average_wealth, cmap='viridis')
ax.set_xlabel('Sharing Rate (s)')
ax.set_ylabel('Number of Agents (n)')
- ax.set_zlabel('Total Wealth')
- ax.set_title('Total Wealth as a Function of Sharing Rate and Number of Agents')
+ ax.set_zlabel('Average Wealth')
+ ax.set_title('Average Wealth as a Function of Sharing Rate and Number of Agents')
fig.colorbar(surf, shrink=0.5, aspect=5)
@@ -466,17 +477,17 @@ Source code for ergodicity.agents.agent_pool
# Create and save interactive 3D plot using Plotly
if save_html:
- fig_plotly = go.Figure(data=[go.Surface(z=total_wealth, x=s_values, y=n_values)])
+ fig_plotly = go.Figure(data=[go.Surface(z=average_wealth, x=s_values, y=n_values)])
fig_plotly.update_layout(
- title='Total Wealth as a Function of Sharing Rate and Number of Agents',
+ title='Average Wealth as a Function of Sharing Rate and Number of Agents',
scene=dict(
xaxis_title='Sharing Rate (s)',
yaxis_title='Number of Agents (n)',
- zaxis_title='Total Wealth'
+ zaxis_title='Average Wealth'
)
)
- html_filename = 'total_wealth_3d_plot.html'
+ html_filename = 'average_wealth_3d_plot.html'
fig_plotly.write_html(html_filename)
print(f"3D interactive graph saved as {html_filename}")
diff --git a/_modules/ergodicity/agents/agents.html b/_modules/ergodicity/agents/agents.html
index 3c0e76c..d951386 100644
--- a/_modules/ergodicity/agents/agents.html
+++ b/_modules/ergodicity/agents/agents.html
@@ -149,7 +149,7 @@ Source code for ergodicity.agents.agents
def general_utility_function(x, alpha, beta, gamma, delta, epsilon):
"""
Calculates the utility for a given x and set of parameters.
@@ -177,7 +177,7 @@ Source code for ergodicity.agents.agents
@dataclass
class Agent_utility:
"""
@@ -205,7 +205,7 @@ Source code for ergodicity.agents.agents
total_accumulated_wealth: float = 1.0 # New field to track total wealth
@staticmethod
def expected_utility(process_dict: Dict[str, Any], params: np.ndarray, t: float = None) -> Union[
Callable[[float], float], float]:
@@ -291,7 +291,7 @@ Source code for ergodicity.agents.agents
@staticmethod
def numerical_expected_utility(process: Union[Dict[str, Any], object], params: np.ndarray,
stochastic_process_class: Type, t: float = None, num_instances: int = 1000) -> Union[
@@ -338,7 +338,7 @@ Source code for ergodicity.agents.agents
@staticmethod
def compare_numerical_and_symbolic_expected_utility(process_dict: Dict[str, Any], params: np.ndarray,
stochastic_process_class: Type, t: float = None):
@@ -382,7 +382,7 @@ Source code for ergodicity.agents.agents
# compare_numerical_and_symbolic_expected_utility(process, params, 1.0, GeometricBrownianMotion)
@staticmethod
def general_utility_function(x, alpha, beta, gamma, delta, epsilon):
"""
@@ -399,7 +399,7 @@ Source code for ergodicity.agents.agents
@staticmethod
def general_utility_function_np(x, alpha, beta, gamma, delta, epsilon):
"""
@@ -427,7 +427,7 @@ Source code for ergodicity.agents.agents
@staticmethod
def visualize_utility_function_evolution(history, output_video_path, output_csv_path):
"""
@@ -494,7 +494,7 @@ Source code for ergodicity.agents.agents
@staticmethod
def initialize_agents(n: int, param_means: np.ndarray, param_stds: np.ndarray) -> List['Agent_utility']:
"""
@@ -512,7 +512,7 @@ Source code for ergodicity.agents.agents
@staticmethod
def mutate_params(params: np.ndarray, mutation_rate: float) -> np.ndarray:
"""
@@ -530,7 +530,7 @@ Source code for ergodicity.agents.agents
@staticmethod
def evolutionary_algorithm(n_agents: int, n_steps: int, save_interval: int,
processes: List[Union[dict, object]],
@@ -699,7 +699,7 @@ Source code for ergodicity.agents.agents
@staticmethod
def evolutionary_algorithm_with_exchange(n_agents: int, n_steps: int, save_interval: int,
processes: List[Union[dict, object]],
@@ -867,7 +867,7 @@ Source code for ergodicity.agents.agents
@staticmethod
def evolutionary_algorithm_with_multiple_processes(
n_agents: int, n_steps: int, save_interval: int,
@@ -1035,7 +1035,7 @@ Source code for ergodicity.agents.agents
def visualize_agent_evolution(history: List[Dict], top_n: int = 5):
"""
Visualize the evolution of top agents and their utility functions.
@@ -1119,7 +1119,7 @@ Source code for ergodicity.agents.agents
def recursive_flatten(data: Any) -> List[float]:
"""
Recursively flatten any nested structure into a 1D list of floats.
@@ -1140,7 +1140,7 @@ Source code for ergodicity.agents.agents
def analyze_utility_function_trends(history: List[Dict], num_agents: int = 10):
"""
Analyze and visualize trends in the utility functions' evolutionary dynamics.
@@ -1254,7 +1254,7 @@ Source code for ergodicity.agents.agents
def process_to_dict(process: Any) -> Dict[str, Any]:
"""
Converts a process object with a closed_formula method to a dictionary format
@@ -1308,7 +1308,7 @@ Source code for ergodicity.agents.agents
def generate_processes(num_processes, process_types, param_ranges):
"""
Generate a list of stochastic processes.
diff --git a/_modules/ergodicity/agents/evaluation.html b/_modules/ergodicity/agents/evaluation.html
index 4cfba6d..c222d6e 100644
--- a/_modules/ergodicity/agents/evaluation.html
+++ b/_modules/ergodicity/agents/evaluation.html
@@ -107,7 +107,6 @@ Source code for ergodicity.agents.evaluation
ufi.plot_utility_functions()
"""
from tensorflow.python.framework.ops import strip_name_scope
-
from ergodicity.agents.sml import *
from ergodicity.tools.helper import ProcessEncoder
import numpy as np
@@ -123,9 +122,10 @@ Source code for ergodicity.agents.evaluation
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
+from ergodicity.tools.compute import *
class UtilityFunction:
"""
UtilityFunction Class
@@ -202,7 +202,7 @@ Source code for ergodicity.agents.evaluation
class UtilityFunctionInference:
"""
UtilityFunctionInference Class
@@ -253,7 +253,7 @@ Source code for ergodicity.agents.evaluation
economics, decision theory, and reinforcement learning, where understanding the underlying
utility functions driving agent behavior is crucial.
"""
- def __init__(self, model_path: str, param_ranges: Dict[str, Dict[str, Tuple[float, float]]]):
+ def __init__(self, model_path: str, param_ranges: Dict[str, Dict[str, Tuple[float, float]]], model=None):
"""
Initialize the UtilityFunctionInference class with a trained model and parameter ranges.
@@ -261,8 +261,13 @@ Source code for ergodicity.agents.evaluation
:type model_path: str
:param param_ranges: Dictionary of process types and their parameter ranges
:type param_ranges: Dict[str, Dict[str, Tuple[float, float]]]
+ :param model: Optional Keras model instance to use instead of loading from file. If None, the model is loaded from the file.
+ :type model: tf.keras.Model or None
"""
- self.model = self.load_model_safely(model_path)
+ if model is not None:
+ self.model = model
+ else:
+ self.model = self.load_model(model_path)
self.agent = NeuralNetworkAgent(self.model)
self.process_encoder = ProcessEncoder()
self.utility_functions = []
@@ -270,10 +275,12 @@ Source code for ergodicity.agents.evaluation
self.mcmc_samples = {}
self.regression_model = None
self.regression_history = None
+ self.dataset = None
+ self.choices = None
-
- def load_model_safely(self, model_path: str):
+
+ def load_model(self, model_path: str):
"""
Load a Keras model from a file, handling potential compatibility issues.
@@ -322,7 +329,7 @@ Source code for ergodicity.agents.evaluation
def add_utility_function(self, utility_function):
"""
Add a utility function to the collection for analysis.
@@ -336,72 +343,88 @@ Source code for ergodicity.agents.evaluation
- def generate_dataset(self, n_processes: int) -> List[np.ndarray]:
+ def generate_dataset(self, n_processes: int, n_options: int = 2, simulate_method=True) -> List[List[np.ndarray]]:
"""
Generate a dataset of stochastic processes for analysis.
- :param n_processes: Number of processes to generate
+ :param n_processes: Number of datasets to generate (i.e., number of decision instances)
:type n_processes: int
- :return: List of datasets, each containing process trajectories
- :rtype: List[np.ndarray]
+ :param n_options: Number of process options per decision instance
+ :type n_options: int
+ :param simulate_method: If True, simulate each process for a fixed horizon with simulate(); otherwise use simulate_until() with a stopping condition
+ :type simulate_method: bool
+ :return: List of datasets, each containing process options
+ :rtype: List[List[np.ndarray]]
"""
dataset = []
process_types = list(self.param_ranges.keys())
for _ in range(n_processes):
- process_type = np.random.choice(process_types)
- process_type_code = process_types.index(process_type) + 1 # 1-indexed
+ process_options = []
+ for _ in range(n_options):
+ process_type = np.random.choice(process_types)
+ process_type_code = process_types.index(process_type) + 1 # 1-indexed
- params = {param: np.random.uniform(low, high)
- for param, (low, high) in self.param_ranges[process_type].items()}
+ params = {param: np.random.uniform(low, high)
+ for param, (low, high) in self.param_ranges[process_type].items()}
- # Create a process instance
- # print(process_type)
- process_type = globals()[process_type]
- process = process_type(**params)
+ # Create a process instance
+ process_class = globals()[process_type]
+ process = process_class(**params)
- # Simulate the process
- trajectory = process.simulate_until(timestep=0.1, num_instances=1, condition=lambda X: X >= 2, X0=1,
- plot=False)
+ # Simulate the process
+ if simulate_method:
+ trajectory = process.simulate(
+ t=1, timestep=0.1, num_instances=1000, plot=False)
+ else:
+ trajectory = process.simulate_until(
+ timestep=0.1, num_instances=1000, condition=lambda X: X >= 2, X0=1, plot=False)
- # Prepare data: [time, process_type_code, param1, param2, ..., trajectory_value]
- times = np.arange(trajectory.shape[0]) * 0.1
- param_values = list(params.values())
- data = np.column_stack((times[:, np.newaxis],
- np.full((trajectory.shape[0], 1), process_type_code),
- np.tile(param_values, (trajectory.shape[0], 1)),
- trajectory))
+ # Prepare data: trajectory values
+ # Assuming trajectory is of shape (num_time_steps, num_instances)
+ # We take the final values for each instance
+ final_values = trajectory[-1, :]
- dataset.append(data)
+ process_data = np.column_stack((
+ np.full(final_values.shape, process_type_code),
+ np.tile(list(params.values()), (final_values.shape[0], 1)),
+ final_values.reshape(-1, 1)
+ ))
+
+ process_options.append(process_data)
+
+ dataset.append(process_options)
return dataset
- def get_agent_choices(self, dataset: List[np.ndarray]) -> List[int]:
+ def get_agent_choices(self, data: List[np.ndarray]) -> int:
"""
- Get the agent's choices based on the dataset of processes.
+ Get the agent's choice based on a dataset of process options.
- :param dataset: List of datasets, each containing process trajectories
- :type dataset: List[np.ndarray]
- :return: List of agent choices
- :rtype: List[int]
+ :param data: A list of process options, each containing process data (np.ndarray)
+ :type data: List[np.ndarray]
+ :return: The index of the agent's choice
+ :rtype: int
"""
all_encoded_processes = []
- for data in dataset:
+ for process_data in data:
try:
- process_type = self.get_process_type(data[0, 1])
- params = {k: v for k, v in zip(self.param_ranges[process_type].keys(),
- data[0, 2:2 + len(self.param_ranges[process_type])])}
- process_type = globals()[process_type]
- process = process_type(**params)
+ # Extract process_type_code and parameters from the first row
+ first_row = process_data[0]
+ process_type_code = first_row[0]
+ process_type = self.get_process_type(process_type_code)
+ num_params = len(self.param_ranges[process_type])
+ params_values = first_row[1:1 + num_params]
+ params = {k: v for k, v in zip(self.param_ranges[process_type].keys(), params_values)}
+ process_class = globals()[process_type]
+ process = process_class(**params)
encoded_process = self.process_encoder.encode_process_with_time(process, 1.0)
all_encoded_processes.append(encoded_process)
except ValueError as e:
print(f"Error processing data point: {e}")
- # Skip this data point
+ # Skip this process option
continue
if not all_encoded_processes:
@@ -412,11 +435,11 @@ Source code for ergodicity.agents.evaluation
choice = self.agent.select_process(all_encoded_processes)
print(f"Agent's choice: {choice}")
- return [choice] # Return as a list to maintain consistency with previous implementation
+ return choice # Return the index of the chosen process
def get_process_type(self, type_code) -> str:
"""
Get the process type name based on the type code.
@@ -438,54 +461,94 @@ Source code for ergodicity.agents.evaluation
raise ValueError(f"Invalid type_code: {type_code}. Valid codes are {list(type_mapping.keys())}")
+
+ def generate_choices(self, n_options: int = 2, n_choices: int = 100):
+ """
+ Generate agent choices among stochastic processes, together with the corresponding dataset of process options, for testing utility functions.
+
+ :param n_options: Number of process options per decision instance
+ :type n_options: int
+ :param n_choices: Number of choices to generate (number of decision instances)
+ :type n_choices: int
+ :return: Tuple (dataset, choices): dataset is a list of decision instances, each a list of process options; choices holds the index of the selected process for each instance
+ :rtype: Tuple[List[List[np.ndarray]], List[int]]
+ """
+ choices = []
+ dataset = []
+ for i in range(n_choices):
+ data = self.generate_dataset(n_processes=1, n_options=n_options)
+ data = data[0] # Since generate_dataset returns a list of datasets
+ choice = self.get_agent_choices(data)
+ choices.append(choice)
+ dataset.append(data)
+
+ return dataset, choices
+
+
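A sketch of how the refactored choice-generation pipeline might be driven end to end; the model path and parameter ranges below are placeholders:

```python
param_ranges = {
    'GeometricBrownianMotion': {'drift': (0.0, 0.5), 'volatility': (0.1, 0.5)},  # names assumed
}
ufi = UtilityFunctionInference('trained_agent.keras', param_ranges)  # path is a placeholder

dataset, choices = ufi.generate_choices(n_options=2, n_choices=100)
ufi.fit_utility_functions(dataset, choices)
ufi.print_results()
```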
- def negative_log_likelihood(self, params: List[float], utility_func: Callable, dataset: List[np.ndarray],
+ def negative_log_likelihood(self, params: List[float], utility_func: Callable, dataset: List[List[np.ndarray]],
choices: List[int]) -> float:
"""
- Calculate the negative log-likelihood of the utility function given the dataset and choices.
- It is needed for maximum likelihood estimation (MLE) optimization.
+ Calculate the negative log-likelihood of the utility function given the dataset and choices,
+ assuming the agent maximizes expected utility.
:param params: Utility function parameters
:type params: List[float]
:param utility_func: Utility function to evaluate
:type utility_func: Callable
- :param dataset: List of datasets, each containing process trajectories
- :type dataset: List[np.ndarray]
+ :param dataset: List of datasets, each containing lists of process trajectories
+ :type dataset: List[List[np.ndarray]]
:param choices: List of agent choices
:type choices: List[int]
:return: Negative log-likelihood value
:rtype: float
"""
- utility_values = np.array(
- [utility_func(data[0, 2], *params) for data in dataset]) # Use first parameter as input
- probs = utility_values / np.sum(utility_values)
- return -np.sum(np.log(probs[choices]))
+ total_nll = 0
+ for data, choice in zip(dataset, choices):
+ expected_utilities = []
+ for process_data in data:
+ final_values = process_data[:, -1] # Final values of trajectories
+ utilities = utility_func(final_values, *params)
+ expected_utility = np.mean(utilities)
+ expected_utilities.append(expected_utility)
+ expected_utilities = np.array(expected_utilities)
+ # Softmax over expected utilities
+ exp_utilities = np.exp(expected_utilities - np.max(expected_utilities))
+ probs = exp_utilities / np.sum(exp_utilities)
+ total_nll -= np.log(probs[choice])
+ return total_nll
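A toy check of the softmax choice model above (numbers invented): with expected utilities [1.0, 2.0] and the agent choosing option 1, the stabilized softmax yields probabilities of roughly [0.27, 0.73], contributing −log(0.73) ≈ 0.31 to the total NLL.

```python
import numpy as np

expected_utilities = np.array([1.0, 2.0])
exp_u = np.exp(expected_utilities - expected_utilities.max())  # max-shift for numerical stability
probs = exp_u / exp_u.sum()                                    # -> [0.2689, 0.7311]
nll_contribution = -np.log(probs[1])                           # -> 0.3133
```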
- def fit_utility_functions(self, dataset: List[np.ndarray], choices: List[int]):
+ def fit_utility_functions(self, dataset: List[List[np.ndarray]], choices: List[int]):
"""
- Fit the utility functions to the observed choices using maximum likelihood estimation (MLE).
+ Fit the utility functions to the observed choices using maximum likelihood estimation (MLE),
+ assuming the agent maximizes expected utility.
- :param dataset: List of datasets, each containing process trajectories
- :type dataset: List[np.ndarray]
+ :param dataset: List of datasets, each containing lists of process trajectories
+ :type dataset: List[List[np.ndarray]]
:param choices: List of agent choices
:type choices: List[int]
:return: None
:rtype: None
"""
for utility_function in self.utility_functions:
- res = minimize(self.negative_log_likelihood, x0=utility_function.initial_params,
- args=(utility_function.func, dataset, choices),
- method='Nelder-Mead', options={'maxiter': 1000})
+ res = minimize(
+ self.negative_log_likelihood,
+ x0=utility_function.initial_params,
+ args=(utility_function.func, dataset, choices),
+ method='Nelder-Mead',
+ options={'maxiter': 1000}
+ )
utility_function.fitted_params = res.x
utility_function.nll = res.fun
def print_results(self):
"""
Print the fitted utility functions and their parameters, including negative log-likelihood.
@@ -501,7 +564,7 @@ Source code for ergodicity.agents.evaluation
def plot_utility_functions(self, x_range: Tuple[float, float] = (0, 0.5)):
"""
Plot the fitted utility functions for visualization.
@@ -516,7 +579,7 @@ Source code for ergodicity.agents.evaluation
for utility_function in self.utility_functions:
y = [utility_function(xi) for xi in x]
plt.plot(x, y, label=utility_function.name)
- plt.xlabel('First Parameter (e.g., μ for Brownian Motion)')
+ plt.xlabel('Process value')
plt.ylabel('Utility')
plt.title('Fitted Utility Functions')
plt.legend()
@@ -525,16 +588,16 @@ Source code for ergodicity.agents.evaluation
- def bayesian_fit_utility_functions(self, dataset: List[np.ndarray], choices: List[int],
+ def bayesian_fit_utility_functions(self, dataset: List[List[np.ndarray]], choices: List[int],
n_samples: int = 10000, burn_in: int = 1000):
"""
Perform Bayesian inference on utility functions using Metropolis-Hastings sampling.
It generates samples from the posterior distribution of the utility function parameters.
- In the result, it provides a distribution of parameter values instead of a single point estimate.
+ Provides a distribution of parameter values instead of a single point estimate.
- :param dataset: List of datasets, each containing process trajectories
- :type dataset: List[np.ndarray]
+ :param dataset: List of datasets, each containing lists of process trajectories
+ :type dataset: List[List[np.ndarray]]
:param choices: List of agent choices
:type choices: List[int]
:param n_samples: Number of MCMC samples to generate
@@ -544,24 +607,27 @@ Source code for ergodicity.agents.evaluation
:return: None
:rtype: None
"""
+ # Store dataset and choices as instance variables
+ self.dataset = dataset
+ self.choices = choices
+
for utility_function in self.utility_functions:
samples = self.metropolis_hastings(utility_function, dataset, choices, n_samples, burn_in)
self.mcmc_samples[utility_function.name] = samples
- def metropolis_hastings(self, utility_function, dataset: List[np.ndarray], choices: List[int],
+ def metropolis_hastings(self, utility_function, dataset: List[List[np.ndarray]], choices: List[int],
n_samples: int, burn_in: int) -> np.ndarray:
"""
Perform Metropolis-Hastings sampling for Bayesian inference on utility functions.
- It generates samples from the posterior distribution of the utility function parameters.
- It returns an array of MCMC samples for the utility function parameters.
+ Generates samples from the posterior distribution of the utility function parameters.
:param utility_function: Utility function to fit
:type utility_function: UtilityFunction
- :param dataset: List of datasets, each containing process trajectories
- :type dataset: List[np.ndarray]
+ :param dataset: List of datasets, each containing lists of process trajectories
+ :type dataset: List[List[np.ndarray]]
:param choices: List of agent choices
:type choices: List[int]
:param n_samples: Number of samples to generate
@@ -579,14 +645,18 @@ Source code for ergodicity.agents.evaluation
# Propose new parameters
proposal_params = current_params + norm.rvs(scale=0.1, size=n_params)
- # Calculate log likelihood ratio
- current_ll = -self.negative_log_likelihood(current_params, utility_function.func, dataset, choices)
- proposal_ll = -self.negative_log_likelihood(proposal_params, utility_function.func, dataset, choices)
+ # Calculate log likelihoods
+ current_nll = self.negative_log_likelihood(current_params, utility_function.func, dataset, choices)
+ proposal_nll = self.negative_log_likelihood(proposal_params, utility_function.func, dataset, choices)
+
+ # Calculate acceptance probability
+ acceptance_prob = np.exp(-(proposal_nll - current_nll))
- # Accept or reject
- if np.log(np.random.random()) < proposal_ll - current_ll:
+ # Accept or reject the new parameters
+ if np.random.rand() < acceptance_prob:
current_params = proposal_params
+ # Store the sample after burn-in period
if i >= burn_in:
samples[i - burn_in] = current_params
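With a symmetric Gaussian proposal and an implicitly flat prior, the acceptance probability reduces to the likelihood ratio exp(−ΔNLL); a toy illustration with invented values:

```python
import numpy as np

current_nll, proposal_nll = 10.0, 10.5                   # proposal fits slightly worse
acceptance_prob = np.exp(-(proposal_nll - current_nll))  # exp(-0.5) ~ 0.61
accept = np.random.rand() < acceptance_prob              # worse proposals are still accepted sometimes
```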
@@ -594,7 +664,7 @@ Source code for ergodicity.agents.evaluation
def print_bayesian_results(self):
"""
Print the mean and standard deviation of the fitted parameters from Bayesian inference.
@@ -611,16 +681,26 @@ Source code for ergodicity.agents.evaluation
- def plot_bayesian_results(self, x_range: Tuple[float, float] = (0, 0.5)):
+ def plot_bayesian_results(self, x_range: Tuple[float, float] = None):
"""
Plot the fitted utility functions based on Bayesian inference.
- :param x_range: Range of x values to plot
- :type x_range: Tuple[float, float]
+ :param x_range: Range of x values to plot. If None, it will be determined based on the data.
+ :type x_range: Tuple[float, float] or None
:return: None
:rtype: None
"""
+ if x_range is None:
+ # Determine x_range based on the final values in the dataset
+ all_final_values = []
+ for data in self.dataset:
+ for process_data in data:
+ final_values = process_data[:, -1]
+ all_final_values.extend(final_values)
+ x_min, x_max = np.min(all_final_values), np.max(all_final_values)
+ x_range = (x_min, x_max)
+
x = np.linspace(x_range[0], x_range[1], 100)
plt.figure(figsize=(12, 8))
@@ -638,7 +718,7 @@ Source code for ergodicity.agents.evaluation
y_95 = np.percentile(y_samples, 95, axis=0)
plt.fill_between(x, y_5, y_95, alpha=0.3)
- plt.xlabel('First Parameter (e.g., μ for Brownian Motion)')
+ plt.xlabel('Process Value')
plt.ylabel('Utility')
plt.title('Fitted Utility Functions (Bayesian Inference)')
plt.legend()
@@ -647,7 +727,7 @@ Source code for ergodicity.agents.evaluation
def plot_parameter_distributions(self):
"""
Plot the distributions of fitted parameters from Bayesian inference.
@@ -672,15 +752,18 @@ Source code for ergodicity.agents.evaluation
- def regression_fit(self, n_processes: int = 10000, test_size: float = 0.2, epochs: int = 100,
+ def regression_fit(self, n_processes: int = 10000, n_options: int = 2, test_size: float = 0.2, epochs: int = 100,
batch_size: int = 32):
"""
Train a regression model to predict agent preferences based on process parameters.
- The model is trained using a dataset of stochastic processes and agent choices.
+ The model is trained using a dataset of stochastic processes and agent choices,
+ assuming the agent maximizes expected utility.
- :param n_processes: Number of processes to generate for training
+ :param n_processes: Number of decision instances to generate for training
:type n_processes: int
+ :param n_options: Number of process options per decision instance
+ :type n_options: int
:param test_size: Fraction of data to use for testing
:type test_size: float
:param epochs: Number of training epochs
@@ -691,30 +774,48 @@ Source code for ergodicity.agents.evaluation
:return: None
:rtype: None
"""
- if test_size <= 0 or test_size >= 1:
+ # Input Validation
+ if not (0 < test_size < 1):
raise ValueError("test_size must be between 0 and 1")
if epochs <= 0:
raise ValueError("epochs must be a positive integer")
if batch_size <= 0:
raise ValueError("batch_size must be a positive integer")
- # Generate dataset and get agent choice
- dataset = self.generate_dataset(n_processes)
- choice = self.get_agent_choices(dataset)[0] # Get the single choice
+ # Generate dataset and get agent choices
+ dataset, choices = self.generate_choices(n_options=n_options, n_choices=n_processes)
+
+ # Assign dataset and choices to instance variables for potential use in plotting
+ self.dataset = dataset
+ self.choices = choices
# Prepare input data (process type code and parameters) and output data (choice)
X = []
- for data in dataset:
- process_type_code = data[0, 1]
- params = data[0, 2:2 + len(list(self.param_ranges.values())[0])]
- X.append(np.concatenate(([process_type_code], params)))
+ y = []
+ for data, choice in zip(dataset, choices):
+ for idx, process_data in enumerate(data):
+ # Extract process_type_code and parameters from the first row
+ first_row = process_data[0]
+ process_type_code = first_row[0]
+ process_type = self.get_process_type(process_type_code)
+ num_params = len(self.param_ranges[process_type])
+ params_values = first_row[1:1 + num_params]
+ process_params = list(params_values)
+
+ # Create feature vector: [process_type_code, param1, param2, ...]
+ feature_vector = np.concatenate(([process_type_code], params_values))
+ X.append(feature_vector)
+
+ # Create label: 1 if this process option is the chosen one, else 0
+ label = 1 if idx == choice else 0
+ y.append(label)
+
X = np.array(X)
- y = np.zeros(len(X))
- y[choice] = 1 # Set the chosen process to 1, all others to 0
+ y = np.array(y)
# Print shapes for debugging
- print(f"X shape: {X.shape}")
- print(f"y shape: {y.shape}")
+ print(f"X shape: {X.shape}") # Should be (n_processes * n_options, num_features)
+ print(f"y shape: {y.shape}") # Should be (n_processes * n_options,)
# Split data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)
@@ -722,16 +823,18 @@ Source code for ergodicity.agents.evaluation
# Print final shapes for debugging
print(f"X_train shape: {X_train.shape}")
print(f"X_test shape: {X_test.shape}")
+ print(f"y_train shape: {y_train.shape}")
+ print(f"y_test shape: {y_test.shape}")
- # Create and compile the model
+ # Create and compile the regression model
self.regression_model = keras.Sequential([
keras.layers.Dense(64, activation='relu', input_shape=(X_train.shape[1],)),
keras.layers.Dense(32, activation='relu'),
- keras.layers.Dense(1, activation='sigmoid')
+ keras.layers.Dense(1, activation='sigmoid') # Binary classification
])
self.regression_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
- # Train the model
+ # Train the regression model
self.regression_history = self.regression_model.fit(
X_train, y_train,
epochs=epochs,
@@ -742,7 +845,7 @@ Source code for ergodicity.agents.evaluation
def plot_regression_results(self):
"""
Plot the results of the regression model training.
@@ -780,7 +883,7 @@ Source code for ergodicity.agents.evaluation
def predict_preference(self, process_type: str, params: Dict[str, float]) -> float:
"""
Predict the agent's preference for a given process type and parameters.
@@ -805,7 +908,7 @@ Source code for ergodicity.agents.evaluation
def plot_preference_heatmap(self, process_type: str, param1: str, param2: str, n_points: int = 20):
"""
Plot a heatmap of agent preferences for different parameter values of a process type.
@@ -850,7 +953,7 @@ Source code for ergodicity.agents.evaluation
def analyze_feature_importance(self):
"""
Analyze the importance of different features in the regression model.
@@ -897,7 +1000,7 @@ Source code for ergodicity.agents.evaluation
def analyze_feature_interactions(self):
"""
Analyze the interaction strength between features in the regression model.
@@ -933,7 +1036,7 @@ Source code for ergodicity.agents.evaluation
def plot_partial_dependence(self, feature_index, num_points=100):
"""
Plot the partial dependence of the predicted preference on a selected feature.
@@ -983,40 +1086,69 @@ Source code for ergodicity.agents.evaluation
- def perform_irl(self, n_processes=1000):
+ def perform_irl(self, n_processes=1000, n_options=2):
"""
Perform Inverse Reinforcement Learning (IRL) to infer the reward function from agent choices.
It uses the MaxEntIRL algorithm to learn the reward weights based on agent behavior.
It is designed to understand the underlying reward structure that drives the agent's decisions.
- :param n_processes: Number of processes to generate for IRL
+ :param n_processes: Number of decision instances to generate for IRL
:type n_processes: int
+ :param n_options: Number of process options per decision instance
+ :type n_options: int
:return: Inferred reward weights
:rtype: np.ndarray
"""
- dataset = self.generate_dataset(n_processes)
- choices = self.get_agent_choices(dataset)
+ # Generate dataset and agent choices
+ dataset, choices = self.generate_choices(n_options=n_options, n_choices=n_processes)
+
+ # Assign dataset and choices to instance variables for potential use in plotting
+ self.dataset = dataset
+ self.choices = choices
# Prepare trajectories for IRL
trajectories = []
for data, choice in zip(dataset, choices):
- state = np.concatenate(([data[0, 1]], data[0, 2:2 + len(list(self.param_ranges.values())[0])]))
- trajectories.append([(state, choice)])
+ trajectory = []
+ for idx, process_data in enumerate(data):
+ # Extract process features from the first trajectory (all trajectories have the same features)
+ first_row = process_data[0]
+ process_type_code = first_row[0]
+ process_type = self.get_process_type(process_type_code)
+ num_params = len(self.param_ranges[process_type])
+ params_values = first_row[1:1 + num_params]
+ state = np.concatenate(([process_type_code], params_values))
+
+ # Define action: 1 if chosen, 0 otherwise
+ action = 1 if idx == choice else 0
+
+ # Append the (state, action) pair to the trajectory
+ trajectory.append((state, action))
+ trajectories.append(trajectory)
# Initialize and fit IRL model
- n_features = len(state)
- n_actions = len(set(choices))
+ # Number of features corresponds to the length of the state vector
+ n_features = len(trajectories[0][0][0])
+ # Number of actions: binary (chosen or not chosen)
+ n_actions = 2
+
+ # Initialize MaxEntIRL model
irl_model = MaxEntIRL(n_features, n_actions)
+
+ # Fit the IRL model using the prepared trajectories
reward_weights = irl_model.fit(trajectories)
# Print and plot results
print("Inferred Reward Weights:")
- feature_names = ['Process Type'] + list(self.param_ranges[list(self.param_ranges.keys())[0]].keys())
+ # Feature names: ['Process Type', 'Param1', 'Param2', ...]
+ process_types = list(self.param_ranges.keys())
+ feature_names = ['Process Type'] + list(self.param_ranges[process_types[0]].keys())
for name, weight in zip(feature_names, reward_weights):
print(f"{name}: {weight:.4f}")
+ # Plot the inferred reward weights
plt.figure(figsize=(10, 6))
plt.bar(feature_names, reward_weights)
plt.title("Inferred Reward Weights")
@@ -1031,7 +1163,7 @@ Source code for ergodicity.agents.evaluation
class MaxEntIRL:
"""
MaxEntIRL (Maximum Entropy Inverse Reinforcement Learning) Class
@@ -1107,7 +1239,7 @@ Source code for ergodicity.agents.evaluation
self.reward_weights = np.random.rand(n_features)
def feature_expectations(self, trajectories):
"""
Compute empirical feature expectations.
@@ -1126,7 +1258,7 @@ Source code for ergodicity.agents.evaluation
def compute_state_visitation_freq(self, trajectories, policy):
"""
Compute state visitation frequencies.
@@ -1154,7 +1286,7 @@ Source code for ergodicity.agents.evaluation
def compute_expected_svf(self, trajectories, policy):
"""
Compute expected state visitation frequency.
@@ -1176,7 +1308,7 @@ Source code for ergodicity.agents.evaluation
def compute_gradient(self, feat_exp, exp_svf):
"""
Compute the gradient for the optimization.
@@ -1193,7 +1325,7 @@ Source code for ergodicity.agents.evaluation
def optimize_reward(self, trajectories):
"""
Optimize the reward function.
@@ -1218,7 +1350,7 @@ Source code for ergodicity.agents.evaluation
def compute_policy(self, trajectories):
"""
Compute the policy based on current reward weights.
@@ -1238,7 +1370,7 @@ Source code for ergodicity.agents.evaluation
def fit(self, trajectories):
"""
Fit the IRL model to the given trajectories.
@@ -1255,7 +1387,7 @@ Source code for ergodicity.agents.evaluation
def predict_reward(self, state):
"""
Predict the reward for a given state.
@@ -1314,7 +1446,7 @@ Source code for ergodicity.agents.evaluation
return np.log(1 + gamma * x)
def utility_quadratic(x: float, a: float, b: float) -> float:
"""
Quadratic utility function.
@@ -1333,7 +1465,7 @@ Source code for ergodicity.agents.evaluation
def utility_arctan(x: float, k: float) -> float:
"""
Arctan utility function.
@@ -1350,7 +1482,7 @@ Source code for ergodicity.agents.evaluation
def utility_sigmoid(x: float, k: float, x0: float) -> float:
"""
Sigmoid utility function.
@@ -1369,7 +1501,7 @@ Source code for ergodicity.agents.evaluation
def utility_linear_threshold(x: float, a: float, b: float) -> float:
"""
Linear threshold utility function.
@@ -1388,7 +1520,7 @@ Source code for ergodicity.agents.evaluation
def utility_cobb_douglas(x: float, alpha: float, beta: float) -> float:
"""
Cobb-Douglas utility function.
@@ -1407,7 +1539,7 @@ Source code for ergodicity.agents.evaluation
def utility_prospect_theory(x: float, alpha: float, lambda_: float) -> float:
"""
Prospect Theory utility function.
@@ -1479,7 +1611,7 @@ Source code for ergodicity.agents.evaluation
print(f"The utility of {x} according to the best-fitting function is: {utility}")
class UtilityFunctionTester:
"""
UtilityFunctionTester Class
@@ -1555,7 +1687,7 @@ Source code for ergodicity.agents.evaluation
self.results = []
def generate_process_parameters(self, n_samples: int) -> List[Dict[str, float]]:
"""
Generate random process parameters within specified ranges.
@@ -1574,7 +1706,7 @@ Source code for ergodicity.agents.evaluation
def simulate_process(self, params: Dict[str, float], n_steps: int) -> np.ndarray:
"""
Simulate the stochastic process with given parameters.
@@ -1591,7 +1723,7 @@ Source code for ergodicity.agents.evaluation
def calculate_utility(self, utility_func: Callable, trajectory: np.ndarray, utility_params: List[float]) -> float:
"""
Calculate the utility of a given stochastic process instance using a utility function.
@@ -1608,7 +1740,7 @@ Source code for ergodicity.agents.evaluation
def optimize_utility_function(self, utility_func: Callable, trajectory: np.ndarray) -> Tuple[List[float], float]:
"""
Optimize the parameters of a utility function for a given trajectory.
@@ -1630,7 +1762,7 @@ Source code for ergodicity.agents.evaluation
def test_utility_functions(self, process_params: Dict[str, float], n_steps: int) -> Dict:
"""
Test all utility functions against a simulated process trajectory.
@@ -1655,8 +1787,8 @@ Source code for ergodicity.agents.evaluation
- def run_tests(self, n_processes: int, n_steps: int, n_jobs: int = -1):
+ def run_tests(self, n_processes: int, n_steps: int, n_jobs: int = 1):
"""
Run tests for multiple processes in parallel.
@@ -1677,7 +1809,7 @@ Source code for ergodicity.agents.evaluation
def analyze_results(self):
"""
Perform statistical analysis on the test results for all utility functions.
@@ -1723,7 +1855,7 @@ Source code for ergodicity.agents.evaluation
def plot_optimal_utility_vs_process_params(self):
"""
Plot the relationship between process parameters and optimal utility values.
@@ -1755,25 +1887,16 @@ Source code for ergodicity.agents.evaluation
from ergodicity.process.basic import BrownianMotion
def utility_power(x, beta):
-
-
+ return x ** beta
def utility_exp(x, alpha):
-
-
+ return 1 - np.exp(-alpha * x)
def utility_log(x, gamma):
-
-
+ return np.log(1 + gamma * x)
process_class = BrownianMotion
@@ -1788,6 +1911,191 @@ Source code for ergodicity.agents.evaluation
tester.run_tests(n_processes=1000, n_steps=1000)
tester.analyze_results()
tester.plot_optimal_utility_vs_process_params()
+
+
+# Utility functions as functions of x without parameters
+# NOTE: definitions reconstructed; the fixed parameter values are assumed for illustration
+
+def utility_power(x, beta=0.5):
+ return x ** beta
+
+def utility_exp(x, alpha=1.0):
+ return 1 - np.exp(-alpha * x)
+
+def utility_log(x, gamma=1.0):
+ return np.log(1 + gamma * x)
+
+# Updated utility_functions list
+utility_functions = [
+ {'name': 'Power', 'function': utility_power},
+ {'name': 'Exponential', 'function': utility_exp},
+ {'name': 'Logarithmic', 'function': utility_log},
+]
+
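A quick illustration of how the candidates in this list score a batch of terminal process values (numbers invented; uses the reconstructed definitions above):

```python
import numpy as np

x = np.array([0.8, 1.1, 1.6])  # invented terminal values
for uf in utility_functions:
    print(uf['name'], np.mean(uf['function'](x)))  # expected utility per candidate
```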
+# Define the agent class
+
+class AgentEvaluation:
+ """
+ AgentEvaluation Class
+
+ A thin wrapper around a trained model that exposes a choice interface over encoded stochastic processes.
+ """
+ def __init__(self, model):
+ """
+ Initialize the AgentEvaluation with the given model.
+
+ :param model: Trained model for selecting processes
+ :type model: Model
+ """
+ self.model = model
+
+
+ def select_process(self, encoded_processes, select_max=True):
+ """
+ Select a process based on the model's predictions.
+
+ :param encoded_processes: Encoded processes to choose from
+ :param select_max: Whether to select the process with the maximum score
+ :return: Index of the selected process
+ """
+ # Compute scores for each process
+ scores = self.model.predict(encoded_processes)
+ if select_max:
+ choice = np.argmax(scores)
+ else:
+ choice = np.argmin(scores)
+ return choice
+
+
+
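A minimal, self-contained usage sketch of select_process; DummyModel is a hypothetical stand-in for any object with a Keras-style predict method:

```python
import numpy as np

class DummyModel:
    """Hypothetical stand-in: scores each encoded process by its coordinate sum."""
    def predict(self, x):
        return x.sum(axis=1, keepdims=True)

encoded_processes = np.array([[0.1, 0.2], [0.4, 0.3]])  # invented encodings
agent = AgentEvaluation(DummyModel())
print(agent.select_process(encoded_processes))  # -> 1 (higher score)
```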
+# Main function
+
+def evaluate_utility_functions(utility_functions, agent, processes, param_ranges, n_process_batches=100, n_options=2, num_instances=1000, select_max=True):
+ """
+ Evaluate utility functions based on agent choices.
+ Each utility function is scored by the fraction of decision instances in which its expected-utility choice matches the agent's choice.
+
+ :param utility_functions: List of utility functions to evaluate
+ :type utility_functions: List[Dict[str, Any]]
+ :param agent: An instance of the AgentEvaluation class
+ :type agent: AgentEvaluation
+ :param processes: A list of process types
+ :type processes: List[Dict[str, str]]
+ :param param_ranges: A dictionary of parameter ranges for each process type
+ :type param_ranges: Dict[str, Dict[str, Tuple[float, float]]]
+ :param n_process_batches: Number of process batches to evaluate
+ :type n_process_batches: int
+ :param n_options: Number of process options per batch
+ :type n_options: int
+ :param num_instances: Number of instances used in the expected utility calculation
+ :type num_instances: int
+ :param select_max: Whether to select the process with the maximum expected utility (True) or the minimum (False)
+ :type select_max: bool
+ :return: Dictionary of likelihood scores for each utility function
+ :rtype: Dict[str, float]
+ """
+ # Define the process encoder
+ process_encoder = ProcessEncoder()
+
+ # Initialize counters for utility functions
+ utility_counts = {uf['name']: 0 for uf in utility_functions}
+
+ # Total number of batches
+ total_batches = n_process_batches
+
+ for batch_index in range(n_process_batches):
+ # For each batch, generate n_options processes
+ process_options = []
+ for option_index in range(n_options):
+ # Randomly select a process type
+ process_info = np.random.choice(processes)
+ process_type = process_info['type']
+ process_class = globals()[process_type]
+ # Randomly select parameters within specified ranges
+ params = {}
+ for param_name, (low, high) in param_ranges[process_type].items():
+ params[param_name] = np.random.uniform(low, high)
+ # Create process instance
+ process = process_class(**params)
+ # Simulate the process to get final values x
+ x = process.simulate(t=1.0, num_instances=num_instances)
+ times, x = separate(x)
+ # Get final values
+ x = x[:, -1]
+ # x = np.maximum(x, 0.001) # Ensure x is positive
+ # Store the process data
+ process_data = {
+ 'process': process,
+ 'x': x
+ }
+ process_options.append(process_data)
+
+ # Encode the processes using ProcessEncoder
+ encoded_processes = []
+ for process_data in process_options:
+ process = process_data['process']
+ encoded_process = process_encoder.encode_process_with_time(process, time=1.0)
+ encoded_processes.append(encoded_process)
+ encoded_processes = np.array(encoded_processes)
+
+ # The agent makes its choice
+ agent_choice = agent.select_process(encoded_processes, select_max=select_max)
+
+ # For each utility function, compute expected utilities and determine the process with the highest expected utility
+ for uf in utility_functions:
+ expected_utilities = []
+ for p_data in process_options:
+ x = p_data['x']
+ u_x = uf['function'](x)
+ expected_u = np.mean(u_x)
+ expected_utilities.append(expected_u)
+ # Determine the process with the maximum expected utility
+ if select_max:
+ utility_choice = np.argmax(expected_utilities)
+ else:
+ utility_choice = np.argmin(expected_utilities)
+ # Compare with agent's choice
+ if utility_choice == agent_choice:
+ utility_counts[uf['name']] += 1
+
+ # After all batches, calculate likelihood scores
+ likelihood_scores = {}
+ for uf in utility_functions:
+ count = utility_counts[uf['name']]
+ likelihood = count / total_batches
+ likelihood_scores[uf['name']] = likelihood
+
+ # Return the likelihood scores
+ return likelihood_scores
+
+
+# Example usage
+if __name__ == "__main__":
+ # Instantiate the agent with a trained Keras model (file name is a placeholder)
+ model = keras.models.load_model('agent_model.keras')
+ agent = AgentEvaluation(model)
+
+ # Example process list and parameter ranges (names and values are illustrative)
+ processes = [{'type': 'GeometricBrownianMotion'}]
+ param_ranges = {'GeometricBrownianMotion': {'drift': (0.0, 0.5), 'volatility': (0.1, 0.5)}}
+
+ # Evaluate utility functions
+ likelihood_scores = evaluate_utility_functions(
+ utility_functions=utility_functions,
+ agent=agent,
+ processes=processes,
+ param_ranges=param_ranges,
+ n_process_batches=100,
+ n_options=2,
+ num_instances=1000,
+ select_max=True
+ )
+
+ # Print the likelihood scores
+ print("Likelihood scores for each utility function:")
+ for name, score in likelihood_scores.items():
+ print(f"{name}: {score:.4f}")
diff --git a/_modules/ergodicity/agents/evolutionary_nn.html b/_modules/ergodicity/agents/evolutionary_nn.html
index 3929cb2..4d36baf 100644
--- a/_modules/ergodicity/agents/evolutionary_nn.html
+++ b/_modules/ergodicity/agents/evolutionary_nn.html
@@ -127,7 +127,6 @@ Source code for ergodicity.agents.evolutionary_nn
print(f"Best agent accumulated wealth: {best_agent.accumulated_wealth}")
"""
-
from typing import List, Callable, Type, Union, Dict, Any, Tuple
import torch.nn as nn
import numpy as np
@@ -207,7 +206,7 @@ Source code for ergodicity.agents.evolutionary_nn
class DynamicBatchNorm1d(nn.Module):
"""
DynamicBatchNorm1d Class
@@ -265,7 +264,7 @@ Source code for ergodicity.agents.evolutionary_nn
self.running_var = nn.Parameter(torch.ones(num_features), requires_grad=False)
def forward(self, x):
"""
Forward pass of the dynamic Batch Normalization layer.
@@ -283,7 +282,7 @@ Source code for ergodicity.agents.evolutionary_nn
class NeuralNetwork(nn.Module):
"""
NeuralNetwork Class
@@ -436,7 +435,7 @@ Source code for ergodicity.agents.evolutionary_nn
self.optimizer = self._get_optimizer()
def forward(self, x):
"""
Forward pass through the neural network.
@@ -488,7 +487,7 @@ Source code for ergodicity.agents.evolutionary_nn
raise ValueError(f"Unsupported optimizer: {self.optimizer_name}")
def mutate(self, mutation_rate=0.1, mutation_scale=0.1):
"""
Apply random mutations to the network parameters.
@@ -509,7 +508,7 @@ Source code for ergodicity.agents.evolutionary_nn
def get_num_parameters(self):
"""
Return the total number of trainable parameters in the network.
@@ -521,7 +520,7 @@ Source code for ergodicity.agents.evolutionary_nn
def clone(self):
"""
Create a deep copy of the network.
@@ -547,7 +546,7 @@ Source code for ergodicity.agents.evolutionary_nn
def save(self, path):
"""
Save the model state and hyperparameters to a file.
@@ -577,7 +576,7 @@ Source code for ergodicity.agents.evolutionary_nn
@classmethod
def load(cls, path):
"""
@@ -598,7 +597,7 @@ Source code for ergodicity.agents.evolutionary_nn
class NeuralNetworkAgent:
"""
NeuralNetworkAgent Class
@@ -673,7 +672,7 @@ Source code for ergodicity.agents.evolutionary_nn
self.fitness = 0.0
def select_process(self, encoded_processes: List[List[float]]) -> int:
"""
Select a process based on the neural network's output.
@@ -696,7 +695,7 @@ Source code for ergodicity.agents.evolutionary_nn
def update_wealth(self, process_return: float):
"""
Update the agent's wealth based on the process return.
@@ -709,28 +708,28 @@ Source code for ergodicity.agents.evolutionary_nn
def reset_wealth(self):
"""Reset the agent's wealth to the initial value."""
self.wealth = 1.0
def calculate_fitness(self):
"""Calculate the fitness of the agent based on accumulated wealth."""
self.fitness = np.log(self.accumulated_wealth)
def mutate(self, mutation_rate: float = 0.1, mutation_scale: float = 0.1):
"""Mutate the agent's neural network."""
self.network.mutate(mutation_rate, mutation_scale)
def clone(self):
"""Create a clone of the agent with the same network structure but newly initialized weights."""
cloned_network = self.network.clone()
@@ -742,7 +741,7 @@ Source code for ergodicity.agents.evolutionary_nn
return f"NeuralNetworkAgent(wealth={self.wealth:.2f}, accumulated_wealth={self.accumulated_wealth:.2f}, fitness={self.fitness:.2f})"
def save(self, path: str):
"""
Save the agent's state to a file.
@@ -776,7 +775,7 @@ Source code for ergodicity.agents.evolutionary_nn
@classmethod
def load(cls, path: str):
"""
@@ -807,7 +806,7 @@ Source code for ergodicity.agents.evolutionary_nn
class EvolutionaryNeuralNetworkTrainer:
"""
EvolutionaryNeuralNetworkTrainer Class
@@ -891,12 +890,12 @@ Source code for ergodicity.agents.evolutionary_nn
"""
def __init__(self,
population_size: int,
- input_size: int,
hidden_sizes: List[int],
- output_size: int,
processes: List[Union[dict, object]],
process_encoder: ProcessEncoder,
process_times: List[float],
+ input_size: int = 11,
+ output_size: int = 1,
mutation_rate: float = 0.1,
mutation_scale: float = 0.1,
with_exchange: bool = False,
@@ -975,7 +974,7 @@ Source code for ergodicity.agents.evolutionary_nn
self.performance_history = []
def log(self, message: str):
"""
Log a message to the log file.
@@ -991,7 +990,7 @@ Source code for ergodicity.agents.evolutionary_nn
def save_performance_metrics(self):
"""
Save performance metrics to a CSV file.
@@ -1006,7 +1005,7 @@ Source code for ergodicity.agents.evolutionary_nn
def save_best_weights(self, best_agent: NeuralNetworkAgent):
"""
Save the weights of the best performing agent.
@@ -1020,7 +1019,7 @@ Source code for ergodicity.agents.evolutionary_nn
def visualize_performance(self):
"""
Create separate visualizations for average and max wealth during training.
@@ -1042,6 +1041,7 @@ Source code for ergodicity.agents.evolutionary_nn
plt.grid(True)
plt.tight_layout()
plt.savefig(os.path.join(self.output_dir, filename))
+ plt.show()
plt.close()
# Create and save average wealth graph
@@ -1064,8 +1064,8 @@ Source code for ergodicity.agents.evolutionary_nn
- def visualize_neural_network_evolution(self, output_video_path, output_csv_path):
+ def visualize_neural_network_evolution(self, output_video_path='neural_network_evolution.mp4', output_csv_path='best_agent_params.csv'):
"""
Create a video visualization of the neural network evolution and save best agent parameters to CSV.
@@ -1131,7 +1131,7 @@ Source code for ergodicity.agents.evolutionary_nn
def initialize_population(self) -> List[NeuralNetworkAgent]:
"""
Initialize the population of neural network agents.
@@ -1149,7 +1149,7 @@ Source code for ergodicity.agents.evolutionary_nn
# return [encoded_process[0]] + [time] + encoded_process[1:]
def select_top_agents(self, k: int) -> List[NeuralNetworkAgent]:
"""
Select the top k agents based on accumulated wealth.
@@ -1164,7 +1164,7 @@ Source code for ergodicity.agents.evolutionary_nn
def reproduce_with_exchange(self):
"""
Reproduce agents with population-wide information exchange.
@@ -1197,7 +1197,7 @@ Source code for ergodicity.agents.evolutionary_nn
def reproduce_without_exchange(self):
new_agents = []
for agent in self.population:
@@ -1213,7 +1213,7 @@ Source code for ergodicity.agents.evolutionary_nn
def train(self, n_steps: int, save_interval: int):
"""
Run the main training loop for the specified number of steps to train the evolutionary neural network.
@@ -1340,7 +1340,7 @@ Source code for ergodicity.agents.evolutionary_nn
def load_best_weights(self, agent: NeuralNetworkAgent):
"""
Load the best weights into an agent's network.
@@ -1478,7 +1478,7 @@ Source code for ergodicity.agents.evolutionary_nn
print(f"Best agent accumulated wealth: {best_agent.accumulated_wealth}")
class ReinforcementEvolutionaryTrainer:
"""
ReinforcementEvolutionaryTrainer Class
@@ -1569,37 +1569,39 @@ Source code for ergodicity.agents.evolutionary_nn
mutation_rate: float = 0.1,
mutation_scale: float = 0.1,
rl_interval: int = 10,
- elite_percentage: float = 0.2
+ elite_percentage: float = 0.2,
+ output_dir: str = 'output_nn'
):
"""
Initialize the ReinforcementEvolutionaryTrainer.
:param population_size: Number of agents in the population
:type population_size: int
:param input_size: Size of the input layer
:type input_size: int
:param hidden_sizes: List of hidden layer sizes
:type hidden_sizes: List[int]
:param output_size: Size of the output layer
:type output_size: int
:param processes: List of stochastic processes
:type processes: List[Union[dict, object]]
:param process_encoder: ProcessEncoder instance
:type process_encoder: ProcessEncoder
:param process_times: List of time values for process encoding
:type process_times: List[float]
:param learning_rate: Learning rate for the neural networks
:type learning_rate: float
:param mutation_rate: Probability of mutating each parameter
:type mutation_rate: float
:param mutation_scale: Scale of the mutation (standard deviation of the Gaussian noise)
:type mutation_scale: float
:param rl_interval: Interval for reinforcement learning updates
:type rl_interval: int
:param elite_percentage: Percentage of elite agents to keep in each generation
:type elite_percentage: float
+ :param output_dir: Directory for saving output files
+ :type output_dir: str
"""
-
self.population_size = population_size
self.input_size = input_size
self.hidden_sizes = hidden_sizes
@@ -1616,8 +1618,170 @@ Source code for ergodicity.agents.evolutionary_nn
self.population = self.initialize_population()
self.optimizers = [optim.Adam(agent.network.parameters(), lr=self.learning_rate) for agent in self.population]
+ self.history = []
+ self.performance_history = []
+ self.output_dir = output_dir
+ os.makedirs(self.output_dir, exist_ok=True)
+
+ self.log_file = os.path.join(self.output_dir, 'training_log.txt')
+ self.performance_file = os.path.join(self.output_dir, 'performance_metrics.csv')
+ self.best_weights_file = os.path.join(self.output_dir, 'best_weights.pth')
+
+
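For orientation, a minimal usage sketch of the extended constructor (the import path and
parameters of `GeometricBrownianMotion` are assumptions for illustration; the keyword
arguments follow the docstring above):

    from ergodicity.agents.evolutionary_nn import ReinforcementEvolutionaryTrainer, ProcessEncoder
    from ergodicity.process.multiplicative import GeometricBrownianMotion  # assumed location

    # Hypothetical setup; the process list and encoder follow the documented types.
    processes = [GeometricBrownianMotion(drift=0.02, volatility=0.3)]
    encoder = ProcessEncoder()

    trainer = ReinforcementEvolutionaryTrainer(
        population_size=50,
        input_size=4,
        hidden_sizes=[32, 32],
        output_size=1,
        processes=processes,
        process_encoder=encoder,
        process_times=[1.0, 5.0, 10.0],
        output_dir='output_nn',  # log file, metrics CSV, and best weights land here
    )
    trainer.train(n_steps=1000)
    trainer.visualize_performance()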
+[docs]
+ def log(self, message: str):
+ """
+ Append a timestamped message to the training log file.
+
+ :param message: Message to log
+ :type message: str
+ """
+ timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+ with open(self.log_file, 'a') as f:
+ f.write(f"{timestamp} - {message}\n")
+
+
+
+[docs]
+ def save_performance_metrics(self):
+ """
+ Save the recorded performance metrics to a CSV file. Does nothing if no metrics have been recorded yet.
+ """
+ if not self.performance_history:
+ return
+ with open(self.performance_file, 'w', newline='') as f:
+ writer = csv.DictWriter(f, fieldnames=self.performance_history[0].keys())
+ writer.writeheader()
+ writer.writerows(self.performance_history)
+
+
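The writer above infers the CSV header from the first record, so every entry of
`self.performance_history` should be a flat dict with the same keys. Judging from the
keys read by `visualize_performance` below, a record looks roughly like this (values
are illustrative):

    # Hypothetical record appended once per recorded step during training
    {'step': 100, 'avg_fitness': 1.42, 'max_fitness': 3.97}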
+
+[docs]
+ def save_best_weights(self, best_agent: NeuralNetworkAgent):
+ """
+ Save the weights of the best-performing agent.
+
+ :param best_agent: Best-performing agent
+ :type best_agent: NeuralNetworkAgent
+ """
+ torch.save(best_agent.network.state_dict(), self.best_weights_file)
+
+
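Restoring the saved weights later is the standard PyTorch state-dict round trip
(a sketch; `agent` stands for any `NeuralNetworkAgent` whose network matches the
saved architecture):

    import torch

    state_dict = torch.load(trainer.best_weights_file)
    agent.network.load_state_dict(state_dict)
    agent.network.eval()  # switch to inference mode before evaluating the agent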
+
+[docs]
+ def visualize_performance(self):
+ """
+ Create visualizations for average and maximum fitness during training.
+ """
+ steps = [metric['step'] for metric in self.performance_history]
+ avg_fitness = [metric['avg_fitness'] for metric in self.performance_history]
+ max_fitness = [metric['max_fitness'] for metric in self.performance_history]
+
+ # Function to create and save a single graph
+ def create_fitness_graph(fitness_data, ylabel, title, filename):
+ plt.figure(figsize=(12, 6))
+ plt.plot(steps, fitness_data)
+ plt.xlabel('Step')
+ plt.ylabel(ylabel)
+ plt.title(title)
+ plt.grid(True)
+ plt.tight_layout()
+ plt.savefig(os.path.join(self.output_dir, filename))
+ plt.show()
+ plt.close()
+
+ # Create and save average fitness graph
+ create_fitness_graph(
+ avg_fitness,
+ 'Average Fitness',
+ 'Average Fitness Evolution During Training',
+ 'average_fitness_evolution.png'
+ )
+
+ # Create and save maximum fitness graph
+ create_fitness_graph(
+ max_fitness,
+ 'Maximum Fitness',
+ 'Maximum Fitness Evolution During Training',
+ 'max_fitness_evolution.png'
+ )
+
+ self.log("Performance visualization graphs have been created and saved.")
+
+
+
+[docs]
+ def visualize_neural_network_evolution(self, output_video_path='neural_network_evolution.mp4',
+ output_csv_path='best_agent_params.csv'):
+ """
+ Create a video visualization of the neural network evolution and save best agent parameters to CSV.
+
+ :param output_video_path: Path to save the output video
+ :type output_video_path: str
+ :param output_csv_path: Path to save the CSV file with best agent parameters
+ :type output_csv_path: str
+ """
+ # Extract data from history
+ steps = [entry['step'] for entry in self.history]
+ best_params_history = [entry['best_params'] for entry in self.history]
+
+ if not best_params_history:
+ self.log("No history data available for visualization.")
+ return
+
+ # Prepare data for visualization
+ param_names = list(best_params_history[0].keys())
+ param_values = {
+     name: [params[name].cpu().numpy().flatten() for params in best_params_history]
+     for name in param_names
+ }
+
+ # Create figure and axes for animation
+ num_params = len(param_names)
+ fig, axes = plt.subplots(num_params, 1, figsize=(12, 4 * num_params))
+ if num_params == 1:
+ axes = [axes]
+
+ # Initialize plots
+ plots = []
+ for ax, name in zip(axes, param_names):
+ plot, = ax.plot([], [], 'b-')
+ ax.set_xlim(0, len(param_values[name][0]))
+ all_values = np.concatenate(param_values[name])
+ ax.set_ylim(np.min(all_values), np.max(all_values))
+ ax.set_title(f'Evolution of {name}')
+ ax.set_xlabel('Parameter Index')
+ ax.set_ylabel('Parameter Value')
+ plots.append(plot)
+
+ # Animation update function
+ def update(frame):
+ for plot, name in zip(plots, param_names):
+ plot.set_data(range(len(param_values[name][frame])), param_values[name][frame])
+ return plots
+
+ # Create animation
+ anim = animation.FuncAnimation(fig, update, frames=len(steps), interval=200, blit=True)
+
+ # Save animation as video
+ writer = animation.FFMpegWriter(fps=5, metadata=dict(artist='ReinforcementEvolutionaryTrainer'), bitrate=1800)
+ anim.save(os.path.join(self.output_dir, output_video_path), writer=writer)
+
+ # Save best agent parameters to CSV
+ best_agent_params = best_params_history[-1]
+ param_dict = {name: best_agent_params[name].cpu().numpy().flatten() for name in param_names}
+ max_length = max(len(arr) for arr in param_dict.values())
+ param_dict = {name: np.pad(arr, (0, max_length - len(arr)), 'constant', constant_values=np.nan)
+ for name, arr in param_dict.items()}
+ df = pd.DataFrame(param_dict)
+ df.to_csv(os.path.join(self.output_dir, output_csv_path), index=False)
+
+ plt.close(fig)
+
+ self.log(f"Neural network evolution video saved to {output_video_path}")
+ self.log(f"Best agent parameters saved to {output_csv_path}")
+
+
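Note that `animation.FFMpegWriter` requires an ffmpeg binary on the system path. If
ffmpeg is unavailable, Matplotlib's `PillowWriter` is a drop-in fallback that writes a
GIF instead (a sketch, not part of the library):

    from matplotlib import animation

    writer = animation.PillowWriter(fps=5)
    anim.save(os.path.join(self.output_dir, 'neural_network_evolution.gif'), writer=writer)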
-[docs]
+[docs]
def initialize_population(self) -> List[NeuralNetworkAgent]:
"""
Initialize the population of neural network agents.
@@ -1630,7 +1794,7 @@ Source code for ergodicity.agents.evolutionary_nn
-[docs]
+[docs]
def select_process(self, agent: NeuralNetworkAgent) -> Tuple[object, float]:
"""
Select a process and time horizon for an agent to interact with.
@@ -1650,7 +1814,7 @@ Source code for ergodicity.agents.evolutionary_nn
-[docs]
+[docs]
def simulate_process(self, process: object, time: float) -> float:
"""
Simulate the selected process for the given time horizon.
@@ -1672,7 +1836,7 @@ Source code for ergodicity.agents.evolutionary_nn
-[docs]
+[docs]
def calculate_reward(self, initial_wealth: float, final_wealth: float) -> float:
"""
Calculate the reward based on the change in wealth.
@@ -1688,7 +1852,7 @@ Source code for ergodicity.agents.evolutionary_nn
-[docs]
+[docs]
def reinforce(self, agent: NeuralNetworkAgent, optimizer: optim.Optimizer, reward: float):
"""
Apply reinforcement learning update to an agent.
@@ -1709,7 +1873,7 @@ Source code for ergodicity.agents.evolutionary_nn
-[docs]
+[docs]
def mutate(self, agent: NeuralNetworkAgent):
"""
Apply mutation to an agent's neural network.
@@ -1724,7 +1888,7 @@ Source code for ergodicity.agents.evolutionary_nn
-[docs]
+[docs]
def select_elite(self, population: List[NeuralNetworkAgent]) -> List[NeuralNetworkAgent]:
"""
Select the elite agents from the population.
@@ -1740,7 +1904,7 @@ Source code for ergodicity.agents.evolutionary_nn
-[docs]
+[docs]
def crossover(self, parent1: NeuralNetworkAgent, parent2: NeuralNetworkAgent) -> NeuralNetworkAgent:
"""
Perform crossover between two parent agents to create a child agent.
@@ -1761,7 +1925,7 @@ Source code for ergodicity.agents.evolutionary_nn
-[docs]
+[docs]
def train(self, n_steps: int):
"""
Run the main training loop for the specified number of steps.
diff --git a/_modules/ergodicity/agents/portfolio.html b/_modules/ergodicity/agents/portfolio.html
index d30aac9..3b641d9 100644
--- a/_modules/ergodicity/agents/portfolio.html
+++ b/_modules/ergodicity/agents/portfolio.html
@@ -87,13 +87,12 @@ Source code for ergodicity.agents.portfolio
portfolio.visualize()
"""
-
import numpy as np
import matplotlib.pyplot as plt
from typing import List, Any
-[docs]
+[docs]
class Portfolio:
"""
Portfolio Class
@@ -161,7 +160,7 @@ Source code for ergodicity.agents.portfolio
self.weight_history = [self.current_weights]
-[docs]
+[docs]
def simulate(self, timestep: float, time_period: float, total_time: float):
"""
Simulate the portfolio over time.
@@ -200,7 +199,7 @@ Source code for ergodicity.agents.portfolio
-[docs]
+[docs]
def visualize(self):
"""
Visualize the wealth and weight dynamics of the portfolio.
diff --git a/_modules/ergodicity/agents/probability_weighting.html b/_modules/ergodicity/agents/probability_weighting.html
index 66ddc73..6eef726 100644
--- a/_modules/ergodicity/agents/probability_weighting.html
+++ b/_modules/ergodicity/agents/probability_weighting.html
@@ -71,12 +71,10 @@ Source code for ergodicity.agents.probability_weighting
X = visualize_weighting(weighted_pdf, new_mu, sigma, timestep=0.01, num_samples=1000, t=1.0)
"""
-
import sympy as sp
from tenacity import wait_exponential_jitter
from ergodicity.configurations import *
from ergodicity.process.default_values import *
-
from ergodicity.tools.compute import random_variable_from_pdf
from ergodicity.tools.solve import apply_girsanov
import numpy as np
@@ -84,27 +82,35 @@ Source code for ergodicity.agents.probability_weighting
from scipy import stats
-[docs]
-def gbm_weighting(initial_mu, sigma):
+[docs]
+def gbm_weighting(initial_mu, sigma, time_horizon=1):
"""
Apply Girsanov's theorem to transform the probability measure of a Geometric Brownian Motion (GBM) process to a new measure.
The new measure is defined by adjusting the drift of the GBM process.
- :param initial_mu: Initial drift of the GBM process.
+ :param initial_mu: Initial drift of the GBM process. It corresponds to the intended time-average growth rate: pass the time average, and the resulting weighting makes the expected value under the new measure equal that time average.
:type initial_mu: float
:param sigma: Volatility of the GBM process.
:type sigma: float
:return: Weighted probability density function (PDF) after applying Girsanov's theorem.
:rtype: sympy.core.add.Add
"""
- t = sp.symbols('t')
- new_drift = initial_mu - 0.5 * sigma**2
- weighted_pdf = apply_girsanov(initial_drift=initial_mu, new_drift=new_drift, diffusion=sigma, time_horizon=t)
+ x = sp.symbols('x')
+ # The time-average growth rate of a GBM is mu - sigma**2 / 2, so the ensemble
+ # drift that reproduces a given time average must add 0.5 * sigma**2 back.
+ new_drift = (initial_mu + 0.5 * sigma ** 2) * x
+ initial_mu = initial_mu * x
+ sigma = sigma * x
+ weighted_pdf = apply_girsanov(initial_drift=initial_mu, new_drift=new_drift, diffusion=sigma,
+ time_horizon=time_horizon)
return weighted_pdf
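Because the time-average growth rate of a GBM with ensemble drift mu is mu - sigma**2 / 2,
passing the desired time average as `initial_mu` and letting the function add
0.5 * sigma**2 back yields a measure under which expectations reproduce that time
average. A minimal call (parameter values are illustrative only):

    import sympy as sp
    from ergodicity.agents.probability_weighting import gbm_weighting

    # Weight the measure for a 5% time-average growth rate at 20% volatility.
    weighted_pdf = gbm_weighting(initial_mu=0.05, sigma=0.2, time_horizon=1)
    print(sp.simplify(weighted_pdf))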
-[docs]
+[docs]
def martingale_weighting(initial_mu, sigma):
"""
Apply Girsanov's theorem to transform the probability measure of a martingale process to a new measure.
@@ -123,38 +129,50 @@ Source code for ergodicity.agents.probability_weighting
return weighted_pdf
+
-[docs]
-def visualize_weighting(weighted_pdf, new_mu, sigma, timestep=timestep_default, num_samples=num_instances_default, t=t_default):
+[docs]
+def visualize_weighting(weighted_pdf, new_mu, sigma, timestep=0.01, num_samples=1000, t=1.0):
"""
Visualize the weighted stochastic process based on the adjusted drift and volatility parameters.
- :param weighted_pdf
+ :param weighted_pdf: Weighted probability density function.
:type weighted_pdf: sympy.core.add.Add
- :param new_mu: for now, it must be float
+ :param new_mu: New drift parameter.
:type new_mu: float
- :param sigma: for now, it must be float
+ :param sigma: Volatility parameter.
:type sigma: float
- :param timestep:
+ :param timestep: Time step for simulation.
:type timestep: float
- :param num_samples:
+ :param num_samples: Number of sample paths to simulate.
:type num_samples: int
- :param t:
+ :param t: Total time for simulation.
:type t: float
- :return: X
+ :return: Simulated paths.
:rtype: numpy.ndarray
"""
-
dt = timestep
+ num_steps = int(t / dt)
W_t = sp.symbols('W_t')
- dW_q = random_variable_from_pdf(weighted_pdf, x=W_t, num_samples=num_samples)
- dW_q = np.array(dW_q).astype(float)
- X = np.ones(num_samples)
- for i in range(1, int(t/dt)):
- dX = new_mu * dt + sigma * dW_q
- X += dX
- plt.plot(X)
+
+ X = np.ones((num_samples, num_steps))
+
+ for i in range(1, num_steps):
+ current_x = X[:, i - 1]
+ # Draw Brownian increments under the weighted measure, scaled by sqrt(dt)
+ dW_q = dt**0.5 * random_variable_from_pdf(weighted_pdf, x=W_t, num_samples=num_samples, t=1)
+ # Euler-Maruyama step for dX = X * (mu * dt + sigma * dW)
+ dX = current_x * (new_mu * dt + sigma * dW_q)
+ X[:, i] = X[:, i - 1] + dX
+
+ plt.figure(figsize=(10, 6))
+ for path in X:
+ plt.plot(np.linspace(0, t, num_steps), path, alpha=0.1)
+ plt.title("Simulated Paths of Weighted Process")
+ plt.xlabel("Time")
+ plt.ylabel("Value")
plt.show()
+
return X
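The loop above is an Euler-Maruyama discretisation of dX_t = X_t (mu dt + sigma dW_t)
with the Brownian increments drawn under the weighted measure. Chaining it with
`gbm_weighting`, as the module docstring suggests (parameter values are illustrative):

    new_mu, sigma = 0.05, 0.2
    weighted_pdf = gbm_weighting(new_mu, sigma)
    X = visualize_weighting(weighted_pdf, new_mu, sigma, timestep=0.01, num_samples=1000, t=1.0)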
diff --git a/_modules/ergodicity/agents/sml.html b/_modules/ergodicity/agents/sml.html
index 2a56189..e68760d 100644
--- a/_modules/ergodicity/agents/sml.html
+++ b/_modules/ergodicity/agents/sml.html
@@ -112,8 +112,8 @@ Source code for ergodicity.agents.sml