
Commit 7e832e3

removed all memetic algo related stuff
1 parent: d381011

6 files changed: +27 additions, −317 deletions

Diff for: kernel_tuner/interface.py

+1 −3

@@ -58,8 +58,7 @@
     pso,
     random_sample,
     simulated_annealing,
-    ensemble,
-    memetic
+    ensemble
 )
 
 strategy_map = {
@@ -79,7 +78,6 @@
     "firefly_algorithm": firefly_algorithm,
     "bayes_opt": bayes_opt,
     "ensemble": ensemble,
-    "memetic": memetic,
 }
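
For context, a minimal sketch of how the cleaned-up strategy_map is consumed (resolve_strategy is a hypothetical helper for illustration; in the real code the lookup happens inside kernel_tuner's interface):

from kernel_tuner.strategies import ensemble, genetic_algorithm

# trimmed-down stand-in for the real strategy_map in interface.py
strategy_map = {
    "genetic_algorithm": genetic_algorithm,
    "ensemble": ensemble,
    # the "memetic" entry is gone after this commit
}

def resolve_strategy(name):
    # hypothetical helper: unknown names, including "memetic", now fail here
    if name not in strategy_map:
        raise ValueError(f"Unknown strategy: {name}")
    return strategy_map[name]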

Diff for: kernel_tuner/runners/parallel.py

+4 −24

@@ -51,11 +51,7 @@ def run(self, parameter_space=None, tuning_options=None, ensemble=None, searchsp
         if self.actors is None:
             runner_attributes = [self.kernel_source, self.kernel_options, self.device_options, self.iterations, self.observers]
             self.actors = [create_actor_on_device(*runner_attributes, id=_id, cache_manager=self.cache_manager, simulation_mode=self.simulation_mode) for _id in range(self.num_gpus)]
-            # actors_ready_futures = [actor.__ray_ready__.remote() for actor in futures]
-            # ray.wait(actors_ready_futures, num_returns=len(actors_ready_futures), timeout=None)
-            # self.actors = futures
-
-
+
         # Check if all GPUs are of the same type
         if not self.simulation_mode and not self._check_gpus_equals():
             raise GPUTypeMismatchError(f"Different GPU types found")
@@ -137,43 +133,28 @@ def multi_strategy_parallel_execution(self, ensemble, tuning_options, searchspac
             task = actor.execute.remote(strategy=strategy, searchspace=searchspace, tuning_options=remote_tuning_options)
             pending_tasks[task] = actor
 
-        # Process results to extract population and candidates for further use
-        results, tuning_options_list, population, candidates = self._process_results_ensemble(all_results)
-
-        # Update tuning options for memetic strategies
-        if population:
-            tuning_options.strategy_options["population"] = population
-        if candidates:
-            tuning_options.strategy_options["candidates"] = candidates
+        # Process results
+        results, tuning_options_list = self._process_results_ensemble(all_results)
 
         return results, tuning_options_list
 
 
     def _setup_tuning_options(self, tuning_options, evaluations_per_strategy):
         new_tuning_options = copy.deepcopy(tuning_options)
-        if "candidates" in tuning_options.strategy_options:
-            if len(tuning_options.strategy_options["candidates"]) > 0:
-                new_tuning_options.strategy_options["candidate"] = tuning_options.strategy_options["candidates"].pop(0)
         new_tuning_options.strategy_options["max_fevals"] = evaluations_per_strategy.pop(0)
         # the stop criterion uses the max feval in tuning options for some reason
         new_tuning_options["max_fevals"] = new_tuning_options.strategy_options["max_fevals"]
         return new_tuning_options
 
     def _process_results_ensemble(self, all_results):
-        population = [] # for memetic strategy
-        candidates = [] # for memetic strategy
         results = []
         tuning_options_list = []
 
         for (strategy_results, tuning_options) in all_results:
-            if "old_candidate" in tuning_options.strategy_options:
-                candidates.append(tuning_options.strategy_options["old_candidate"])
-            if "candidate" in tuning_options.strategy_options:
-                population.append(tuning_options.strategy_options["candidate"])
             results.extend(strategy_results)
             tuning_options_list.append(tuning_options)
 
-        return results, tuning_options_list, population, candidates
+        return results, tuning_options_list
 
 
     def parallel_function_evaluation(self, tuning_options, parameter_space):
@@ -201,7 +182,6 @@ def _calculate_simulated_time(self, tuning_options_list):
         simulated_times = []
         for tuning_options in tuning_options_list:
             simulated_times.append(tuning_options.simulated_time)
-        #simulated_times = [tuning_options.simulated_time for tuning_options in tuning_options_list]
         return max(simulated_times)
 
     def _check_gpus_equals(self):
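
With the memetic bookkeeping gone, the ensemble result processing reduces to a plain flatten-and-collect. A minimal standalone sketch of the new shape (the real method lives on ParallelRunner; class context elided here):

def _process_results_ensemble(all_results):
    results = []
    tuning_options_list = []
    for strategy_results, tuning_options in all_results:
        results.extend(strategy_results)       # flatten per-strategy results
        tuning_options_list.append(tuning_options)
    return results, tuning_options_list        # no population/candidates anymore

# toy data: two strategies, their results and per-strategy tuning options
res, opts = _process_results_ensemble([(["cfg1", "cfg2"], {"max_fevals": 50}),
                                       (["cfg3"], {"max_fevals": 50})])
assert res == ["cfg1", "cfg2", "cfg3"] and len(opts) == 2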

Diff for: kernel_tuner/strategies/common.py

+1 −2

@@ -52,8 +52,7 @@ def make_strategy_options_doc(strategy_options):
 
 def get_options(strategy_options, options):
     """Get the strategy-specific options or their defaults from user-supplied strategy_options."""
-    accepted = list(options.keys()) + ["max_fevals", "time_limit", "ensemble", "candidates", "candidate", "population",
-                                       "maxiter", "lsd", "popsize", "alsd", "split_searchspace", "check_and_retrieve"]
+    accepted = list(options.keys()) + ["max_fevals", "time_limit", "ensemble", "check_and_retrieve"]
     for key in strategy_options:
         if key not in accepted:
             raise ValueError(f"Unrecognized option {key} in strategy_options")
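
One observable effect of the shorter accepted list: the memetic-era keys ("candidates", "candidate", "population", "lsd", "alsd", and so on) are now rejected. A small sketch, assuming get_options is called with a plain dict of user options:

from kernel_tuner.strategies import common

_options = dict(popsize=("population size", 20))

common.get_options({"popsize": 10}, _options)         # accepted: key is in _options
try:
    common.get_options({"candidates": []}, _options)  # removed memetic-era key
except ValueError as e:
    print(e)  # Unrecognized option candidates in strategy_options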

Diff for: kernel_tuner/strategies/genetic_algorithm.py

+16 −44

@@ -7,42 +7,39 @@
 from kernel_tuner.searchspace import Searchspace
 from kernel_tuner.strategies import common
 from kernel_tuner.strategies.common import CostFunc
-from kernel_tuner.runners.parallel import ParallelRunner
 
 _options = dict(
     popsize=("population size", 20),
     maxiter=("maximum number of generations", 100),
     method=("crossover method to use, choose any from single_point, two_point, uniform, disruptive_uniform", "uniform"),
     mutation_chance=("chance to mutate is 1 in mutation_chance", 10),
-    population=("initial population", None),
 )
 
 
 def tune(searchspace: Searchspace, runner, tuning_options):
 
     options = tuning_options.strategy_options
-    pop_size, generations, method, mutation_chance, population = common.get_options(options, _options)
+    pop_size, generations, method, mutation_chance = common.get_options(options, _options)
     crossover = supported_methods[method]
 
     best_score = 1e20
     cost_func = CostFunc(searchspace, tuning_options, runner)
 
-    if not population:
-        population = list(list(p) for p in searchspace.get_random_sample(pop_size))
-    else:
-        pop_size = len(population)
-
-    old_population = population
+    population = list(list(p) for p in searchspace.get_random_sample(pop_size))
+
     for generation in range(generations):
 
-        # Evaluate the entire population
-        try:
-            old_population = population
-            weighted_population = evaluate_population(runner, cost_func, population)
-        except util.StopCriterionReached as e:
-            if tuning_options.verbose:
-                print(e)
-            return cost_func.results
+        # determine fitness of population members
+        weighted_population = []
+        for dna in population:
+            try:
+                time = cost_func(dna, check_restrictions=False)
+            except util.StopCriterionReached as e:
+                if tuning_options.verbose:
+                    print(e)
+                return cost_func.results
+
+            weighted_population.append((dna, time))
 
         # population is sorted such that better configs have higher chance of reproducing
         weighted_population.sort(key=lambda x: x[1])
@@ -72,8 +69,7 @@ def tune(searchspace: Searchspace, runner, tuning_options):
             break
 
     # could combine old + new generation here and do a selection
-    tuning_options.strategy_options["population"] = old_population # for memetic strategy
-    tuning_options.strategy_options["candidates"] = population # for memetic strategy
+
    return cost_func.results
 
 
@@ -180,28 +176,4 @@ def disruptive_uniform_crossover(dna1, dna2):
     "two_point": two_point_crossover,
     "uniform": uniform_crossover,
     "disruptive_uniform": disruptive_uniform_crossover,
-}
-
-def evaluate_population(runner, cost_func, population):
-    """
-    Evaluate the population based on the type of runner.
-
-    Parameters:
-    - runner: The runner (ParallelRunner or SequentialRunner) determining how to process evaluations.
-    - cost_func: A function capable of evaluating the population.
-    - population: List of individuals to be evaluated.
-
-    Returns:
-    - List of tuples (dna, fitness_score) representing the population and their evaluation results.
-    """
-    if isinstance(runner, ParallelRunner):
-        # Process the whole population at once if using a ParallelRunner
-        results = cost_func(population, check_restrictions=False)
-        return list(zip(population, results))
-    else:
-        # Process each individual sequentially for SequentialRunner
-        weighted_population = []
-        for dna in population:
-            time = cost_func(dna, check_restrictions=False) # Cost function called with a single-element list
-            weighted_population.append((dna, time))
-        return weighted_population
+}
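
Since the GA now always seeds its own random population, callers only pass the options left in _options. A usage sketch with a toy CUDA kernel, assuming the standard tune_kernel entry point (passing the removed "population" key would now fail get_options validation):

import numpy as np
import kernel_tuner

kernel_string = """
__global__ void vector_add(float *c, float *a, float *b, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) c[i] = a[i] + b[i];
}
"""

size = 1_000_000
a = np.random.randn(size).astype(np.float32)
b = np.random.randn(size).astype(np.float32)
c = np.zeros_like(a)
args = [c, a, b, np.int32(size)]
tune_params = {"block_size_x": [32, 64, 128, 256, 512]}

results, env = kernel_tuner.tune_kernel(
    "vector_add", kernel_string, size, args, tune_params,
    strategy="genetic_algorithm",
    # only the options still listed in _options above
    strategy_options={"popsize": 20, "maxiter": 10, "method": "uniform"},
)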

Diff for: kernel_tuner/strategies/greedy_ils.py

+5 −20

@@ -9,16 +9,15 @@
 _options = dict(neighbor=("Method for selecting neighboring nodes, choose from Hamming or adjacent", "Hamming"),
                 restart=("controls greedyness, i.e. whether to restart from a position as soon as an improvement is found", True),
                 no_improvement=("number of evaluations to exceed without improvement before restarting", 50),
-                random_walk=("controls greedyness, i.e. whether to restart from a position as soon as an improvement is found", 0.3),
-                candidate=("initial candidate for the search", None))
+                random_walk=("controls greedyness, i.e. whether to restart from a position as soon as an improvement is found", 0.3))
 
 def tune(searchspace: Searchspace, runner, tuning_options):
 
     dna_size = len(searchspace.tune_params.keys())
 
     options = tuning_options.strategy_options
 
-    neighbor, restart, no_improvement, randomwalk, candidate = common.get_options(options, _options)
+    neighbor, restart, no_improvement, randomwalk = common.get_options(options, _options)
 
     perm_size = int(randomwalk * dna_size)
     if perm_size == 0:
@@ -32,28 +31,16 @@ def tune(searchspace: Searchspace, runner, tuning_options):
     cost_func = CostFunc(searchspace, tuning_options, runner)
 
     #while searching
-    if not candidate:
-        candidate = searchspace.get_random_sample(1)[0]
-    old_candidate = candidate # for memetic strategy
-    try:
-        best_score = cost_func(candidate, check_restrictions=False)
-    except util.StopCriterionReached as e:
-        tuning_options.strategy_options["old_candidate"] = old_candidate # for memetic strategy
-        tuning_options.strategy_options["candidate"] = candidate # for memetic strategy
-        if tuning_options.verbose:
-            print(e)
-        return cost_func.results
+    candidate = searchspace.get_random_sample(1)[0]
+    best_score = cost_func(candidate, check_restrictions=False)
 
     last_improvement = 0
     while fevals < max_fevals:
 
         try:
-            old_candidate = candidate # for memetic strategy
             candidate = base_hillclimb(candidate, neighbor, max_fevals, searchspace, tuning_options, cost_func, restart=restart, randomize=True)
             new_score = cost_func(candidate, check_restrictions=False)
         except util.StopCriterionReached as e:
-            tuning_options.strategy_options["old_candidate"] = old_candidate # for memetic strategy
-            tuning_options.strategy_options["candidate"] = candidate # for memetic strategy
             if tuning_options.verbose:
                 print(e)
             return cost_func.results
@@ -66,8 +53,6 @@ def tune(searchspace: Searchspace, runner, tuning_options):
 
         # Instead of full restart, permute the starting candidate
         candidate = random_walk(candidate, perm_size, no_improvement, last_improvement, searchspace)
-    tuning_options.strategy_options["old_candidate"] = old_candidate # for memetic strategy
-    tuning_options.strategy_options["candidate"] = candidate # for memetic strategy
     return cost_func.results
 
 
@@ -78,4 +63,4 @@ def random_walk(indiv, permutation_size, no_improve, last_improve, searchspace:
         return searchspace.get_random_sample(1)[0]
     for _ in range(permutation_size):
         indiv = mutate(indiv, 0, searchspace, cache=False)
-    return indiv
+    return indiv
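
A companion usage sketch, reusing the kernel setup from the genetic_algorithm example above: greedy ILS now always draws its own random starting candidate, since the "candidate" option no longer exists:

results, env = kernel_tuner.tune_kernel(
    "vector_add", kernel_string, size, args, tune_params,
    strategy="greedy_ils",
    strategy_options={"neighbor": "Hamming", "restart": True, "max_fevals": 100},
)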
