Skip to content

Commit efb9717

Browse files
authored
[maintenance] Always use "executorlib_cache" instead of "cache" (#682)
* [maintenance] Always use "executorlib_cache" instead of "cache"
* clean up tearDown functions
1 parent 4d2dd17 commit efb9717

13 files changed: +44 lines added, -51 lines removed

executorlib/executor/flux.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@ class FluxJobExecutor(BaseExecutor):
2929
max_workers (int): for backwards compatibility with the standard library, max_workers also defines the number of
3030
cores which can be used in parallel - just like the max_cores parameter. Using max_cores is
3131
recommended, as computers have a limited number of compute cores.
32-
cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
32+
cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
3333
max_cores (int): defines the number cores which can be used in parallel
3434
resource_dict (dict): A dictionary of resources required by the task. With the following keys:
3535
- cores (int): number of MPI cores to be used for each function call
@@ -114,7 +114,7 @@ def __init__(
114114
max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
115115
number of cores which can be used in parallel - just like the max_cores parameter. Using
116116
max_cores is recommended, as computers have a limited number of compute cores.
117-
cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
117+
cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
118118
max_cores (int): defines the number cores which can be used in parallel
119119
resource_dict (dict): A dictionary of resources required by the task. With the following keys:
120120
- cores (int): number of MPI cores to be used for each function call
@@ -218,7 +218,7 @@ class FluxClusterExecutor(BaseExecutor):
218218
max_workers (int): for backwards compatibility with the standard library, max_workers also defines the number of
219219
cores which can be used in parallel - just like the max_cores parameter. Using max_cores is
220220
recommended, as computers have a limited number of compute cores.
221-
cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
221+
cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
222222
max_cores (int): defines the number cores which can be used in parallel
223223
resource_dict (dict): A dictionary of resources required by the task. With the following keys:
224224
- cores (int): number of MPI cores to be used for each function call
@@ -294,7 +294,7 @@ def __init__(
294294
max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
295295
number of cores which can be used in parallel - just like the max_cores parameter. Using
296296
max_cores is recommended, as computers have a limited number of compute cores.
297-
cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
297+
cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
298298
max_cores (int): defines the number cores which can be used in parallel
299299
resource_dict (dict): A dictionary of resources required by the task. With the following keys:
300300
- cores (int): number of MPI cores to be used for each function call
@@ -409,7 +409,7 @@ def create_flux_executor(
409409
number of cores which can be used in parallel - just like the max_cores parameter. Using
410410
max_cores is recommended, as computers have a limited number of compute cores.
411411
max_cores (int): defines the number cores which can be used in parallel
412-
cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
412+
cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
413413
resource_dict (dict): A dictionary of resources required by the task. With the following keys:
414414
- cores (int): number of MPI cores to be used for each function call
415415
- threads_per_core (int): number of OpenMP threads to be used for each function call

executorlib/executor/single.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@ class SingleNodeExecutor(BaseExecutor):
2929
max_workers (int): for backwards compatibility with the standard library, max_workers also defines the number of
3030
cores which can be used in parallel - just like the max_cores parameter. Using max_cores is
3131
recommended, as computers have a limited number of compute cores.
32-
cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
32+
cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
3333
max_cores (int): defines the number cores which can be used in parallel
3434
resource_dict (dict): A dictionary of resources required by the task. With the following keys:
3535
- cores (int): number of MPI cores to be used for each function call
@@ -104,7 +104,7 @@ def __init__(
104104
max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
105105
number of cores which can be used in parallel - just like the max_cores parameter. Using
106106
max_cores is recommended, as computers have a limited number of compute cores.
107-
cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
107+
cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
108108
max_cores (int): defines the number cores which can be used in parallel
109109
resource_dict (dict): A dictionary of resources required by the task. With the following keys:
110110
- cores (int): number of MPI cores to be used for each function call
@@ -202,7 +202,7 @@ def create_single_node_executor(
202202
number of cores which can be used in parallel - just like the max_cores parameter. Using
203203
max_cores is recommended, as computers have a limited number of compute cores.
204204
max_cores (int): defines the number cores which can be used in parallel
205-
cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
205+
cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
206206
resource_dict (dict): A dictionary of resources required by the task. With the following keys:
207207
- cores (int): number of MPI cores to be used for each function call
208208
- threads_per_core (int): number of OpenMP threads to be used for each function call

executorlib/executor/slurm.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ class SlurmClusterExecutor(BaseExecutor):
3030
max_workers (int): for backwards compatibility with the standard library, max_workers also defines the number of
3131
cores which can be used in parallel - just like the max_cores parameter. Using max_cores is
3232
recommended, as computers have a limited number of compute cores.
33-
cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
33+
cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
3434
max_cores (int): defines the number cores which can be used in parallel
3535
resource_dict (dict): A dictionary of resources required by the task. With the following keys:
3636
- cores (int): number of MPI cores to be used for each function call
@@ -106,7 +106,7 @@ def __init__(
106106
max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
107107
number of cores which can be used in parallel - just like the max_cores parameter. Using
108108
max_cores is recommended, as computers have a limited number of compute cores.
109-
cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
109+
cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
110110
max_cores (int): defines the number cores which can be used in parallel
111111
resource_dict (dict): A dictionary of resources required by the task. With the following keys:
112112
- cores (int): number of MPI cores to be used for each function call
@@ -207,7 +207,7 @@ class SlurmJobExecutor(BaseExecutor):
207207
max_workers (int): for backwards compatibility with the standard library, max_workers also defines the number of
208208
cores which can be used in parallel - just like the max_cores parameter. Using max_cores is
209209
recommended, as computers have a limited number of compute cores.
210-
cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
210+
cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
211211
max_cores (int): defines the number cores which can be used in parallel
212212
resource_dict (dict): A dictionary of resources required by the task. With the following keys:
213213
- cores (int): number of MPI cores to be used for each function call
@@ -287,7 +287,7 @@ def __init__(
287287
max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
288288
number of cores which can be used in parallel - just like the max_cores parameter. Using
289289
max_cores is recommended, as computers have a limited number of compute cores.
290-
cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
290+
cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
291291
max_cores (int): defines the number cores which can be used in parallel
292292
resource_dict (dict): A dictionary of resources required by the task. With the following keys:
293293
- cores (int): number of MPI cores to be used for each function call
@@ -389,7 +389,7 @@ def create_slurm_executor(
389389
number of cores which can be used in parallel - just like the max_cores parameter. Using
390390
max_cores is recommended, as computers have a limited number of compute cores.
391391
max_cores (int): defines the number cores which can be used in parallel
392-
cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
392+
cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
393393
resource_dict (dict): A dictionary of resources required by the task. With the following keys:
394394
- cores (int): number of MPI cores to be used for each function call
395395
- threads_per_core (int): number of OpenMP threads to be used for each function call

executorlib/task_scheduler/file/task_scheduler.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@
2727
class FileTaskScheduler(TaskSchedulerBase):
2828
def __init__(
2929
self,
30-
cache_directory: str = "cache",
30+
cache_directory: str = "executorlib_cache",
3131
resource_dict: Optional[dict] = None,
3232
execute_function: Callable = execute_with_pysqa,
3333
terminate_function: Optional[Callable] = None,
@@ -39,7 +39,7 @@ def __init__(
3939
Initialize the FileExecutor.
4040
4141
Args:
42-
cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
42+
cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
4343
resource_dict (dict): A dictionary of resources required by the task. With the following keys:
4444
- cores (int): number of MPI cores to be used for each function call
4545
- cwd (str/None): current working directory where the parallel python task is executed

executorlib/task_scheduler/interactive/shared.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ def execute_tasks(
4141
this look up for security reasons. So on MacOS it is required to set this
4242
option to true
4343
init_function (Callable): optional function to preset arguments for functions which are submitted later
44-
cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
44+
cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
4545
queue_join_on_shutdown (bool): Join communication queue when thread is closed. Defaults to True.
4646
log_obj_size (bool): Enable debug mode which reports the size of the communicated objects.
4747
"""

tests/test_cache_backend_execute.py

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ def get_error(a):
2828
)
2929
class TestSharedFunctions(unittest.TestCase):
3030
def test_execute_function_mixed(self):
31-
cache_directory = os.path.abspath("cache")
31+
cache_directory = os.path.abspath("executorlib_cache")
3232
os.makedirs(cache_directory, exist_ok=True)
3333
task_key, data_dict = serialize_funct_h5(
3434
fn=my_funct,
@@ -56,7 +56,7 @@ def test_execute_function_mixed(self):
5656
self.assertEqual(future_file_obj.result(), 3)
5757

5858
def test_execute_function_args(self):
59-
cache_directory = os.path.abspath("cache")
59+
cache_directory = os.path.abspath("executorlib_cache")
6060
os.makedirs(cache_directory, exist_ok=True)
6161
task_key, data_dict = serialize_funct_h5(
6262
fn=my_funct,
@@ -84,7 +84,7 @@ def test_execute_function_args(self):
8484
self.assertEqual(future_file_obj.result(), 3)
8585

8686
def test_execute_function_kwargs(self):
87-
cache_directory = os.path.abspath("cache")
87+
cache_directory = os.path.abspath("executorlib_cache")
8888
os.makedirs(cache_directory, exist_ok=True)
8989
task_key, data_dict = serialize_funct_h5(
9090
fn=my_funct,
@@ -112,7 +112,7 @@ def test_execute_function_kwargs(self):
112112
self.assertEqual(future_file_obj.result(), 3)
113113

114114
def test_execute_function_error(self):
115-
cache_directory = os.path.abspath("cache")
115+
cache_directory = os.path.abspath("executorlib_cache")
116116
os.makedirs(cache_directory, exist_ok=True)
117117
task_key, data_dict = serialize_funct_h5(
118118
fn=get_error,
@@ -142,5 +142,4 @@ def test_execute_function_error(self):
142142
future_file_obj.result()
143143

144144
def tearDown(self):
145-
if os.path.exists("cache"):
146-
shutil.rmtree("cache")
145+
shutil.rmtree("executorlib_cache", ignore_errors=True)

tests/test_cache_fileexecutor_mpi.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -40,5 +40,4 @@ def test_executor(self):
4040
self.assertTrue(fs1.done())
4141

4242
def tearDown(self):
43-
if os.path.exists("cache"):
44-
shutil.rmtree("cache")
43+
shutil.rmtree("executorlib_cache", ignore_errors=True)

tests/test_cache_fileexecutor_serial.py

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -93,7 +93,7 @@ def test_executor_function(self):
9393
"resource_dict": {},
9494
}
9595
)
96-
cache_dir = os.path.abspath("cache")
96+
cache_dir = os.path.abspath("executorlib_cache")
9797
os.makedirs(cache_dir, exist_ok=True)
9898
process = Thread(
9999
target=execute_tasks_h5,
@@ -134,7 +134,7 @@ def test_executor_function_dependence_kwargs(self):
134134
"resource_dict": {},
135135
}
136136
)
137-
cache_dir = os.path.abspath("cache")
137+
cache_dir = os.path.abspath("executorlib_cache")
138138
os.makedirs(cache_dir, exist_ok=True)
139139
process = Thread(
140140
target=execute_tasks_h5,
@@ -175,7 +175,7 @@ def test_executor_function_dependence_args(self):
175175
"resource_dict": {},
176176
}
177177
)
178-
cache_dir = os.path.abspath("cache")
178+
cache_dir = os.path.abspath("executorlib_cache")
179179
os.makedirs(cache_dir, exist_ok=True)
180180
process = Thread(
181181
target=execute_tasks_h5,
@@ -203,5 +203,4 @@ def test_execute_in_subprocess_errors(self):
203203
execute_in_subprocess(file_name=__file__, command=[], backend="flux")
204204

205205
def tearDown(self):
206-
if os.path.exists("cache"):
207-
shutil.rmtree("cache")
206+
shutil.rmtree("executorlib_cache", ignore_errors=True)

tests/test_fluxclusterexecutor.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -33,9 +33,9 @@ def mpi_funct(i):
3333
class TestCacheExecutorPysqa(unittest.TestCase):
3434
def test_executor(self):
3535
with FluxClusterExecutor(
36-
resource_dict={"cores": 2, "cwd": "cache"},
36+
resource_dict={"cores": 2, "cwd": "executorlib_cache"},
3737
block_allocation=False,
38-
cache_directory="cache",
38+
cache_directory="executorlib_cache",
3939
) as exe:
4040
cloudpickle_register(ind=1)
4141
fs1 = exe.submit(mpi_funct, 1)
@@ -44,5 +44,4 @@ def test_executor(self):
4444
self.assertTrue(fs1.done())
4545

4646
def tearDown(self):
47-
if os.path.exists("cache"):
48-
shutil.rmtree("cache")
47+
shutil.rmtree("executorlib_cache", ignore_errors=True)

tests/test_mpiexecspawner.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -503,7 +503,7 @@ def test_execute_task_parallel(self):
503503

504504
class TestFuturePoolCache(unittest.TestCase):
505505
def tearDown(self):
506-
shutil.rmtree("./cache")
506+
shutil.rmtree("executorlib_cache", ignore_errors=True)
507507

508508
@unittest.skipIf(
509509
skip_h5py_test, "h5py is not installed, so the h5py tests are skipped."
@@ -519,7 +519,7 @@ def test_execute_task_cache(self):
519519
cores=1,
520520
openmpi_oversubscribe=False,
521521
spawner=MpiExecSpawner,
522-
cache_directory="./cache",
522+
cache_directory="executorlib_cache",
523523
)
524524
self.assertEqual(f.result(), 1)
525525
q.join()
@@ -538,6 +538,6 @@ def test_execute_task_cache_failed_no_argument(self):
538538
cores=1,
539539
openmpi_oversubscribe=False,
540540
spawner=MpiExecSpawner,
541-
cache_directory="./cache",
541+
cache_directory="executorlib_cache",
542542
)
543543
q.join()

tests/test_singlenodeexecutor_cache.py

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ def get_error(a):
2222
)
2323
class TestCacheFunctions(unittest.TestCase):
2424
def test_cache_data(self):
25-
cache_directory = "./cache"
25+
cache_directory = os.path.abspath("executorlib_cache")
2626
with SingleNodeExecutor(cache_directory=cache_directory) as exe:
2727
self.assertTrue(exe)
2828
future_lst = [exe.submit(sum, [i, i]) for i in range(1, 4)]
@@ -35,7 +35,7 @@ def test_cache_data(self):
3535
)
3636

3737
def test_cache_error(self):
38-
cache_directory = "./cache_error"
38+
cache_directory = os.path.abspath("cache_error")
3939
with SingleNodeExecutor(cache_directory=cache_directory) as exe:
4040
self.assertTrue(exe)
4141
cloudpickle_register(ind=1)
@@ -44,7 +44,5 @@ def test_cache_error(self):
4444
print(f.result())
4545

4646
def tearDown(self):
47-
if os.path.exists("cache"):
48-
shutil.rmtree("cache")
49-
if os.path.exists("cache_error"):
50-
shutil.rmtree("cache_error")
47+
shutil.rmtree("executorlib_cache", ignore_errors=True)
48+
shutil.rmtree("cache_error", ignore_errors=True)

tests/test_singlenodeexecutor_mpi.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -83,7 +83,7 @@ def test_errors(self):
8383

8484
class TestExecutorBackendCache(unittest.TestCase):
8585
def tearDown(self):
86-
shutil.rmtree("./cache")
86+
shutil.rmtree("executorlib_cache", ignore_errors=True)
8787

8888
@unittest.skipIf(
8989
skip_mpi4py_test, "mpi4py is not installed, so the mpi4py tests are skipped."
@@ -93,7 +93,7 @@ def test_meta_executor_parallel_cache(self):
9393
max_workers=2,
9494
resource_dict={"cores": 2},
9595
block_allocation=True,
96-
cache_directory="./cache",
96+
cache_directory="executorlib_cache",
9797
) as exe:
9898
cloudpickle_register(ind=1)
9999
time_1 = time.time()

0 commit comments

Comments (0)