20 | 20 |
21 | 21 | class SlurmClusterExecutor(BaseExecutor):
22 | 22 | """
23 | | - The executorlib.Executor leverages either the message passing interface (MPI), the SLURM workload manager or
24 | | - preferable the flux framework for distributing python functions within a given resource allocation. In contrast to
25 | | - the mpi4py.futures.MPIPoolExecutor the executorlib.Executor can be executed in a serial python process and does not
26 | | - require the python script to be executed with MPI. It is even possible to execute the executorlib.Executor directly
27 | | - in an interactive Jupyter notebook.
| 23 | + The executorlib.SlurmClusterExecutor leverages either the message passing interface (MPI), the SLURM workload
| 24 | + manager or preferably the flux framework for distributing python functions within a given resource allocation. In
| 25 | + contrast to the mpi4py.futures.MPIPoolExecutor the executorlib.SlurmClusterExecutor can be executed in a serial
| 26 | + python process and does not require the python script to be executed with MPI. It is even possible to execute the
| 27 | + executorlib.SlurmClusterExecutor directly in an interactive Jupyter notebook.
28 | 28 |
29 | 29 | Args:
30 | 30 | max_workers (int): for backwards compatibility with the standard library, max_workers also defines the number of
@@ -62,7 +62,7 @@ class SlurmClusterExecutor(BaseExecutor):
62 | 62 | Examples:
63 | 63 | ```
64 | 64 | >>> import numpy as np
65 | | - >>> from executorlib.executor.slurm import SlurmClusterExecutor
| 65 | + >>> from executorlib import SlurmClusterExecutor
66 | 66 | >>>
67 | 67 | >>> def calc(i, j, k):
68 | 68 | >>> from mpi4py import MPI
@@ -96,12 +96,11 @@ def __init__(
96 | 96 | plot_dependency_graph_filename: Optional[str] = None,
97 | 97 | ):
98 | 98 | """
99 | | - Instead of returning a executorlib.Executor object this function returns either a executorlib.mpi.PyMPIExecutor,
100 | | - executorlib.slurm.PySlurmExecutor or executorlib.flux.PyFluxExecutor depending on which backend is available. The
101 | | - executorlib.flux.PyFluxExecutor is the preferred choice while the executorlib.mpi.PyMPIExecutor is primarily used
102 | | - for development and testing. The executorlib.flux.PyFluxExecutor requires flux-core from the flux-framework to be
103 | | - installed and in addition flux-sched to enable GPU scheduling. Finally, the executorlib.slurm.PySlurmExecutor
104 | | - requires the SLURM workload manager to be installed on the system.
| 99 | + The executorlib.SlurmClusterExecutor leverages either the message passing interface (MPI), the SLURM workload
| 100 | + manager or preferably the flux framework for distributing python functions within a given resource allocation.
| 101 | + In contrast to the mpi4py.futures.MPIPoolExecutor the executorlib.SlurmClusterExecutor can be executed in a
| 102 | + serial python process and does not require the python script to be executed with MPI. It is even possible to
| 103 | + execute the executorlib.SlurmClusterExecutor directly in an interactive Jupyter notebook.
105 | 104 |
106 | 105 | Args:
107 | 106 | max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
@@ -196,11 +195,11 @@ def __init__(
196 | 195 |
197 | 196 | class SlurmJobExecutor(BaseExecutor):
198 | 197 | """
199 | | - The executorlib.Executor leverages either the message passing interface (MPI), the SLURM workload manager or
| 198 | + The executorlib.SlurmJobExecutor leverages either the message passing interface (MPI), the SLURM workload manager or
200 | 199 | preferably the flux framework for distributing python functions within a given resource allocation. In contrast to
201 | | - the mpi4py.futures.MPIPoolExecutor the executorlib.Executor can be executed in a serial python process and does not
202 | | - require the python script to be executed with MPI. It is even possible to execute the executorlib.Executor directly
203 | | - in an interactive Jupyter notebook.
| 200 | + the mpi4py.futures.MPIPoolExecutor the executorlib.SlurmJobExecutor can be executed in a serial python process and
| 201 | + does not require the python script to be executed with MPI. It is even possible to execute the
| 202 | + executorlib.SlurmJobExecutor directly in an interactive Jupyter notebook.
204 | 203 |
205 | 204 | Args:
206 | 205 | max_workers (int): for backwards compatibility with the standard library, max_workers also defines the number of
@@ -241,7 +240,7 @@ class SlurmJobExecutor(BaseExecutor):
241 | 240 | Examples:
242 | 241 | ```
243 | 242 | >>> import numpy as np
244 | | - >>> from executorlib.executor.slurm import SlurmJobExecutor
| 243 | + >>> from executorlib import SlurmJobExecutor
245 | 244 | >>>
246 | 245 | >>> def calc(i, j, k):
247 | 246 | >>> from mpi4py import MPI
@@ -274,12 +273,11 @@ def __init__(
274 | 273 | plot_dependency_graph_filename: Optional[str] = None,
275 | 274 | ):
276 | 275 | """
277 | | - Instead of returning a executorlib.Executor object this function returns either a executorlib.mpi.PyMPIExecutor,
278 | | - executorlib.slurm.PySlurmExecutor or executorlib.flux.PyFluxExecutor depending on which backend is available. The
279 | | - executorlib.flux.PyFluxExecutor is the preferred choice while the executorlib.mpi.PyMPIExecutor is primarily used
280 | | - for development and testing. The executorlib.flux.PyFluxExecutor requires flux-core from the flux-framework to be
281 | | - installed and in addition flux-sched to enable GPU scheduling. Finally, the executorlib.slurm.PySlurmExecutor
282 | | - requires the SLURM workload manager to be installed on the system.
| 276 | + The executorlib.SlurmJobExecutor leverages either the message passing interface (MPI), the SLURM workload
| 277 | + manager or preferably the flux framework for distributing python functions within a given resource allocation.
| 278 | + In contrast to the mpi4py.futures.MPIPoolExecutor the executorlib.SlurmJobExecutor can be executed in a serial
| 279 | + python process and does not require the python script to be executed with MPI. It is even possible to execute
| 280 | + the executorlib.SlurmJobExecutor directly in an interactive Jupyter notebook.
283 | 281 |
284 | 282 | Args:
285 | 283 | max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
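For anyone reviewing the renamed import path, a minimal usage sketch under the new flat import shown in this diff. It relies only on what the diff states (`from executorlib import SlurmJobExecutor`, the `max_workers` argument, and the `concurrent.futures`-style `submit`/`result` interface implied by `BaseExecutor`); the trivial `calc` body and the assumption of an active SLURM allocation are illustrative, not verified against a cluster:

```python
# Minimal sketch of the renamed import path from this diff. Assumes an
# active SLURM allocation, since the executor dispatches tasks through
# the SLURM workload manager; calc() here is a plain placeholder for the
# MPI-parallel example shown in the docstring.
from executorlib import SlurmJobExecutor

def calc(i, j, k):
    return i + j + k

with SlurmJobExecutor(max_workers=1) as exe:
    future = exe.submit(calc, 1, 2, 3)  # returns a concurrent.futures.Future
    print(future.result())  # blocks until the task completes; prints 6
```

`SlurmClusterExecutor` is driven the same way; as the updated docstrings state, both classes can be used from a serial Python process or an interactive Jupyter notebook without launching the script itself through MPI.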