diff --git a/.github/workflows/extra.yml b/.github/workflows/extra.yml
index a9f319684..abf9f0a27 100644
--- a/.github/workflows/extra.yml
+++ b/.github/workflows/extra.yml
@@ -11,43 +11,43 @@ jobs:
       matrix:
         os: [ubuntu-latest]
         mpi-version: [mpich]
-        python-version: ["3.10", "3.11", "3.12"]
-        pydantic-version: ["2.8.2"]
+        python-version: ['3.10', '3.11', '3.12', '3.13']
+        pydantic-version: ['2.8.2']
         comms-type: [m, l]
         include:
           - os: macos-latest
-            python-version: 3.11
+            python-version: '3.13'
             mpi-version: mpich
-            pydantic-version: "2.8.2"
+            pydantic-version: '2.8.2'
             comms-type: m
           - os: macos-latest
-            python-version: 3.11
+            python-version: '3.13'
             mpi-version: mpich
-            pydantic-version: "2.8.2"
+            pydantic-version: '2.8.2'
             comms-type: l
           - os: ubuntu-latest
-            python-version: "3.10"
+            python-version: '3.12'
             mpi-version: mpich
-            pydantic-version: "2.8.2"
+            pydantic-version: '2.8.2'
             comms-type: t
           - os: ubuntu-latest
-            mpi-version: "openmpi"
-            pydantic-version: "2.8.2"
-            python-version: "3.12"
+            mpi-version: 'openmpi'
+            pydantic-version: '2.8.2'
+            python-version: '3.12'
             comms-type: l
           - os: ubuntu-latest
             mpi-version: mpich
-            python-version: "3.10"
-            pydantic-version: "1.10.17"
+            python-version: '3.12'
+            pydantic-version: '1.10.17'
             comms-type: m
           - os: ubuntu-latest
             mpi-version: mpich
-            python-version: "3.10"
-            pydantic-version: "1.10.17"
+            python-version: '3.12'
+            pydantic-version: '1.10.17'
             comms-type: l

     env:
-      HYDRA_LAUNCHER: "fork"
+      HYDRA_LAUNCHER: 'fork'
       TERM: xterm-256color
       GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

@@ -61,7 +61,7 @@ jobs:
         uses: conda-incubator/setup-miniconda@v3
         with:
           activate-environment: condaenv
-          miniconda-version: "latest"
+          miniconda-version: 'latest'
           python-version: ${{ matrix.python-version }}
           channels: conda-forge
           channel-priority: flexible
@@ -75,8 +75,8 @@ jobs:
       - name: Install Ubuntu compilers
         if: matrix.os == 'ubuntu-latest'
         run: |
-          conda install gcc_linux-64
-          pip install nlopt==2.9.0
+          conda install gcc_linux-64
+          pip install nlopt==2.9.0

       # Roundabout solution on macos for proper linking with mpicc
       - name: Install macOS compilers
@@ -93,22 +93,22 @@ jobs:
         run: |
           conda env update --file install/gen_deps_environment.yml

-      - name: Install gpcam
-        if: matrix.python-version <= '3.13'
+      - name: Install gpcam and octave  # Neither yet supports 3.13
+        if: matrix.python-version <= '3.12'
         run: |
           pip install gpcam
+          conda install octave

-      - name: Install surmise
+      - name: Install surmise and Tasmanian
         if: matrix.os == 'ubuntu-latest'
         run: |
           pip install --upgrade git+https://github.com/bandframework/surmise.git
+          pip install Tasmanian --user

       - name: Install generator dependencies for Ubuntu tests
-        if: matrix.os == 'ubuntu-latest' && matrix.python-version != '3.12'
+        if: matrix.os == 'ubuntu-latest' && matrix.python-version <= '3.12'
         run: |
-          sudo apt-get install bc
-          pip install -r install/ubuntu_no312.txt
-          pip install Tasmanian --user
+          pip install scikit-build packaging

       - name: Install Balsam on Pydantic 1
         if: matrix.pydantic-version == '1.10.17'
@@ -120,10 +120,10 @@ jobs:

       - name: Install other testing dependencies
         run: |
-          conda install octave
           pip install -r install/testing_requirements.txt
           pip install -r install/misc_feature_requirements.txt
           source install/install_ibcdfo.sh
+          conda install numpy scipy

       - name: Install libEnsemble, flake8, lock environment
         run: |
           pip install -e .
          flake8 libensemble

-      - name: Remove test for persistent Tasmanian, Surmise on Python 3.12
-        if: matrix.python-version >= '3.12'
+      - name: Remove tests using octave, gpcam on Python 3.13
+        if: matrix.python-version >= '3.13'
         run: |
-          rm ./libensemble/tests/regression_tests/test_persistent_tasmanian.py
-          rm ./libensemble/tests/regression_tests/test_persistent_tasmanian_async.py
-          rm ./libensemble/tests/regression_tests/test_persistent_surmise_calib.py
-          rm ./libensemble/tests/regression_tests/test_persistent_surmise_killsims.py
+          rm ./libensemble/tests/regression_tests/test_persistent_fd_param_finder.py  # needs octave, which doesn't yet support 3.13
+          rm ./libensemble/tests/regression_tests/test_persistent_aposmm_external_localopt.py  # needs octave, which doesn't yet support 3.13
+          rm ./libensemble/tests/regression_tests/test_gpCAM.py  # needs gpcam, which doesn't build on 3.13

       - name: Install redis/proxystore on Pydantic 2
         if: matrix.pydantic-version == '2.8.2'
diff --git a/docs/platforms/aurora.rst b/docs/platforms/aurora.rst
index 864d5bedf..4189de840 100644
--- a/docs/platforms/aurora.rst
+++ b/docs/platforms/aurora.rst
@@ -12,10 +12,16 @@ nodes.
 Configuring Python and Installation
 -----------------------------------

-To obtain Python use::
+To obtain Python and create a virtual environment:
+
+.. code-block:: console

-    module use /soft/modulefiles
     module load frameworks
+    python -m venv /path/to-venv --system-site-packages
+    . /path/to-venv/bin/activate
+
+where ``/path/to-venv`` can be anywhere you have write access. For future sessions,
+just load the frameworks module and run the activate line.

 To obtain libEnsemble::

@@ -31,7 +37,7 @@ To run the :doc:`forces_gpu<../tutorials/forces_gpu_tutorial>` tutorial on
 Aurora.

 To obtain the example you can git clone libEnsemble - although only
-the forces sub-directory is needed::
+the ``forces`` sub-directory is strictly needed::

     git clone https://github.com/Libensemble/libensemble
     cd libensemble/libensemble/tests/scaling_tests/forces/forces_app

@@ -44,9 +50,11 @@ Now go to forces_gpu directory::

     cd ../forces_gpu

-To make use of all available GPUs, open ``run_libe_forces.py`` and adjust
-the exit_criteria to do more simulations. The following will do two
-simulations for each worker::
+To make use of all available GPUs, open **run_libe_forces.py** and adjust
+the ``exit_criteria`` to perform more simulations. The following will run two
+simulations for each worker:
+
+.. code-block:: python

     # Instruct libEnsemble to exit after this many simulations
     ensemble.exit_criteria = ExitCriteria(sim_max=nsim_workers*2)

@@ -54,17 +62,16 @@ simulations for each worker::
 Now grab an interactive session on two nodes (or use the batch script at
 ``../submission_scripts/submit_pbs_aurora.sh``)::

-    qsub -A -l select=2 -l walltime=15:00 -lfilesystems=home -q EarlyAppAccess -I
+    qsub -A -l select=2 -l walltime=15:00 -lfilesystems=home:flare -q debug -I

 Once in the interactive session, you may need to reload the frameworks module::

     cd $PBS_O_WORKDIR
-    module use /soft/modulefiles
-    module load frameworks
+    . /path/to-venv/bin/activate

 Then in the session run::

-    python run_libe_forces.py --comms local --nworkers 13
+    python run_libe_forces.py -n 13

 This provides twelve workers for running simulations (one for each GPU across
 two nodes). An extra worker is added to run the persistent generator. The
@@ -72,12 +79,28 @@ GPU settings for each worker simulation are printed.

 Looking at ``libE_stats.txt`` will provide a summary of the runs.
+
+Now try running::
+
+    ./cleanup.sh
+    python run_libe_forces.py -n 7
+
+You will see that two cores and two GPUs are used per worker. The **forces**
+example automatically uses the GPUs available to each worker.
+
+Live viewing GPU usage
+----------------------
+
+To see GPU usage, SSH into one of your compute nodes from another window and run::
+
+    module load xpu-smi
+    watch -n 0.1 xpu-smi dump -d -1 -m 0 -n 1
+
 Using tiles as GPUs
 -------------------

-If you wish to treat each tile as its own GPU, then add the *libE_specs*
-option ``use_tiles_as_gpus=True``, so the *libE_specs* block of
-``run_libe_forces.py`` becomes:
+To treat each tile as its own GPU, add the ``use_tiles_as_gpus=True`` option
+to the ``libE_specs`` block in **run_libe_forces.py**:

 .. code-block:: python

@@ -90,19 +113,45 @@ option ``use_tiles_as_gpus=True``, so the *libE_specs* block of
 Now you can run again but with twice the workers for running simulations (each
 will use one GPU tile)::

-    python run_libe_forces.py --comms local --nworkers 25
+    python run_libe_forces.py -n 25
+
+
+Running generator on the manager
+--------------------------------
+
+An alternative is to run the generator on a thread on the manager. The
+number of workers can then be set to the number of simulation workers.
+
+Change the ``libE_specs`` in **run_libe_forces.py** as follows:
+
+.. code-block:: python
+
+    nsim_workers = ensemble.nworkers
+
+    # Persistent gen does not need resources
+    ensemble.libE_specs = LibeSpecs(
+        gen_on_manager=True,
+    )
+
+Then we can run with 12 (instead of 13) workers::
+
+    python run_libe_forces.py -n 12
+
+Dynamic resource assignment
+---------------------------

-Note that the *forces* example will automatically use the GPUs available to
-each worker (with one MPI rank per GPU), so if fewer workers are provided,
-more than one GPU will be used per simulation.
+In the **forces** directory you will also find:

-Also see ``forces_gpu_var_resources`` and ``forces_multi_app`` examples for
-cases that use varying processor/GPU counts per simulation.
+* ``forces_gpu_var_resources`` uses varying processor/GPU counts per simulation.
+* ``forces_multi_app`` uses varying processor/GPU counts per simulation and also
+  uses two different user executables, one which is CPU-only and one which
+  uses GPUs. This allows highly efficient use of nodes for multi-application
+  ensembles.

 Demonstration
 -------------

-Note that a video demonstration_ of the *forces_gpu* example on *Frontier*
+Note that a video demonstration_ of the *forces_gpu* example on **Frontier**
 is also available. The workflow is identical when running on Aurora, with
 the exception of different compiler options and numbers of workers (because
 the numbers of GPUs on a node differs).
diff --git a/install/ubuntu_no312.txt b/install/ubuntu_no312.txt
deleted file mode 100644
index 671febe2b..000000000
--- a/install/ubuntu_no312.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-gpcam==8.1.12
-scikit-build==0.18.1
-packaging==24.1
-git+https://github.com/bandframework/surmise.git
diff --git a/libensemble/executors/mpi_executor.py b/libensemble/executors/mpi_executor.py
index 96c4d0d09..28d1fb6f9 100644
--- a/libensemble/executors/mpi_executor.py
+++ b/libensemble/executors/mpi_executor.py
@@ -47,29 +47,32 @@ class MPIExecutor(Executor):
         information using the ``custom_info`` argument. This takes a dictionary
         of values.
-        The allowable fields are::
+        The allowable fields are:

-            'mpi_runner' [string]:
-                Select runner: 'mpich', 'openmpi', 'aprun', 'srun', 'jsrun', 'custom'
-                All except 'custom' relate to runner classes in libEnsemble.
+        .. parsed-literal::
+
+            **'mpi_runner'** [string]:
+                Select runner: `'mpich'`, `'openmpi'`, `'aprun'`, `'srun'`, `'jsrun'`, `'custom'`
+                All except `'custom'` relate to runner classes in libEnsemble.
                 Custom allows user to define their own run-lines but without parsing
                 arguments or making use of auto-resources.
-            'runner_name' [string]:
-                Runner name: Replaces run command if present. All runners have a default
-                except for 'custom'.
-            'subgroup_launch' [bool]:
+            **'runner_name'** [string]:
+                The literal string that appears at the front of the run command.
+                This is typically 'mpirun', 'srun', etc., and can be a full path.
+                Defaults exist for all runners except 'custom'.
+            **'subgroup_launch'** [bool]:
                 Whether MPI runs should be initiated in a new process group. This needs
                 to be correct for kills to work correctly. Use the standalone test at
-                libensemble/tests/standalone_tests/kill_test to determine correct value
+                `libensemble/tests/standalone_tests/kill_test` to determine the correct value
                 for a system.

-        For example::
+        For example::

-            customizer = {'mpi_runner': 'mpich',
-                          'runner_name': 'wrapper -x mpich'}
+            customizer = {'mpi_runner': 'mpich',
+                          'runner_name': 'wrapper -x mpich'}

-            from libensemble.executors.mpi_executor import MPIExecutor
-            exctr = MPIExecutor(custom_info=customizer)
+            from libensemble.executors.mpi_executor import MPIExecutor
+            exctr = MPIExecutor(custom_info=customizer)

     """

@@ -336,6 +339,9 @@ def submit(
         else:
             mpi_runner_obj = self.mpi_runner_obj or self._create_mpi_runner_from_attr()

+        if env_script is None and mpi_runner_obj is None:
+            raise ExecutorException("No valid MPI runner was found")
+
         mpi_specs = mpi_runner_obj.get_mpi_specs(
             task,
             num_procs,
diff --git a/libensemble/executors/mpi_runner.py b/libensemble/executors/mpi_runner.py
index eb002d14b..48953cc3c 100644
--- a/libensemble/executors/mpi_runner.py
+++ b/libensemble/executors/mpi_runner.py
@@ -21,11 +21,13 @@ def get_runner(mpi_runner_type, runner_name=None, platform_info=None):
             "msmpi": MSMPI_MPIRunner,
             "custom": MPIRunner,
         }
-        mpi_runner = mpi_runners[mpi_runner_type]
-        if runner_name is not None:
-            runner = mpi_runner(run_command=runner_name, platform_info=platform_info)
-        else:
-            runner = mpi_runner(platform_info=platform_info)
+        runner = None
+        if mpi_runner_type is not None:
+            mpi_runner = mpi_runners[mpi_runner_type]
+            if runner_name is not None:
+                runner = mpi_runner(run_command=runner_name, platform_info=platform_info)
+            else:
+                runner = mpi_runner(platform_info=platform_info)
         return runner

     def __init__(self, run_command="mpiexec", platform_info=None):
diff --git a/libensemble/gen_funcs/persistent_ax_multitask.py b/libensemble/gen_funcs/persistent_ax_multitask.py
index 91a57e2eb..0f5df7e30 100644
--- a/libensemble/gen_funcs/persistent_ax_multitask.py
+++ b/libensemble/gen_funcs/persistent_ax_multitask.py
@@ -305,7 +305,7 @@ def persistent_gp_mt_ax_gen_f(H, persis_info, gen_specs, libE_info):

         # Increase iteration counter.
         model_iteration += 1

-    return [], persis_info, FINISHED_PERSISTENT_GEN_TAG
+    return None, persis_info, FINISHED_PERSISTENT_GEN_TAG


 class AxRunner(Runner):
diff --git a/libensemble/gen_funcs/persistent_gpCAM.py b/libensemble/gen_funcs/persistent_gpCAM.py
index 953cf33cf..c95b9cac1 100644
--- a/libensemble/gen_funcs/persistent_gpCAM.py
+++ b/libensemble/gen_funcs/persistent_gpCAM.py
@@ -10,8 +10,8 @@ from libensemble.tools.persistent_support import PersistentSupport

 __all__ = [
-    "persistent_gpCAM_simple",
-    "persistent_gpCAM_ask_tell",
+    "persistent_gpCAM",
+    "persistent_gpCAM_covar",
 ]
@@ -140,16 +140,78 @@ def _find_eligible_points(x_for_var, sorted_indices, r, batch_size):
     return np.array(eligible_points)


-def persistent_gpCAM_simple(H_in, persis_info, gen_specs, libE_info):
+def persistent_gpCAM(H_in, persis_info, gen_specs, libE_info):
+    """
+    This generation function constructs a global surrogate of `f` values. It is
+    a batched method that produces a first batch uniformly random from (lb, ub).
+    On subsequent iterations, it calls an optimization method to produce the next
+    batch of points. This optimization might be too slow (relative to the
+    simulation evaluation time) for some use cases.
+
+    .. seealso::
+        `test_gpCAM.py `_
+    """  # noqa
+
+    batch_size, n, lb, ub, all_x, all_y, ps = _initialize_gpcAM(gen_specs["user"], libE_info)
+    ask_max_iter = gen_specs["user"].get("ask_max_iter") or 10
+
+    H_o = np.zeros(batch_size, dtype=gen_specs["out"])
+    x_new = persis_info["rand_stream"].uniform(lb, ub, (batch_size, n))
+    H_o["x"] = x_new
+
+    tag, Work, calc_in = ps.send_recv(H_o)
+
+    first_call = True
+    while tag not in [STOP_TAG, PERSIS_STOP]:
+        all_x = np.vstack((all_x, x_new))
+        all_y = np.vstack((all_y, np.atleast_2d(calc_in["f"]).T))
+
+        if first_call:
+            # Initialize GP
+            my_gp = GP(all_x, all_y.flatten(), noise_variances=1e-8 * np.ones(len(all_y)))
+            first_call = False
+        else:
+            my_gp.tell(all_x, all_y.flatten(), noise_variances=1e-8 * np.ones(len(all_y)))
+
+        my_gp.train()
+
+        start = time.time()
+        x_new = my_gp.ask(
+            input_set=np.column_stack((lb, ub)),
+            n=batch_size,
+            pop_size=batch_size,
+            acquisition_function="total correlation",
+            max_iter=ask_max_iter,  # Larger takes longer. gpCAM default is 20.
+        )["x"]
+        print(f"Ask time:{time.time() - start}")
+        H_o = np.zeros(batch_size, dtype=gen_specs["out"])
+        H_o["x"] = x_new
+
+        tag, Work, calc_in = ps.send_recv(H_o)
+
+    return None, persis_info, FINISHED_PERSISTENT_GEN_TAG
+
+
+def persistent_gpCAM_covar(H_in, persis_info, gen_specs, libE_info):
     """
     This generation function constructs a global surrogate of `f` values. It is
     a batched method that produces a first batch uniformly random from (lb, ub)
     and on following iterations samples the GP posterior covariance function
     to find sample points.

+    If gen_specs["user"]["use_grid"] is set to True, the parameter space is
+    divided into a mesh of candidate points (num_points in each dimension).
+    Subsequent points are chosen by maximum covariance and must be at least a
+    distance `r` away from each other, to explore different regions.
+
+    If gen_specs["user"]["test_points_file"] is set to a file of evaluated
+    points, then the gpCAM predictions are compared at these points to assess
+    model quality.
+
     .. seealso::
         `test_gpCAM.py `_
     """  # noqa
+
     U = gen_specs["user"]
     my_gp = None
     noise = 1e-12
@@ -212,56 +274,4 @@
         x_for_var = persis_info["rand_stream"].uniform(lb, ub, (10 * batch_size, n))
         var_vals = _eval_var(my_gp, all_x, all_y, x_for_var, test_points, persis_info)

-    return H_o, persis_info, FINISHED_PERSISTENT_GEN_TAG
-
-
-def persistent_gpCAM_ask_tell(H_in, persis_info, gen_specs, libE_info):
-    """
-    Like persistent_gpCAM_simple, this generation function constructs a global
-    surrogate of `f` values. It also aa batched method that produces a first batch
-    uniformly random from (lb, ub). On subequent iterations, it calls an
-    optimization method to produce the next batch of points. This optimization
-    might be too slow (relative to the simulation evaluation time) for some use cases.
-
-    .. seealso::
-        `test_gpCAM.py `_
-    """  # noqa
-
-    batch_size, n, lb, ub, all_x, all_y, ps = _initialize_gpcAM(gen_specs["user"], libE_info)
-    ask_max_iter = gen_specs["user"].get("ask_max_iter") or 10
-
-    H_o = np.zeros(batch_size, dtype=gen_specs["out"])
-    x_new = persis_info["rand_stream"].uniform(lb, ub, (batch_size, n))
-    H_o["x"] = x_new
-
-    tag, Work, calc_in = ps.send_recv(H_o)
-
-    first_call = True
-    while tag not in [STOP_TAG, PERSIS_STOP]:
-        all_x = np.vstack((all_x, x_new))
-        all_y = np.vstack((all_y, np.atleast_2d(calc_in["f"]).T))
-
-        if first_call:
-            # Initialize GP
-            my_gp = GP(all_x, all_y.flatten(), noise_variances=1e-8 * np.ones(len(all_y)))
-            first_call = False
-        else:
-            my_gp.tell(all_x, all_y.flatten(), noise_variances=1e-8 * np.ones(len(all_y)))
-
-        my_gp.train()
-
-        start = time.time()
-        x_new = my_gp.ask(
-            input_set=np.column_stack((lb, ub)),
-            n=batch_size,
-            pop_size=batch_size,
-            acquisition_function="total correlation",
-            max_iter=ask_max_iter,  # Larger takes longer. gpCAM default is 20.
- )["x"] - print(f"Ask time:{time.time() - start}") - H_o = np.zeros(batch_size, dtype=gen_specs["out"]) - H_o["x"] = x_new - - tag, Work, calc_in = ps.send_recv(H_o) - - return H_o, persis_info, FINISHED_PERSISTENT_GEN_TAG + return None, persis_info, FINISHED_PERSISTENT_GEN_TAG diff --git a/libensemble/gen_funcs/persistent_inverse_bayes.py b/libensemble/gen_funcs/persistent_inverse_bayes.py index 4875ca03b..2f677902b 100644 --- a/libensemble/gen_funcs/persistent_inverse_bayes.py +++ b/libensemble/gen_funcs/persistent_inverse_bayes.py @@ -3,6 +3,10 @@ from libensemble.message_numbers import EVAL_GEN_TAG, FINISHED_PERSISTENT_GEN_TAG, PERSIS_STOP, STOP_TAG from libensemble.tools.persistent_support import PersistentSupport +__all__ = [ + "persistent_updater_after_likelihood", +] + def persistent_updater_after_likelihood(H, persis_info, gen_specs, libE_info): """ """ @@ -36,4 +40,4 @@ def persistent_updater_after_likelihood(H, persis_info, gen_specs, libE_info): if calc_in is not None: w = H_o["prior"] + calc_in["like"] - H_o["prop"] - return H_o, persis_info, FINISHED_PERSISTENT_GEN_TAG + return None, persis_info, FINISHED_PERSISTENT_GEN_TAG diff --git a/libensemble/gen_funcs/persistent_sampling.py b/libensemble/gen_funcs/persistent_sampling.py index fcbcba090..44611d06b 100644 --- a/libensemble/gen_funcs/persistent_sampling.py +++ b/libensemble/gen_funcs/persistent_sampling.py @@ -60,7 +60,7 @@ def persistent_uniform(_, persis_info, gen_specs, libE_info): if hasattr(calc_in, "__len__"): b = len(calc_in) - return H_o, persis_info, FINISHED_PERSISTENT_GEN_TAG + return None, persis_info, FINISHED_PERSISTENT_GEN_TAG def persistent_uniform_final_update(_, persis_info, gen_specs, libE_info): @@ -163,7 +163,7 @@ def persistent_request_shutdown(_, persis_info, gen_specs, libE_info): print("Reached threshold.", f_count, flush=True) break # End the persistent gen - return H_o, persis_info, FINISHED_PERSISTENT_GEN_TAG + return None, persis_info, FINISHED_PERSISTENT_GEN_TAG def uniform_nonblocking(_, persis_info, gen_specs, libE_info): @@ -197,7 +197,7 @@ def uniform_nonblocking(_, persis_info, gen_specs, libE_info): if hasattr(calc_in, "__len__"): b = len(calc_in) - return H_o, persis_info, FINISHED_PERSISTENT_GEN_TAG + return None, persis_info, FINISHED_PERSISTENT_GEN_TAG def batched_history_matching(_, persis_info, gen_specs, libE_info): @@ -243,7 +243,7 @@ def batched_history_matching(_, persis_info, gen_specs, libE_info): mu = np.mean(H_o["x"][best_inds], axis=0) Sigma = np.cov(H_o["x"][best_inds].T) - return H_o, persis_info, FINISHED_PERSISTENT_GEN_TAG + return None, persis_info, FINISHED_PERSISTENT_GEN_TAG def persistent_uniform_with_cancellations(_, persis_info, gen_specs, libE_info): @@ -272,4 +272,4 @@ def persistent_uniform_with_cancellations(_, persis_info, gen_specs, libE_info): cancel_from += b ps.request_cancel_sim_ids(cancel_ids) - return H_o, persis_info, FINISHED_PERSISTENT_GEN_TAG + return None, persis_info, FINISHED_PERSISTENT_GEN_TAG diff --git a/libensemble/gen_funcs/persistent_sampling_var_resources.py b/libensemble/gen_funcs/persistent_sampling_var_resources.py index 252fe6019..394ef7581 100644 --- a/libensemble/gen_funcs/persistent_sampling_var_resources.py +++ b/libensemble/gen_funcs/persistent_sampling_var_resources.py @@ -58,7 +58,7 @@ def uniform_sample(_, persis_info, gen_specs, libE_info): if hasattr(calc_in, "__len__"): b = len(calc_in) - return H_o, persis_info, FINISHED_PERSISTENT_GEN_TAG + return None, persis_info, FINISHED_PERSISTENT_GEN_TAG def 
 def uniform_sample_with_var_gpus(_, persis_info, gen_specs, libE_info):
@@ -99,7 +99,7 @@ def uniform_sample_with_var_gpus(_, persis_info, gen_specs, libE_info):
         if hasattr(calc_in, "__len__"):
             b = len(calc_in)

-    return H_o, persis_info, FINISHED_PERSISTENT_GEN_TAG
+    return None, persis_info, FINISHED_PERSISTENT_GEN_TAG


 def uniform_sample_with_procs_gpus(_, persis_info, gen_specs, libE_info):
@@ -128,7 +128,7 @@ def uniform_sample_with_procs_gpus(_, persis_info, gen_specs, libE_info):
         if hasattr(calc_in, "__len__"):
             b = len(calc_in)

-    return H_o, persis_info, FINISHED_PERSISTENT_GEN_TAG
+    return None, persis_info, FINISHED_PERSISTENT_GEN_TAG


 def uniform_sample_with_var_priorities(_, persis_info, gen_specs, libE_info):
@@ -163,7 +163,7 @@ def uniform_sample_with_var_priorities(_, persis_info, gen_specs, libE_info):

         tag, Work, calc_in = ps.send_recv(H_o)

-    return H_o, persis_info, FINISHED_PERSISTENT_GEN_TAG
+    return None, persis_info, FINISHED_PERSISTENT_GEN_TAG


 def uniform_sample_diff_simulations(_, persis_info, gen_specs, libE_info):
@@ -197,7 +197,7 @@ def uniform_sample_diff_simulations(_, persis_info, gen_specs, libE_info):
         if hasattr(calc_in, "__len__"):
             b = len(calc_in)

-    return H_o, persis_info, FINISHED_PERSISTENT_GEN_TAG
+    return None, persis_info, FINISHED_PERSISTENT_GEN_TAG


 def uniform_sample_with_sim_gen_resources(_, persis_info, gen_specs, libE_info):
@@ -245,4 +245,4 @@ def uniform_sample_with_sim_gen_resources(_, persis_info, gen_specs, libE_info):
         if hasattr(calc_in, "__len__"):
             b = len(calc_in)

-    return H_o, persis_info, FINISHED_PERSISTENT_GEN_TAG
+    return None, persis_info, FINISHED_PERSISTENT_GEN_TAG
diff --git a/libensemble/gen_funcs/persistent_tasmanian.py b/libensemble/gen_funcs/persistent_tasmanian.py
index cedb87c1e..35aa5219d 100644
--- a/libensemble/gen_funcs/persistent_tasmanian.py
+++ b/libensemble/gen_funcs/persistent_tasmanian.py
@@ -10,6 +10,11 @@ from libensemble.tools import parse_args
 from libensemble.tools.persistent_support import PersistentSupport

+__all__ = [
+    "sparse_grid_batched",
+    "sparse_grid_async",
+]
+

 def lex_le(x, y, tol=1e-12):
     """
@@ -195,7 +200,7 @@ def sparse_grid_batched(H, persis_info, gen_specs, libE_info):
             assert "sCriteria" in U
             grid.setSurplusRefinement(U["fTolerance"], U["iOutput"], U["sCriteria"])

-    return H0, persis_info, FINISHED_PERSISTENT_GEN_TAG
+    return None, persis_info, FINISHED_PERSISTENT_GEN_TAG


 def sparse_grid_async(H, persis_info, gen_specs, libE_info):
@@ -283,7 +288,7 @@ def get_refined_points(g, U):
         else:
             tag, Work, calc_in = ps.recv()

-    return [], persis_info, FINISHED_PERSISTENT_GEN_TAG
+    return None, persis_info, FINISHED_PERSISTENT_GEN_TAG


 def get_sparse_grid_specs(user_specs, sim_f, num_dims, num_outputs=1, mode="batched"):
diff --git a/libensemble/tests/regression_tests/test_gpCAM.py b/libensemble/tests/regression_tests/test_gpCAM.py
index c62ebb461..62a9a7955 100644
--- a/libensemble/tests/regression_tests/test_gpCAM.py
+++ b/libensemble/tests/regression_tests/test_gpCAM.py
@@ -26,7 +26,7 @@ import numpy as np

 from libensemble.alloc_funcs.start_only_persistent import only_persistent_gens as alloc_f
-from libensemble.gen_funcs.persistent_gpCAM import persistent_gpCAM_ask_tell, persistent_gpCAM_simple
+from libensemble.gen_funcs.persistent_gpCAM import persistent_gpCAM, persistent_gpCAM_covar

 # Import libEnsemble items for this test
 from libensemble.libE import libE
@@ -69,7 +69,7 @@

 for inst in range(3):
     if inst == 0:
-        gen_specs["gen_f"] = persistent_gpCAM_simple
+        gen_specs["gen_f"] = persistent_gpCAM_covar
         num_batches = 10
         exit_criteria = {"sim_max": num_batches * batch_size, "wallclock_max": 300}
         libE_specs["save_every_k_gens"] = 150
@@ -81,7 +81,7 @@
         del libE_specs["H_file_prefix"]
         del libE_specs["save_every_k_gens"]
     elif inst == 2:
-        gen_specs["gen_f"] = persistent_gpCAM_ask_tell
+        gen_specs["gen_f"] = persistent_gpCAM
         num_batches = 3  # Few because the ask_tell gen can be slow
         gen_specs["user"]["ask_max_iter"] = 1  # For quicker test
        exit_criteria = {"sim_max": num_batches * batch_size, "wallclock_max": 300}
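
A minimal sketch of selecting the renamed gpCAM generators in a calling script,
mirroring the updated imports in ``test_gpCAM.py`` (the bounds, batch size, and
``out`` fields below are illustrative, following that test's pattern):

.. code-block:: python

    import numpy as np

    from libensemble.gen_funcs.persistent_gpCAM import persistent_gpCAM, persistent_gpCAM_covar

    # persistent_gpCAM (formerly persistent_gpCAM_ask_tell) optimizes each new
    # batch via gpCAM's ask(); persistent_gpCAM_covar (formerly
    # persistent_gpCAM_simple) samples the GP posterior covariance instead.
    gen_specs = {
        "gen_f": persistent_gpCAM,
        "persis_in": ["x", "f", "sim_id"],
        "out": [("x", float, (2,))],  # illustrative 2D problem
        "user": {
            "batch_size": 4,
            "lb": np.array([-2.0, -1.0]),
            "ub": np.array([2.0, 1.0]),
            "ask_max_iter": 1,  # optional; the gen falls back to 10 if unset
        },
    }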
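
For reference, the ``custom_info`` example from the updated ``MPIExecutor``
docstring, assembled into a short runnable sketch (the ``wrapper -x mpich``
run line is the docstring's illustrative placeholder, not a real launcher):

.. code-block:: python

    from libensemble.executors.mpi_executor import MPIExecutor

    # Use the MPICH runner class, but launch through a custom wrapper command.
    customizer = {"mpi_runner": "mpich", "runner_name": "wrapper -x mpich"}
    exctr = MPIExecutor(custom_info=customizer)

With the ``submit()`` change above, an ``ExecutorException`` ("No valid MPI
runner was found") is now raised when no ``env_script`` is given and no MPI
runner object could be constructed, instead of failing later on a ``None``
runner.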