Add alpha eigenvalue simulation mode #17

Open
wants to merge 8 commits into develop
Updates to new Simulation methods for combing and cancellation
HunterBelanger committed Sep 14, 2024

Verified: this commit was created on GitHub.com and signed with GitHub's verified signature. The key has expired.
commit 87a731d7ee073407398c57b4687243762df77ecc
4 changes: 0 additions & 4 deletions include/simulation/alpha_power_iterator.hpp
@@ -107,10 +107,6 @@ class AlphaPowerIterator : public Simulation {

void normalize_weights_and_calc_kstep(std::vector<BankedParticle>& next_gen);

void comb_particles(std::vector<BankedParticle>& next_gen);

void perform_regional_cancellation(std::vector<BankedParticle>& next_gen);

// Entropy calculation
void compute_pre_cancellation_entropy(std::vector<BankedParticle>& next_gen);
void compute_post_cancellation_entropy(std::vector<BankedParticle>& next_gen);
129 changes: 4 additions & 125 deletions src/alpha_power_iterator.cpp
@@ -455,7 +455,7 @@ void AlphaPowerIterator::run() {

// Do weight cancellation
if (cancelator) {
perform_regional_cancellation(next_gen);
perform_regional_cancellation(cancelator, next_gen);
}

// Changes alpha_params.keff
@@ -471,7 +471,7 @@ void AlphaPowerIterator::run() {
Tallies::instance().set_k_alpha(alpha_params.keff);

// Comb particles
if (combing) comb_particles(next_gen);
if (combing) comb_particles(next_gen, Npos, Nneg, Nnet, Ntot);

// Synchronize particles across nodes
sync_banks(mpi::node_nparticles, next_gen);
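The two updated call sites above suggest that combing and cancellation have moved from AlphaPowerIterator methods to shared helpers used by the Simulation classes. A minimal sketch of what the new declarations might look like; the parameter names come from the call sites, but their exact types, reference semantics, and location are assumptions, not this PR's actual interface:

// Hypothetical shared declarations; the types of the count arguments and of
// the cancelator parameter are guesses based only on the call sites above.
void comb_particles(std::vector<BankedParticle>& next_gen,
                    std::size_t& Npos, std::size_t& Nneg,
                    std::size_t& Nnet, std::size_t& Ntot);

void perform_regional_cancellation(std::shared_ptr<Cancelator>& cancelator,
                                   std::vector<BankedParticle>& next_gen);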
@@ -554,14 +554,10 @@ void AlphaPowerIterator::run() {
output << " Results using " << gen - nignored << " active generations:\n";
output << " -----------------------------------\n";
output << std::fixed << std::setprecision(6);
output << " | alpha = " << Tallies::instance().alpha_avg() << " +/- "
output << " | alpha = " << Tallies::instance().alpha_avg() << " +/- "
<< Tallies::instance().alpha_err() << " |\n";
output << " | kstep = " << Tallies::instance().k_alpha_avg() << " +/- "
output << " | kstep = " << Tallies::instance().k_alpha_avg() << " +/- "
<< Tallies::instance().k_alpha_err() << " |\n";
if (settings::energy_mode == settings::EnergyMode::CE) {
output << " | kabs = " << Tallies::instance().kabs_avg() << " +/- "
<< Tallies::instance().kabs_err() << " |\n";
}
output << " | leakage = " << Tallies::instance().leakage_avg() << " +/- "
<< Tallies::instance().leakage_err() << " |\n";
output << " -----------------------------------\n";
@@ -583,95 +579,6 @@ void AlphaPowerIterator::run() {
write_source(bank);
}

void AlphaPowerIterator::comb_particles(std::vector<BankedParticle>& next_gen) {
double Wpos_node = 0.;
double Wneg_node = 0.;
// Get sum of total positive and negative weights using Kahan Summation
std::tie(Wpos_node, Wneg_node, std::ignore, std::ignore) =
kahan_bank(next_gen.begin(), next_gen.end());
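// Kahan (compensated) summation bounds the floating-point rounding error
// when accumulating many O(1) particle weights into a single total.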

// Get total weights for all nodes
std::vector<double> Wpos_each_node(static_cast<std::size_t>(mpi::size), 0.);
Wpos_each_node[static_cast<std::size_t>(mpi::rank)] = Wpos_node;
mpi::Allreduce_sum(Wpos_each_node);
const double Wpos = kahan(Wpos_each_node.begin(), Wpos_each_node.end(), 0.);

std::vector<double> Wneg_each_node(static_cast<std::size_t>(mpi::size), 0.);
Wneg_each_node[static_cast<std::size_t>(mpi::rank)] = Wneg_node;
mpi::Allreduce_sum(Wneg_each_node);
const double Wneg = kahan(Wneg_each_node.begin(), Wneg_each_node.end(), 0.);
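// Gathering the per-node sums into a vector, rather than reducing to a
// scalar, lets every rank perform the same deterministic Kahan sum; the
// per-node entries are also reused below to locate each node's segment of
// the global comb.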

// Determine how many positive and negative particles we want to have after
// combing, both on this node and globally
const std::size_t Npos_node = static_cast<std::size_t>(std::round(Wpos_node));
const std::size_t Nneg_node =
static_cast<std::size_t>(std::round(std::abs(Wneg_node)));

const std::size_t Npos = static_cast<std::size_t>(std::round(Wpos));
const std::size_t Nneg = static_cast<std::size_t>(std::round(std::abs(Wneg)));
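// With these targets the comb tooth spacings, Wpos / Npos and |Wneg| / Nneg,
// are both close to 1, so surviving particles carry roughly unit weight.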

// The + 2 accounts for rounding differences between the global totals and
// the per-node totals
std::vector<BankedParticle> new_next_gen;
new_next_gen.reserve(Npos_node + Nneg_node + 2);

// Variables for combing particles
const double avg_pos_wgt = Wpos / static_cast<double>(Npos);
const double avg_neg_wgt = std::abs(Wneg) / static_cast<double>(Nneg);
double comb_position_pos = 0.;
double comb_position_neg = 0.;
if (mpi::rank == 0) {
// The initial random offset of the comb is sampled on the master node.
comb_position_pos = settings::rng() * avg_pos_wgt;
comb_position_neg = settings::rng() * avg_neg_wgt;
}
mpi::Bcast(comb_position_pos, 0);
mpi::Bcast(comb_position_neg, 0);
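// Broadcasting the offsets ensures all ranks comb against the same global
// comb, making the result independent of the domain decomposition.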

// Find Comb Position for this Node
const double U_pos =
kahan(Wpos_each_node.begin(), Wpos_each_node.begin() + mpi::rank, 0.);
const double beta_pos = std::floor((U_pos - comb_position_pos) / avg_pos_wgt);

const double U_neg = std::abs(
kahan(Wneg_each_node.begin(), Wneg_each_node.begin() + mpi::rank, 0.));
const double beta_neg = std::floor((U_neg - comb_position_neg) / avg_neg_wgt);

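// U_pos and U_neg are this node's starting positions along the global
// positive and negative weight lines, and beta counts the comb teeth that
// fall before them. The shift below phases each node's first tooth so that
// it lines up with the global comb.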
if (mpi::rank != 0) {
comb_position_pos =
((beta_pos + 1.) * avg_pos_wgt) + comb_position_pos - U_pos;
if (comb_position_pos > avg_pos_wgt) comb_position_pos -= avg_pos_wgt;

comb_position_neg =
((beta_neg + 1.) * avg_neg_wgt) + comb_position_neg - U_neg;
if (comb_position_neg > avg_neg_wgt) comb_position_neg -= avg_neg_wgt;
}

double current_particle_pos = 0.;
double current_particle_neg = 0.;
// Comb positive and negative particles in a single pass, so that particle
// order does not depend on the parallelization scheme.
for (std::size_t i = 0; i < next_gen.size(); i++) {
if (next_gen[i].wgt > 0.) {
current_particle_pos += next_gen[i].wgt;
while (comb_position_pos < current_particle_pos) {
new_next_gen.push_back(next_gen[i]);
new_next_gen.back().wgt = avg_pos_wgt;
comb_position_pos += avg_pos_wgt;
}
} else {
current_particle_neg -= next_gen[i].wgt;
while (comb_position_neg < current_particle_neg) {
new_next_gen.push_back(next_gen[i]);
new_next_gen.back().wgt = -avg_neg_wgt;
comb_position_neg += avg_neg_wgt;
}
}
}

next_gen.swap(new_next_gen);
}
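For reference, the routine deleted above implements a standard weight comb: teeth are laid at a fixed spacing along the cumulative weight line, and a particle survives once per tooth that lands inside its weight interval, which preserves total weight in expectation. Below is a single-process sketch of the same idea, using a hypothetical Particle type rather than this project's BankedParticle, and handling only positive weights (the deleted code runs two interleaved combs, one per weight sign):

#include <cstddef>
#include <vector>

struct Particle {
  double wgt;
};

// Comb `bank` down to approximately N particles of equal weight W / N,
// where W is the total bank weight. Each input particle survives once per
// comb tooth falling inside its weight interval, so the expected weight
// contribution of every particle is preserved.
std::vector<Particle> comb(const std::vector<Particle>& bank, std::size_t N,
                           double xi /* random number in [0, 1) */) {
  double W = 0.;
  for (const auto& p : bank) W += p.wgt;
  const double spacing = W / static_cast<double>(N);

  double tooth = xi * spacing;  // random phase of the first comb tooth
  double cumulative = 0.;       // running position along the weight line
  std::vector<Particle> kept;
  kept.reserve(N);
  for (const auto& p : bank) {
    cumulative += p.wgt;
    while (tooth < cumulative) {  // a tooth falls inside this particle
      kept.push_back(p);
      kept.back().wgt = spacing;  // every survivor gets the average weight
      tooth += spacing;
    }
  }
  return kept;
}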

void AlphaPowerIterator::write_entropy_families_etc_to_results() const {
if (mpi::rank != 0) return;

@@ -965,34 +872,6 @@ void AlphaPowerIterator::check_time(std::size_t gen) {
}
}

void AlphaPowerIterator::perform_regional_cancellation(
std::vector<BankedParticle>& next_gen) {
// Count particles that do not land in any cancellation bin
std::size_t n_lost_boys = 0;

// Distribute Particles to Cancellation Bins
for (auto& p : next_gen) {
if (!cancelator->add_particle(p)) n_lost_boys++;
}

if (n_lost_boys > 0)
std::cout << " There are " << n_lost_boys
<< " particles with no cancellation bin.\n";

// Perform Cancellation for each Bin
cancelator->perform_cancellation();

// All particles from next_gen that were placed in a cancellation bin now
// have modified weights. We can now retrieve the uniformly sampled
// particles created by the cancelator.
auto tmp = cancelator->get_new_particles(settings::rng);

if (tmp.size() > 0) next_gen.insert(next_gen.begin(), tmp.begin(), tmp.end());

// All done! Clear the cancelator for the next generation.
cancelator->clear();
}
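The Cancelator is used above purely through its interface (add_particle, perform_cancellation, get_new_particles, clear). As a toy illustration of what bin-based weight cancellation accomplishes, and emphatically not this project's Cancelator implementation: particles sharing a bin exchange weight so that positive and negative components annihilate while the bin's net weight is conserved. A minimal sketch under that assumption:

#include <vector>

struct ToyParticle {
  double wgt;
};

// Uniform within-bin cancellation: replace each particle's weight with the
// bin average. Opposite-sign weights annihilate, the net bin weight is
// exactly conserved, and the post-cancellation weights all share one sign.
// (Real regional cancelators do something more elaborate than a plain
// average, but the conservation property is the same.)
void cancel_bin(std::vector<ToyParticle>& bin) {
  if (bin.empty()) return;
  double net = 0.;
  for (const auto& p : bin) net += p.wgt;
  const double avg = net / static_cast<double>(bin.size());
  for (auto& p : bin) p.wgt = avg;
}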

std::shared_ptr<AlphaPowerIterator> make_alpha_power_iterator(
const YAML::Node& sim) {
// Get the number of particles