Skip to content

Introduce create_identity_partitioner for refined mesh partitioning #3661

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 34 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
34 commits
Select commit Hold shift + click to select a range
22f7471
Sketch out strategy and copy maintain_coarse_partitioner (for non gho…
schnellerhase Mar 11, 2025
6160a96
A first version
schnellerhase Mar 11, 2025
c7f6e32
Compiling...
schnellerhase Mar 11, 2025
c4afbcb
Fix: python layer default value of partitioner does not align with cp…
schnellerhase Mar 11, 2025
d865056
Debug
schnellerhase Mar 11, 2025
9b8314b
Move compute_destination_ranks out of anonymous namespace
schnellerhase Mar 11, 2025
c4766dc
Add docstring
schnellerhase Mar 12, 2025
bb79ada
Improve sequential code path
schnellerhase Mar 12, 2025
d2ac333
Add explicit instantiations
schnellerhase Mar 12, 2025
b59a167
Doxygen fix explicit instantiation
schnellerhase Mar 12, 2025
5419dee
Move docstring to header
schnellerhase Mar 12, 2025
6f1fcbc
Remove cpp docstring
schnellerhase Mar 12, 2025
701e399
Change defaults and add special case of config
schnellerhase Mar 12, 2025
1d19a21
Switch to optional to handle cases correctly
schnellerhase Mar 12, 2025
5629a0e
Update docstring
schnellerhase Mar 12, 2025
20cdceb
Merge branch 'main' into fix_3443
schnellerhase Mar 15, 2025
21539cc
Merge branch 'main' into fix_3443
schnellerhase Mar 18, 2025
c386c6a
Fix Python export for optional
schnellerhase Mar 18, 2025
c4cc987
Merge branch 'main' into fix_3443
schnellerhase Mar 18, 2025
6a79e10
Update python default value for refinement option
schnellerhase Mar 18, 2025
dba96cc
Fix interval refinement test
schnellerhase Mar 18, 2025
c4eaa0b
Merge branch 'main' into fix_3443
garth-wells Mar 19, 2025
c2917dd
Partial fix
schnellerhase Mar 24, 2025
a8a2269
Merge branch 'main' into fix_3443
schnellerhase Mar 24, 2025
8973edc
Track down further
schnellerhase Mar 25, 2025
b769eeb
Showcase None export problem
schnellerhase Mar 25, 2025
b2b9902
Merge branch 'main' into fix_3443
schnellerhase Mar 31, 2025
fe6de20
Remove empty_partitioner and introduce helper variable as sentinel
schnellerhase Mar 31, 2025
91d02f0
Ruff
schnellerhase Mar 31, 2025
6004efb
Tidy docstring
schnellerhase Mar 31, 2025
fd71eeb
Return to previous test state
schnellerhase Mar 31, 2025
a3d79bf
Merge branch 'main' into fix_3443
schnellerhase Apr 2, 2025
fca1671
Add unit test for identity_partitioner
schnellerhase Apr 2, 2025
5b5a611
Merge branch 'main' into fix_3443
garth-wells Apr 12, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
32 changes: 15 additions & 17 deletions cpp/dolfinx/graph/partitioners.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -35,21 +35,8 @@ extern "C"

using namespace dolfinx;

namespace
{
/// @todo Is it un-documented that the owning rank must come first in
/// reach list of edges?
///
/// @param[in] comm The communicator
/// @param[in] graph Graph, using global indices for graph edges
/// @param[in] node_disp The distribution of graph nodes across MPI
/// ranks. The global index `gidx` of local index `lidx` is `lidx +
/// node_disp[my_rank]`.
/// @param[in] part The destination rank for owned nodes, i.e. `dest[i]`
/// is the destination of the node with local index `i`.
/// @return Destination ranks for each local node.
template <typename T>
graph::AdjacencyList<int> compute_destination_ranks(
graph::AdjacencyList<int> dolfinx::graph::compute_destination_ranks(
MPI_Comm comm, const graph::AdjacencyList<std::int64_t>& graph,
const std::vector<T>& node_disp, const std::vector<T>& part)
{
Expand Down Expand Up @@ -202,6 +189,18 @@ graph::AdjacencyList<int> compute_destination_ranks(

return g;
}

/// @cond
template graph::AdjacencyList<int> dolfinx::graph::compute_destination_ranks(
MPI_Comm comm, const graph::AdjacencyList<std::int64_t>& graph,
const std::vector<int>& node_disp, const std::vector<int>& part);

template graph::AdjacencyList<int> dolfinx::graph::compute_destination_ranks(
MPI_Comm comm, const graph::AdjacencyList<std::int64_t>& graph,
const std::vector<unsigned long long>& node_disp,
const std::vector<unsigned long long>& part);
/// @endcond

//-----------------------------------------------------------------------------
#ifdef HAS_PARMETIS
template <typename T>
Expand Down Expand Up @@ -304,7 +303,6 @@ std::vector<int> refine(MPI_Comm comm, const graph::AdjacencyList<T>& adj_graph)
//-----------------------------------------------------------------------------
}
#endif
} // namespace

//-----------------------------------------------------------------------------
#ifdef HAS_PTSCOTCH
Expand Down Expand Up @@ -587,7 +585,7 @@ graph::partition_fn graph::parmetis::partitioner(double imbalance,
{
// FIXME: Is it implicit that the first entry is the owner?
graph::AdjacencyList<int> dest
= compute_destination_ranks(pcomm, graph, node_disp, part);
= graph::compute_destination_ranks(pcomm, graph, node_disp, part);
if (split_comm)
MPI_Comm_free(&pcomm);
return dest;
Expand Down Expand Up @@ -653,7 +651,7 @@ graph::partition_fn graph::kahip::partitioner(int mode, int seed,
timer2.stop();

if (ghosting)
return compute_destination_ranks(comm, graph, node_disp, part);
return graph::compute_destination_ranks(comm, graph, node_disp, part);
else
{
return regular_adjacency_list(std::vector<int>(part.begin(), part.end()),
Expand Down
17 changes: 17 additions & 0 deletions cpp/dolfinx/graph/partitioners.h
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,23 @@

namespace dolfinx::graph
{

/// @todo Is it un-documented that the owning rank must come first in
/// each list of edges?
///
/// @param[in] comm The communicator
/// @param[in] graph Graph, using global indices for graph edges
/// @param[in] node_disp The distribution of graph nodes across MPI
/// ranks. The global index `gidx` of local index `lidx` is `lidx +
/// node_disp[my_rank]`.
/// @param[in] part The destination rank for owned nodes, i.e. `dest[i]`
/// is the destination of the node with local index `i`.
/// @return Destination ranks for each local node.
template <typename T>
graph::AdjacencyList<int> compute_destination_ranks(
MPI_Comm comm, const graph::AdjacencyList<std::int64_t>& graph,
const std::vector<T>& node_disp, const std::vector<T>& part);

namespace scotch
{
#ifdef HAS_PTSCOTCH
Expand Down
98 changes: 87 additions & 11 deletions cpp/dolfinx/refinement/refine.h
Original file line number Diff line number Diff line change
Expand Up @@ -6,21 +6,89 @@

#pragma once

#include "dolfinx/common/MPI.h"
#include "dolfinx/graph/AdjacencyList.h"
#include "dolfinx/graph/partitioners.h"
#include "dolfinx/mesh/Mesh.h"
#include "dolfinx/mesh/Topology.h"
#include "dolfinx/mesh/cell_types.h"
#include "dolfinx/mesh/graphbuild.h"
#include "dolfinx/mesh/utils.h"
#include "interval.h"
#include "plaza.h"
#include <algorithm>
#include <concepts>
#include <cstdint>
#include <mpi.h>
#include <numeric>
#include <optional>
#include <span>
#include <spdlog/spdlog.h>
#include <stdexcept>
#include <utility>
#include <vector>

namespace dolfinx::refinement
{

/**
 * @brief Create a cell partitioner which maintains the partition of a coarse
 * mesh.
 *
 * @tparam T floating point type of mesh geometry.
 * @param parent_mesh mesh indicating the partition scheme to use, i.e. the
 * coarse mesh.
 * @param parent_cell list of cell indices mapping cells of the new refined mesh
 * into the coarse mesh, usually output of `refinement::refine`.
 * @return The created cell partition function.
 *
 * @warning The returned function captures `parent_mesh` by reference and
 * `parent_cell` as a non-owning span; it must not be invoked after either
 * has been destroyed.
 */
template <std::floating_point T>
mesh::CellPartitionFunction
create_identity_partitioner(const mesh::Mesh<T>& parent_mesh,
                            std::span<std::int32_t> parent_cell)
{
  // TODO: optimize for non ghosted mesh?

  return [&parent_mesh,
          parent_cell](MPI_Comm comm, int /*nparts*/,
                       std::vector<mesh::CellType> cell_types,
                       std::vector<std::span<const std::int64_t>> cells)
             -> graph::AdjacencyList<std::int32_t>
  {
    // Index map of the coarse mesh's cells: owned cells have local index
    // in [0, size_local), ghost cells in [size_local, size_local +
    // num_ghosts).
    auto cell_im
        = parent_mesh.topology()->index_map(parent_mesh.topology()->dim());

    // NOTE: assumes a single cell type; mixed-topology meshes are not
    // supported here.
    std::int32_t num_cells
        = cells.front().size() / mesh::num_cell_vertices(cell_types.front());
    std::vector<std::int32_t> destinations(num_cells);

    // Querying the rank of MPI_COMM_NULL is an MPI error: on a null
    // communicator everything stays on the (single) local process.
    const int rank = (comm == MPI_COMM_NULL) ? 0 : dolfinx::MPI::rank(comm);

    // A refined cell is sent to the process owning its parent cell: refined
    // cells of owned parents stay on this rank, refined cells of ghost
    // parents go to the ghost's owning rank.
    for (std::size_t i = 0; i < destinations.size(); i++)
    {
      // Ghost indices start AT size_local, so the first ghost has local
      // index == size_local (hence >=, not >).
      bool ghost_parent_cell = parent_cell[i] >= cell_im->size_local();
      if (ghost_parent_cell)
      {
        destinations[i]
            = cell_im->owners()[parent_cell[i] - cell_im->size_local()];
      }
      else
      {
        destinations[i] = rank;
      }
    }

    if (comm == MPI_COMM_NULL)
    {
      return graph::regular_adjacency_list(std::move(destinations), 1);
    }

    // Build the dual graph of the refined cells and the node distribution
    // (prefix sum of per-rank node counts) needed to resolve destination
    // ranks, including ghosting information.
    auto dual_graph = mesh::build_dual_graph(comm, cell_types, cells);
    std::vector<std::int32_t> node_disp(MPI::size(comm) + 1, 0);
    std::int32_t local_size = dual_graph.num_nodes();
    MPI_Allgather(&local_size, 1, dolfinx::MPI::mpi_t<std::int32_t>,
                  node_disp.data() + 1, 1, dolfinx::MPI::mpi_t<std::int32_t>,
                  comm);
    std::partial_sum(node_disp.begin(), node_disp.end(), node_disp.begin());
    return graph::compute_destination_ranks(comm, dual_graph, node_disp,
                                            destinations);
  };
}

/// @brief Refine a mesh with markers.
///
/// The refined mesh can be optionally re-partitioned across processes.
Expand All @@ -35,30 +103,26 @@ namespace dolfinx::refinement
/// refined mesh will **not** include ghosts cells (cells connected by
/// facet to an owned cell) even if the parent mesh is ghosted.
///
/// @warning Passing `nullptr` for `partitioner`, the refined mesh will
/// **not** have ghosts cells even if the parent mesh is ghosted. The
/// possibility to not re-partition the refined mesh and include ghost
/// cells in the refined mesh will be added in a future release.
///
/// @param[in] mesh Input mesh to be refined.
/// @param[in] edges Indices of the edges that should be split in the
/// refinement. If not provided (`std::nullopt`), uniform refinement is
/// performed.
/// @param[in] partitioner Partitioner to be used to distribute the
/// refined mesh. If not callable, refined cells will be on the same
/// @param[in] partitioner (Optional) partitioner used to distribute the
/// refined mesh. If not provided (`std::nullopt`), the partition of the
/// coarse mesh is maintained, i.e. refined cells remain on the same
/// process as their parent cell.
/// @param[in] option Control the computation of parent facets, parent
/// cells. If an option is not selected, an empty list is returned.
/// cells.
/// @return New mesh, and optional parent cell indices and parent facet
/// indices.
template <std::floating_point T>
std::tuple<mesh::Mesh<T>, std::optional<std::vector<std::int32_t>>,
std::optional<std::vector<std::int8_t>>>
refine(const mesh::Mesh<T>& mesh,
std::optional<std::span<const std::int32_t>> edges,
const mesh::CellPartitionFunction& partitioner
= mesh::create_cell_partitioner(mesh::GhostMode::none),
Option option = Option::none)
std::optional<mesh::CellPartitionFunction> partitioner = std::nullopt,
Option option = Option::parent_cell)
{
auto topology = mesh.topology();
assert(topology);
Expand All @@ -70,9 +134,20 @@ refine(const mesh::Mesh<T>& mesh,
? interval::compute_refinement_data(mesh, edges, option)
: plaza::compute_refinement_data(mesh, edges, option);

if (!partitioner.has_value())
{
if (!parent_cell)
{
throw std::runtime_error(
"Identity partitioner relies on parent cell computation");
}
assert(parent_cell);
partitioner = create_identity_partitioner(mesh, parent_cell.value());
}

mesh::Mesh<T> mesh1 = mesh::create_mesh(
mesh.comm(), mesh.comm(), cell_adj.array(), mesh.geometry().cmap(),
mesh.comm(), new_vertex_coords, xshape, partitioner);
mesh.comm(), new_vertex_coords, xshape, *partitioner);

// Report the number of refined cells
const int D = topology->dim();
Expand All @@ -84,4 +159,5 @@ refine(const mesh::Mesh<T>& mesh,

return {std::move(mesh1), std::move(parent_cell), std::move(parent_facet)};
}

} // namespace dolfinx::refinement
18 changes: 8 additions & 10 deletions python/dolfinx/mesh.py
Original file line number Diff line number Diff line change
Expand Up @@ -518,8 +518,9 @@ def transfer_meshtag(
def refine(
msh: Mesh,
edges: typing.Optional[np.ndarray] = None,
partitioner: typing.Optional[typing.Callable] = create_cell_partitioner(GhostMode.none),
option: RefinementOption = RefinementOption.none,
redistribute: bool = False,
partitioner: typing.Optional[typing.Callable] = None,
option: RefinementOption = RefinementOption.parent_cell,
) -> tuple[Mesh, npt.NDArray[np.int32], npt.NDArray[np.int8]]:
"""Refine a mesh.

Expand All @@ -531,17 +532,14 @@ def refine(
mesh will **not** include ghosts cells (cells connected by facet
to an owned cells) even if the parent mesh is ghosted.

Warning:
Passing ``None`` for ``partitioner``, the refined mesh will
**not** have ghosts cells even if the parent mesh has ghost
cells. The possibility to not re-partition the refined mesh and
include ghost cells in the refined mesh will be added in a
future release.

Args:
msh: Mesh from which to create the refined mesh.
edges: Indices of edges to split during refinement. If ``None``,
mesh refinement is uniform.
redistribute: Controls whether the refined mesh should be redistributed during creation. If
``False`` (default) the refined mesh will exchange ghosts cells, but distribution of the
coarse mesh is maintained, otherwise the passed ``partitioner`` is used to redistribute
the refined mesh.
partitioner: Partitioner to distribute the refined mesh; only used when
``redistribute`` is ``True``. Must be ``None`` when ``redistribute`` is
``False``, in which case refined cells remain on the same process as
the parent cell.
Expand All @@ -552,7 +550,7 @@ def refine(
Refined mesh, (optional) parent cells, (optional) parent facets
"""
mesh1, parent_cell, parent_facet = _cpp.refinement.refine(
msh._cpp_object, edges, partitioner, option
msh._cpp_object, edges, redistribute, partitioner, option
)
# Create new ufl domain as it will carry a reference to the C++ mesh
# in the ufl_cargo
Expand Down
30 changes: 20 additions & 10 deletions python/dolfinx/wrappers/refinement.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
#include <nanobind/stl/vector.h>
#include <optional>
#include <span>
#include <stdexcept>

namespace nb = nanobind;

Expand All @@ -39,23 +40,32 @@ void export_refinement(nb::module_& m)
std::optional<
nb::ndarray<const std::int32_t, nb::ndim<1>, nb::c_contig>>
edges,
std::optional<
dolfinx_wrappers::part::impl::PythonCellPartitionFunction>
partitioner,
bool redistribute,
dolfinx_wrappers::part::impl::PythonCellPartitionFunction partitioner,
dolfinx::refinement::Option option)
{
if (!redistribute && partitioner)
{
throw std::runtime_error("Providing a partitioner and passing "
"redistribute=False is bugprone.");
}

std::optional<std::span<const std::int32_t>> cpp_edges(std::nullopt);
if (edges.has_value())
{
cpp_edges.emplace(
std::span(edges.value().data(), edges.value().size()));
}

dolfinx_wrappers::part::impl::CppCellPartitionFunction cpp_partitioner
= partitioner.has_value()
? dolfinx_wrappers::part::impl::create_cell_partitioner_cpp(
partitioner.value())
: nullptr;
std::optional<dolfinx_wrappers::part::impl::CppCellPartitionFunction>
cpp_partitioner(std::nullopt);
if (redistribute)
{
cpp_partitioner
= dolfinx_wrappers::part::impl::create_cell_partitioner_cpp(
partitioner);
}

auto [mesh1, cell, facet] = dolfinx::refinement::refine(
mesh, cpp_edges, cpp_partitioner, option);

Expand All @@ -78,8 +88,8 @@ void export_refinement(nb::module_& m)
return std::tuple{std::move(mesh1), std::move(python_cell),
std::move(python_facet)};
},
nb::arg("mesh"), nb::arg("edges").none(), nb::arg("partitioner").none(),
nb::arg("option"));
nb::arg("mesh"), nb::arg("edges").none(), nb::arg("redistribute"),
nb::arg("partitioner").none(), nb::arg("option"));
}
} // namespace

Expand Down
3 changes: 2 additions & 1 deletion python/test/unit/refinement/test_interval.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,8 @@
"ghost_mode_refined",
[mesh.GhostMode.none, mesh.GhostMode.shared_vertex, mesh.GhostMode.shared_facet],
)
@pytest.mark.parametrize("option", [mesh.RefinementOption.none, mesh.RefinementOption.parent_cell])
@pytest.mark.parametrize("option", [mesh.RefinementOption.parent_cell])
# TODO: reactivate mesh.RefinementOption.none
def test_refine_interval(n, ghost_mode, ghost_mode_refined, option):
msh = mesh.create_interval(MPI.COMM_WORLD, n, [0, 1], ghost_mode=ghost_mode)
msh_refined, edges, vertices = mesh.refine(
Expand Down
Loading
Loading