diff --git a/include/mesh/mesh_base.h b/include/mesh/mesh_base.h index eaee2a3b2d..a2f89f5880 100644 --- a/include/mesh/mesh_base.h +++ b/include/mesh/mesh_base.h @@ -1329,7 +1329,7 @@ class MeshBase : public ParallelObject * Recalculate any cached data after elements and nodes have been * repartitioned. */ - virtual void update_post_partitioning () {} + virtual void update_post_partitioning (); /** * If false is passed in then this mesh will no longer be renumbered @@ -1492,6 +1492,8 @@ class MeshBase : public ParallelObject * materials in a solid mechanics application, or regions where different * physical processes are important. The subdomain mapping is independent * from the parallel decomposition. + * + * This relies on the mesh cached element data being prepared. */ subdomain_id_type n_subdomains () const; @@ -1501,6 +1503,8 @@ class MeshBase : public ParallelObject * materials in a solid mechanics application, or regions where different * physical processes are important. The subdomain mapping is independent * from the parallel decomposition. + * + * This relies on the mesh cached element data being prepared. */ subdomain_id_type n_local_subdomains () const; @@ -1991,7 +1995,16 @@ class MeshBase : public ParallelObject * being prepared */ const std::set<subdomain_id_type> & get_mesh_subdomains() const - { libmesh_assert(this->is_prepared()); return _mesh_subdomains; } + { libmesh_assert(this->preparation().has_cached_elem_data); return _mesh_subdomains; } + + + /** + * \return The cached mesh subdomains. As long as the mesh is prepared, this + * should contain all the subdomain ids across processors. 
Relies on the mesh + * being prepared + */ + const std::set<subdomain_id_type> & get_mesh_local_subdomains() const + { libmesh_assert(this->preparation().has_cached_elem_data); return _mesh_local_subdomains; } #ifdef LIBMESH_ENABLE_PERIODIC /** @@ -2257,6 +2270,12 @@ class MeshBase : public ParallelObject */ std::set<subdomain_id_type> _mesh_subdomains; + /** + * We also cache the subdomain ids of the elements owned by this + * processor. + */ + std::set<subdomain_id_type> _mesh_local_subdomains; + /** * Map from "element set code" to list of set ids to which that element * belongs (and vice-versa). Remarks: diff --git a/src/mesh/distributed_mesh.C b/src/mesh/distributed_mesh.C index 9fdc2e3fae..17a6612d37 100644 --- a/src/mesh/distributed_mesh.C +++ b/src/mesh/distributed_mesh.C @@ -1060,7 +1060,7 @@ void DistributedMesh::redistribute () void DistributedMesh::update_post_partitioning () { - // this->recalculate_n_partitions(); + this->UnstructuredMesh::update_post_partitioning(); // Partitioning changes our numbers of unpartitioned objects this->update_parallel_id_counts(); diff --git a/src/mesh/mesh_base.C b/src/mesh/mesh_base.C index 9282a104d3..6a4eb5c8a8 100644 --- a/src/mesh/mesh_base.C +++ b/src/mesh/mesh_base.C @@ -120,6 +120,7 @@ MeshBase::MeshBase (const MeshBase & other_mesh) : _elem_default_orders(other_mesh._elem_default_orders), _supported_nodal_order(other_mesh._supported_nodal_order), _mesh_subdomains(other_mesh._mesh_subdomains), + _mesh_local_subdomains(other_mesh._mesh_local_subdomains), _elemset_codes_inverse_map(other_mesh._elemset_codes_inverse_map), _all_elemset_ids(other_mesh._all_elemset_ids), _spatial_dimension(other_mesh._spatial_dimension), @@ -209,6 +210,7 @@ MeshBase& MeshBase::operator= (MeshBase && other_mesh) _elem_default_orders = std::move(other_mesh.elem_default_orders()); _supported_nodal_order = other_mesh.supported_nodal_order(); _mesh_subdomains = other_mesh._mesh_subdomains; + _mesh_local_subdomains = other_mesh._mesh_local_subdomains; _elemset_codes = 
std::move(other_mesh._elemset_codes); _elemset_codes_inverse_map = std::move(other_mesh._elemset_codes_inverse_map); _all_elemset_ids = std::move(other_mesh._all_elemset_ids); @@ -325,6 +327,8 @@ bool MeshBase::locally_equals (const MeshBase & other_mesh) const return false; if (_mesh_subdomains != other_mesh._mesh_subdomains) return false; + if (_mesh_local_subdomains != other_mesh._mesh_local_subdomains) + return false; if (_all_elemset_ids != other_mesh._all_elemset_ids) return false; if (_elem_integer_names != other_mesh._elem_integer_names) @@ -1076,26 +1080,12 @@ void MeshBase::remove_ghosting_functor(GhostingFunctor & ghosting_functor) void MeshBase::subdomain_ids (std::set<subdomain_id_type> & ids, const bool global /* = true */) const { - // This requires an inspection on every processor - if (global) - parallel_object_only(); - - ids.clear(); - - for (const auto & elem : this->active_local_element_ptr_range()) - ids.insert(elem->subdomain_id()); + libmesh_assert(this->preparation().has_cached_elem_data); if (global) - { - // Only include the unpartitioned elements if the user requests the global IDs. - // In the case of the local subdomain IDs, it doesn't make sense to include the - // unpartitioned elements because said elements do not have a sense of locality. 
- for (const auto & elem : this->active_unpartitioned_element_ptr_range()) - ids.insert(elem->subdomain_id()); - - // Some subdomains may only live on other processors - this->comm().set_union(ids); - } + ids = this->get_mesh_subdomains(); + else + ids = this->get_mesh_local_subdomains(); } @@ -1112,16 +1102,19 @@ void MeshBase::redistribute() -subdomain_id_type MeshBase::n_subdomains() const +void MeshBase::update_post_partitioning() { - // This requires an inspection on every processor - parallel_object_only(); + _mesh_local_subdomains.clear(); - std::set<subdomain_id_type> ids; + for (const Elem * elem : this->active_local_element_ptr_range()) + _mesh_local_subdomains.insert(elem->subdomain_id()); +} - this->subdomain_ids (ids); - return cast_int<subdomain_id_type>(ids.size()); + +subdomain_id_type MeshBase::n_subdomains() const +{ + return cast_int<subdomain_id_type>(this->get_mesh_subdomains().size()); } @@ -1860,6 +1853,7 @@ void MeshBase::cache_elem_data() _elem_dims.clear(); _elem_default_orders.clear(); _mesh_subdomains.clear(); + _mesh_local_subdomains.clear(); _supported_nodal_order = MAXIMUM; for (const auto & elem : this->active_element_ptr_range()) @@ -1867,6 +1861,8 @@ void MeshBase::cache_elem_data() _elem_dims.insert(cast_int<unsigned char>(elem->dim())); _elem_default_orders.insert(elem->default_order()); _mesh_subdomains.insert(elem->subdomain_id()); + if (elem->processor_id() == this->processor_id()) + _mesh_local_subdomains.insert(elem->subdomain_id()); _supported_nodal_order = static_cast<Order> (std::min(static_cast<unsigned int>(_supported_nodal_order), @@ -2220,6 +2216,7 @@ MeshBase::copy_cached_data(const MeshBase & other_mesh) this->_elem_default_orders = other_mesh._elem_default_orders; this->_supported_nodal_order = other_mesh._supported_nodal_order; this->_mesh_subdomains = other_mesh._mesh_subdomains; + this->_mesh_local_subdomains = other_mesh._mesh_local_subdomains; } @@ -2481,13 +2478,18 @@ MeshBase::copy_constraint_rows(const SparseMatrix<Number> & constraint_operator, // DistributedMesh doesn't get confused and think you're not 
// adding them on all processors at once. int n_pids = 0; + processor_id_type best_pid = DofObject::invalid_processor_id; for (auto [pid, count] : pids) if (count >= n_pids) { n_pids = count; - added_elem->processor_id() = pid; - n->processor_id() = pid; + best_pid = pid; } + libmesh_assert_not_equal_to(best_pid, DofObject::invalid_processor_id); + added_elem->processor_id() = best_pid; + n->processor_id() = best_pid; + if (this->processor_id() == best_pid) + this->_mesh_local_subdomains.insert(new_sbd_id); } // Calculate constraint rows in an indexed form that's easy for us diff --git a/src/mesh/mesh_tetgen_interface.C b/src/mesh/mesh_tetgen_interface.C index 76cce90cd4..e37ae40511 100644 --- a/src/mesh/mesh_tetgen_interface.C +++ b/src/mesh/mesh_tetgen_interface.C @@ -125,6 +125,12 @@ void TetGenMeshInterface::triangulate_pointset () // We don't do this by default. if (this->_smooth_after_generating) LaplaceMeshSmoother(this->_mesh, 2).smooth(); + + // We've added a bunch of elements. A full prepare_for_use() would + // be expensive here and user code hasn't been expecting it, but we + // do have code downstream expecting element caches to be up to + // date. + this->_mesh.cache_elem_data(); } diff --git a/src/partitioning/partitioner.C b/src/partitioning/partitioner.C index d7df9162ce..b75dbeccff 100644 --- a/src/partitioning/partitioner.C +++ b/src/partitioning/partitioner.C @@ -215,6 +215,11 @@ void Partitioner::partition (MeshBase & mesh, if (n_parts == 1) { this->single_partition (mesh); + + // Give derived Mesh classes a chance to update any cached data + // to reflect the new partitioning + mesh.update_post_partitioning(); + return; } diff --git a/tests/geom/elem_test.h b/tests/geom/elem_test.h index b7d27cc29b..cbab0c0d77 100644 --- a/tests/geom/elem_test.h +++ b/tests/geom/elem_test.h @@ -232,15 +232,16 @@ class PerElemTest : public CppUnit::TestCase // out here; permutations that fix diagonals for us are used // instead. 
const std::vector<std::vector<unsigned int>> nodes_on_side = - { {0, 1, 2, 3}, // min z - {0, 1, 5, 4}, // min y - // {1, 2, 6, 5}, // max x - bad - {2, 6, 5, 1}, // max x - {2, 3, 7, 6}, // max y - // {3, 0, 4, 7}, // min x - bad - {0, 4, 7, 3}, // min x - // {4, 5, 6, 7} }; // max z - bad - {5, 6, 7, 4} }; // max z + { {0, 1, 2, 3}, // min z + {0, 1, 5, 4}, // min y + // {1, 2, 6, 5}, // max x - bad + {2, 6, 5, 1}, // max x + {2, 3, 7, 6}, // max y + // {3, 0, 4, 7}, // min x - bad + {0, 4, 7, 3}, // min x + // {4, 5, 6, 7} // max z - bad + {5, 6, 7, 4} // max z + }; #endif // Build all the sides. diff --git a/tests/mesh/mesh_elem_test.h b/tests/mesh/mesh_elem_test.h index 30cf7b2dbb..fc7e92d5e6 100644 --- a/tests/mesh/mesh_elem_test.h +++ b/tests/mesh/mesh_elem_test.h @@ -106,6 +106,11 @@ class MeshPerElemTest : public PerElemTest<elem_type> #endif } + // Setting all those processor ids to 0 changes our sets of local + // subdomains too. + other_mesh.cache_elem_data(); + this->_mesh->cache_elem_data(); + #ifdef LIBMESH_ENABLE_UNIQUE_ID other_mesh.set_next_unique_id(this->_mesh->parallel_max_unique_id()); this->_mesh->set_next_unique_id(this->_mesh->parallel_max_unique_id());