3#include "wmtk/TetMesh.h"
4#include "wmtk/TriMesh.h"
5#include "wmtk/utils/Logger.hpp"
10#include <wmtk/utils/DisableWarnings.hpp>
11#include <tbb/concurrent_priority_queue.h>
12#include <tbb/concurrent_queue.h>
13#include <tbb/parallel_for.h>
14#include <tbb/parallel_reduce.h>
15#include <tbb/spin_mutex.h>
16#include <tbb/task_arena.h>
17#include <tbb/task_group.h>
18#include <wmtk/utils/EnableWarnings.hpp>
/// Policies controlling how the scheduler dispatches queued operations.
enum class ExecutionPolicy {
    kSeq,       ///< single-threaded, one global priority queue
    kUnSeq,     ///< unsequenced execution
    kPartition, ///< parallel, work bucketed by mesh partition id
    kColor,     ///< parallel, work grouped by graph coloring
    kMax        ///< sentinel / number of policies
};

/// An edit operation is identified by its registered name
/// (e.g. "edge_collapse", "tet_split").
using Op = std::string;
// ---------------------------------------------------------------------------
// NOTE(review): extraction-garbled fragment of the ExecutePass class template
// (per the index text at the bottom of this file).  The class head and many
// interior lines are missing; code tokens below are preserved byte-for-byte,
// only comments were added.
// ---------------------------------------------------------------------------
// Template over the application mesh type and the execution policy
// (defaults to sequential execution).
33template <
class AppMesh, ExecutionPolicy policy = ExecutionPolicy::kSeq>
// Tuple is the mesh's element-reference type.
36 using Tuple =
typename AppMesh::Tuple;
// edit_operation_maps (index text): "A dictionary that registers names with
// operations" — each operation takes the mesh and a Tuple and returns the
// affected Tuples, or nullopt on failure.
43 std::function<std::optional<std::vector<Tuple>>(AppMesh&,
const Tuple&)>>
// priority: ranks (op, tuple) pairs for the scheduling queue; the index text
// says it defaults to edge length.  The default lambda's body is missing
// from this fragment.
49 std::function<double(
const AppMesh&, Op op,
const Tuple&)>
priority = [](
auto&,
auto,
auto&) {
// should_renew (index text): "check on whether new operations should be
// added to the priority queue", given their weight.  Default: always true.
56 std::function<bool(
double)>
should_renew = [](
auto) {
return true; };
// renew_neighbor_tuples (index text): "renew neighboring Tuples after each
// operation depends on the operation".  Default: produce nothing.
61 std::function<std::vector<std::pair<Op, Tuple>>(
const AppMesh&, Op,
const std::vector<Tuple>&)>
63 [](
auto&,
auto,
auto&) -> std::vector<std::pair<Op, Tuple>> {
return {}; };
// lock_vertices (index text): "lock the vertices concerned depends on the
// operation"; task_id identifies the worker.  Default: no locking, succeed.
68 std::function<bool(AppMesh&,
const Tuple&,
int task_id)>
lock_vertices =
69 [](
const AppMesh&,
const Tuple&,
int task_id) {
return true; };
// is_weight_up_to_date (index text): drops a Tuple from processing when its
// cached weight/energy is stale.  Only the assert from its default body
// survives in this fragment.
91 std::function<bool(
const AppMesh&,
const std::tuple<double, Op, Tuple>& t)>
94 assert(std::get<2>(t).is_valid(m));
// on_fail: hook invoked when an operation fails.  Default: no-op.
100 std::function<void(
const AppMesh&, Op,
const Tuple& t)>
on_fail = [](
auto&,
auto,
auto&) {};
// ExecutePass() constructor (index text: "Construct a new Execute Pass
// object. It contains the name-to-operation map and the functions that
// def..."): registers the built-in edit operations into
// edit_operation_maps, dispatching at compile time on the mesh kind.
// NOTE(review): extraction-garbled fragment — most map keys, braces and the
// success/failure returns of the wrappers are missing; code tokens are
// preserved byte-for-byte, only comments were added.
// ----- TetMesh operations -----
118 if constexpr (std::is_base_of<wmtk::TetMesh, AppMesh>::value) {
// edge collapse: delegates to the mesh primitive, collecting affected
// tuples in `ret`.
121 [](AppMesh& m,
const Tuple& t) -> std::optional<std::vector<Tuple>> {
122 std::vector<Tuple> ret;
123 if (m.collapse_edge(t, ret))
// edge swap
129 [](AppMesh& m,
const Tuple& t) -> std::optional<std::vector<Tuple>> {
130 std::vector<Tuple> ret;
131 if (m.swap_edge(t, ret))
// 4-4 edge swap
137 [](AppMesh& m,
const Tuple& t) -> std::optional<std::vector<Tuple>> {
138 std::vector<Tuple> ret;
139 if (m.swap_edge_44(t, ret))
// 5-6 edge swap
145 [](AppMesh& m,
const Tuple& t) -> std::optional<std::vector<Tuple>> {
146 std::vector<Tuple> ret;
147 if (m.swap_edge_56(t, ret))
// edge split
153 [](AppMesh& m,
const Tuple& t) -> std::optional<std::vector<Tuple>> {
154 std::vector<Tuple> ret;
155 if (m.split_edge(t, ret))
// face swap
161 [](AppMesh& m,
const Tuple& t) -> std::optional<std::vector<Tuple>> {
162 std::vector<Tuple> ret;
163 if (m.swap_face(t, ret))
// vertex smoothing: produces no new tuples, so success returns an empty
// vector rather than nullopt.
169 [](AppMesh& m,
const Tuple& t) -> std::optional<std::vector<Tuple>> {
170 if (m.smooth_vertex(t))
171 return std::vector<Tuple>{};
// face split
176 [](AppMesh& m,
const Tuple& t) -> std::optional<std::vector<Tuple>> {
177 std::vector<Tuple> ret;
178 if (m.split_face(t, ret))
// tet split, registered under the name "tet_split"
183 {
"tet_split", [](AppMesh& m,
const Tuple& t) -> std::optional<std::vector<Tuple>> {
184 std::vector<Tuple> ret;
185 if (m.split_tet(t, ret))
// ----- TriMesh operations -----
191 if constexpr (std::is_base_of<wmtk::TriMesh, AppMesh>::value) {
// edge collapse
194 [](AppMesh& m,
const Tuple& t) -> std::optional<std::vector<Tuple>> {
195 std::vector<Tuple> ret;
196 if (m.collapse_edge(t, ret))
// edge swap
202 [](AppMesh& m,
const Tuple& t) -> std::optional<std::vector<Tuple>> {
203 std::vector<Tuple> ret;
204 if (m.swap_edge(t, ret))
// edge split
210 [](AppMesh& m,
const Tuple& t) -> std::optional<std::vector<Tuple>> {
211 std::vector<Tuple> ret;
212 if (m.split_edge(t, ret))
// vertex smoothing (empty vector on success, same convention as the
// TetMesh smoother above)
218 [](AppMesh& m,
const Tuple& t) -> std::optional<std::vector<Tuple>> {
219 if (m.smooth_vertex(t))
220 return std::vector<Tuple>{};
// face split, registered under the name "face_split"
224 {
"face_split", [](AppMesh& m,
const Tuple& t) -> std::optional<std::vector<Tuple>> {
225 std::vector<Tuple> ret;
226 if (m.split_face(t, ret))
// operation_cleanup(): post-operation cleanup.  Releases the mesh's
// vertex-mutex stack — presumably the locks taken via lock_vertices; confirm
// against the full source.  Under the sequential policy there is nothing to
// unlock (the branch body is missing in this fragment).
237 void operation_cleanup(AppMesh& m)
242 if constexpr (policy == ExecutionPolicy::kSeq)
245 m.release_vertex_mutex_in_stack();
// get_partition_id(): map an element to the partition (worker queue) that
// owns it.  Sequential policy uses a single partition (0); a TetMesh is
// asked directly, while a TriMesh reads the per-vertex partition attribute.
// NOTE(review): fragment — enclosing braces missing; tokens preserved.
249 size_t get_partition_id(
const AppMesh& m,
const Tuple& e)
251 if constexpr (policy == ExecutionPolicy::kSeq)
return 0;
252 if constexpr (std::is_base_of<wmtk::TetMesh, AppMesh>::value)
253 return m.get_partition_id(e);
254 else if constexpr (std::is_base_of<wmtk::TriMesh, AppMesh>::value)
256 return m.vertex_attrs[e.vid(m)].partition_id;
// operator()() (index text): "Executes the operations for an application
// when the lambda function is invoked."
// NOTE(review): extraction-garbled fragment — retry-limit handling, stopping
// criterion, lock acquisition and the final log's arguments are missing;
// code tokens preserved byte-for-byte, only comments added.
269 bool operator()(AppMesh& m,
const std::vector<std::pair<Op, Tuple>>& operation_tuples)
// Queue element: (priority weight, operation name, target tuple, retry count).
271 using Elem = std::tuple<double, Op, Tuple, size_t>;
272 using Queue = tbb::concurrent_priority_queue<Elem>;
// Cooperative stop flag shared by all workers.
274 auto stop = std::atomic<bool>(
false);
// One concurrent priority queue per worker thread/partition.
279 std::vector<Queue> queues(num_threads);
// Worker: drain one queue, executing operations until it is empty or the
// stop flag is raised.
282 auto run_single_queue = [&](Queue& Q,
int task_id) {
284 while ([&]() {
return Q.try_pop(ele_in_queue); }()) {
285 auto& [weight, op, tup, retry] = ele_in_queue;
// Skip entries whose tuple was invalidated by an earlier operation.
286 if (!tup.is_valid(m)) {
290 std::vector<Elem> renewed_elements;
// Retry path: requeue locally, or defer to final_queue — presumably once
// the retry limit is reached (surrounding code missing; confirm).
299 Q.emplace(ele_in_queue);
302 final_queue.emplace(ele_in_queue);
306 if (tup.is_valid(m)) {
// Entries whose cached weight is stale are dropped and cleaned up.
309 std::tuple<double, Op, Tuple>(weight, op, tup))) {
310 operation_cleanup(m);
// After a successful operation, collect follow-up (op, tuple) pairs for
// the affected neighborhood.
314 std::vector<std::pair<Op, Tuple>> renewed_tuples;
323 for (
const auto& [o, e] : renewed_tuples) {
326 renewed_elements.emplace_back(val, o, e, 0);
330 operation_cleanup(m);
// Push the renewed elements back for further processing.
332 for (
auto& e : renewed_elements) {
// Honor the cooperative stop flag (paired with a release store below).
336 if (stop.load(std::memory_order_acquire))
return;
342 cnt_update.store(0, std::memory_order_release);
// Sequential policy: everything goes through the single final queue on
// one thread.
347 if constexpr (policy == ExecutionPolicy::kSeq) {
348 for (
auto& [op, e] : operation_tuples) {
349 if (!e.is_valid(m))
continue;
350 final_queue.emplace(
priority(m, op, e), op, e, 0);
352 run_single_queue(final_queue, 0);
// Parallel policies: bucket the seed operations by partition id, run one
// TBB task per queue inside an arena, then drain leftovers sequentially.
354 for (
auto& [op, e] : operation_tuples) {
355 if (!e.is_valid(m))
continue;
356 queues[get_partition_id(m, e)].emplace(
priority(m, op, e), op, e, 0);
359 tbb::task_arena arena(num_threads);
361 arena.execute([&queues, &run_single_queue, &tg]() {
362 for (
int task_id = 0; task_id < queues.size(); task_id++) {
363 tg.run([&run_single_queue, &queues, task_id] {
364 run_single_queue(queues[task_id], task_id);
// Elements deferred during the parallel phase are finished on one thread.
369 logger().debug(
"Parallel Complete, remains element {}", final_queue.size());
370 run_single_queue(final_queue, 0);
// Final statistics log (remaining format arguments missing in fragment).
374 "executed: {} | success / fail: {} / {}",
375 (
int)cnt_success + (
int)cnt_fail,
// Statistics accessors: success/failure counts from the last run.
381 int get_cnt_success()
const {
return cnt_success; }
382 int get_cnt_fail()
const {
return cnt_fail; }
// Atomic counters updated concurrently by the worker threads.  cnt_update
// appears to track progress since the last reset in operator() (it is
// stored back to 0 there); confirm its exact role against the full source.
385 std::atomic_int cnt_update = 0;
386 std::atomic_int cnt_success = 0;
387 std::atomic_int cnt_fail = 0;
Definition ExecutionScheduler.hpp:35
size_t max_retry_limit
Definition ExecutionScheduler.hpp:109
std::function< void(const AppMesh &, Op, const Tuple &t)> on_fail
used to collect operations that are not finished and used for later re-execution
Definition ExecutionScheduler.hpp:100
size_t stopping_criterion_checking_frequency
checking frequency to decide whether to stop execution given the stopping criterion
Definition ExecutionScheduler.hpp:84
std::function< std::vector< std::pair< Op, Tuple > >(const AppMesh &, Op, const std::vector< Tuple > &)> renew_neighbor_tuples
renew neighboring Tuples after each operation depends on the operation
Definition ExecutionScheduler.hpp:62
bool operator()(AppMesh &m, const std::vector< std::pair< Op, Tuple > > &operation_tuples)
Executes the operations for an application when the lambda function is invoked. The rules that are cu...
Definition ExecutionScheduler.hpp:269
std::function< bool(double)> should_renew
check on whether new operations should be added to the priority queue
Definition ExecutionScheduler.hpp:56
std::function< bool(AppMesh &, const Tuple &, int task_id)> lock_vertices
lock the vertices concerned depends on the operation
Definition ExecutionScheduler.hpp:68
std::function< double(const AppMesh &, Op op, const Tuple &)> priority
Priority function (default to edge length)
Definition ExecutionScheduler.hpp:49
std::function< bool(const AppMesh &, const std::tuple< double, Op, Tuple > &t)> is_weight_up_to_date
Decides whether a Tuple should be dropped from processing — for example, when its cached energy is outdated.
Definition ExecutionScheduler.hpp:92
std::function< bool(const AppMesh &)> stopping_criterion
Stopping Criterion based on the whole mesh For efficiency, not every time is checked....
Definition ExecutionScheduler.hpp:77
std::map< Op, std::function< std::optional< std::vector< Tuple > >(AppMesh &, const Tuple &)> > edit_operation_maps
A dictionary that registers names with operations.
Definition ExecutionScheduler.hpp:44
ExecutePass()
Construct a new Execute Pass object. It contains the name-to-operation map and the functions that def...
Definition ExecutionScheduler.hpp:116