-#include "distributed_graph_generator.h"
+#include "command_graph_generator.h"

 #include "access_modes.h"
 #include "command.h"
@@ -10,7 +10,7 @@

 namespace celerity::detail {

-distributed_graph_generator::distributed_graph_generator(
+command_graph_generator::command_graph_generator(
     const size_t num_nodes, const node_id local_nid, command_graph& cdag, const task_manager& tm, detail::command_recorder* recorder, const policy_set& policy)
     : m_num_nodes(num_nodes), m_local_nid(local_nid), m_policy(policy), m_cdag(cdag), m_task_mngr(tm), m_recorder(recorder) {
     if(m_num_nodes > max_num_nodes) {
@@ -28,7 +28,7 @@ distributed_graph_generator::distributed_graph_generator(
     m_epoch_for_new_commands = epoch_cmd->get_cid();
 }

-void distributed_graph_generator::notify_buffer_created(const buffer_id bid, const range<3>& range, bool host_initialized) {
+void command_graph_generator::notify_buffer_created(const buffer_id bid, const range<3>& range, bool host_initialized) {
     m_buffers.emplace(std::piecewise_construct, std::tuple{bid}, std::tuple{range, range});
     if(host_initialized && m_policy.uninitialized_read_error != error_policy::ignore) { m_buffers.at(bid).initialized_region = box(subrange({}, range)); }
     // Mark contents as available locally (= don't generate await push commands) and fully replicated (= don't generate push commands).
@@ -37,21 +37,21 @@ void distributed_graph_generator::notify_buffer_created(const buffer_id bid, con
     m_buffers.at(bid).replicated_regions.update_region(subrange<3>({}, range), node_bitset{}.set());
 }

-void distributed_graph_generator::notify_buffer_debug_name_changed(const buffer_id bid, const std::string& debug_name) {
+void command_graph_generator::notify_buffer_debug_name_changed(const buffer_id bid, const std::string& debug_name) {
     m_buffers.at(bid).debug_name = debug_name;
 }

-void distributed_graph_generator::notify_buffer_destroyed(const buffer_id bid) {
+void command_graph_generator::notify_buffer_destroyed(const buffer_id bid) {
     assert(m_buffers.count(bid) != 0);
     m_buffers.erase(bid);
 }

-void distributed_graph_generator::notify_host_object_created(const host_object_id hoid) {
+void command_graph_generator::notify_host_object_created(const host_object_id hoid) {
     assert(m_host_objects.count(hoid) == 0);
     m_host_objects.emplace(hoid, host_object_state{m_epoch_for_new_commands});
 }

-void distributed_graph_generator::notify_host_object_destroyed(const host_object_id hoid) {
+void command_graph_generator::notify_host_object_destroyed(const host_object_id hoid) {
     assert(m_host_objects.count(hoid) != 0);
     m_host_objects.erase(hoid);
 }
@@ -98,7 +98,7 @@ std::vector<abstract_command*> sort_topologically(command_set unmarked) {
     return sorted;
 }

-command_set distributed_graph_generator::build_task(const task& tsk) {
+command_set command_graph_generator::build_task(const task& tsk) {
     assert(m_current_cmd_batch.empty());
     [[maybe_unused]] const auto cmd_count_before = m_cdag.command_count();

@@ -147,7 +147,7 @@ command_set distributed_graph_generator::build_task(const task& tsk) {
     return std::move(m_current_cmd_batch);
 }

-void distributed_graph_generator::report_overlapping_writes(const task& tsk, const box_vector<3>& local_chunks) const {
+void command_graph_generator::report_overlapping_writes(const task& tsk, const box_vector<3>& local_chunks) const {
     const chunk<3> full_chunk{tsk.get_global_offset(), tsk.get_global_size(), tsk.get_global_size()};

     // Since this check is run distributed on every node, we avoid quadratic behavior by only checking for conflicts between all local chunks and the
@@ -170,7 +170,7 @@ void distributed_graph_generator::report_overlapping_writes(const task& tsk, con
     }
 }

-void distributed_graph_generator::generate_distributed_commands(const task& tsk) {
+void command_graph_generator::generate_distributed_commands(const task& tsk) {
     const chunk<3> full_chunk{tsk.get_global_offset(), tsk.get_global_size(), tsk.get_global_size()};
     const size_t num_chunks = m_num_nodes * 1; // TODO Make configurable
     const auto chunks = ([&] {
@@ -585,7 +585,7 @@ void distributed_graph_generator::generate_distributed_commands(const task& tsk)
     process_task_side_effect_requirements(tsk);
 }

-void distributed_graph_generator::generate_anti_dependencies(
+void command_graph_generator::generate_anti_dependencies(
     task_id tid, buffer_id bid, const region_map<write_command_state>& last_writers_map, const region<3>& write_req, abstract_command* write_cmd) {
     const auto last_writers = last_writers_map.get_region_values(write_req);
     for(const auto& [box, wcs] : last_writers) {
@@ -622,7 +622,7 @@ void distributed_graph_generator::generate_anti_dependencies(
     }
 }

-void distributed_graph_generator::process_task_side_effect_requirements(const task& tsk) {
+void command_graph_generator::process_task_side_effect_requirements(const task& tsk) {
     const task_id tid = tsk.get_id();
     if(tsk.get_side_effect_map().empty()) return; // skip the loop in the common case
     if(m_cdag.task_command_count(tid) == 0) return;
@@ -644,7 +644,7 @@ void distributed_graph_generator::process_task_side_effect_requirements(const ta
     }
 }

-void distributed_graph_generator::set_epoch_for_new_commands(const abstract_command* const epoch_or_horizon) {
+void command_graph_generator::set_epoch_for_new_commands(const abstract_command* const epoch_or_horizon) {
     // both an explicit epoch command and an applied horizon can be effective epochs
     assert(utils::isa<epoch_command>(epoch_or_horizon) || utils::isa<horizon_command>(epoch_or_horizon));

@@ -665,15 +665,15 @@ void distributed_graph_generator::set_epoch_for_new_commands(const abstract_comm
     m_epoch_for_new_commands = epoch_or_horizon->get_cid();
 }

-void distributed_graph_generator::reduce_execution_front_to(abstract_command* const new_front) {
+void command_graph_generator::reduce_execution_front_to(abstract_command* const new_front) {
     const auto previous_execution_front = m_cdag.get_execution_front();
     for(auto* const front_cmd : previous_execution_front) {
         if(front_cmd != new_front) { m_cdag.add_dependency(new_front, front_cmd, dependency_kind::true_dep, dependency_origin::execution_front); }
     }
     assert(m_cdag.get_execution_front().size() == 1 && *m_cdag.get_execution_front().begin() == new_front);
 }

-void distributed_graph_generator::generate_epoch_command(const task& tsk) {
+void command_graph_generator::generate_epoch_command(const task& tsk) {
     assert(tsk.get_type() == task_type::epoch);
     auto* const epoch = create_command<epoch_command>(tsk.get_id(), tsk.get_epoch_action(), std::move(m_completed_reductions));
     set_epoch_for_new_commands(epoch);
@@ -682,7 +682,7 @@ void distributed_graph_generator::generate_epoch_command(const task& tsk) {
     reduce_execution_front_to(epoch);
 }

-void distributed_graph_generator::generate_horizon_command(const task& tsk) {
+void command_graph_generator::generate_horizon_command(const task& tsk) {
     assert(tsk.get_type() == task_type::horizon);
     auto* const horizon = create_command<horizon_command>(tsk.get_id(), std::move(m_completed_reductions));

@@ -696,7 +696,7 @@ void distributed_graph_generator::generate_horizon_command(const task& tsk) {
     reduce_execution_front_to(horizon);
 }

-void distributed_graph_generator::generate_epoch_dependencies(abstract_command* cmd) {
+void command_graph_generator::generate_epoch_dependencies(abstract_command* cmd) {
     // No command must be re-ordered before its last preceding epoch to enforce the barrier semantics of epochs.
     // To guarantee that each node has a transitive true dependency (=temporal dependency) on the epoch, it is enough to add an epoch -> command dependency
     // to any command that has no other true dependencies itself and no graph traversal is necessary. This can be verified by a simple induction proof.
@@ -714,7 +714,7 @@ void distributed_graph_generator::generate_epoch_dependencies(abstract_command*
     }
 }

-void distributed_graph_generator::prune_commands_before(const command_id epoch) {
+void command_graph_generator::prune_commands_before(const command_id epoch) {
     if(epoch > m_epoch_last_pruned_before) {
         m_cdag.erase_if([&](abstract_command* cmd) {
             if(cmd->get_cid() < epoch) {
@@ -727,7 +727,7 @@ void distributed_graph_generator::prune_commands_before(const command_id epoch)
     }
 }

-std::string distributed_graph_generator::print_buffer_debug_label(const buffer_id bid) const {
+std::string command_graph_generator::print_buffer_debug_label(const buffer_id bid) const {
     return utils::make_buffer_debug_label(bid, m_buffers.at(bid).debug_name);
 }
