Commit 1cd701e (parent 501f165)

Rename distributed_graph_generator to command_graph_generator

19 files changed: +647, -650 lines

CMakeLists.txt

Lines changed: 1 addition & 1 deletion

@@ -208,7 +208,7 @@ endif()
 set(SOURCES
   src/command_graph.cc
   src/config.cc
-  src/distributed_graph_generator.cc
+  src/command_graph_generator.cc
   src/dry_run_executor.cc
   src/grid.cc
   src/live_executor.cc

include/{distributed_graph_generator.h → command_graph_generator.h}

Lines changed: 6 additions & 6 deletions
@@ -56,8 +56,8 @@ class write_command_state {
   command_id m_cid = 0;
 };
 
-class distributed_graph_generator {
-  friend struct distributed_graph_generator_testspy;
+class command_graph_generator {
+  friend struct command_graph_generator_testspy;
 
   inline static const write_command_state no_command = write_command_state(static_cast<command_id>(-1));
 
@@ -88,8 +88,8 @@ class distributed_graph_generator
     error_policy overlapping_write_error = error_policy::panic;
   };
 
-  distributed_graph_generator(const size_t num_nodes, const node_id local_nid, command_graph& cdag, const task_manager& tm,
-      detail::command_recorder* recorder, const policy_set& policy = default_policy_set());
+  command_graph_generator(const size_t num_nodes, const node_id local_nid, command_graph& cdag, const task_manager& tm, detail::command_recorder* recorder,
+      const policy_set& policy = default_policy_set());
 
   void notify_buffer_created(buffer_id bid, const range<3>& range, bool host_initialized);
 
@@ -179,8 +179,8 @@ class distributed_graph_generator
   detail::command_recorder* m_recorder = nullptr;
 };
 
-/// Topologically sort a command-set as returned from distributed_graph_generator::build_task() such that sequential execution satisfies all dependencies.
-/// TODO refactor distributed_graph_generator to intrinsically generate commands in dependency-order.
+/// Topologically sort a command-set as returned from command_graph_generator::build_task() such that sequential execution satisfies all dependencies.
+/// TODO refactor command_graph_generator to intrinsically generate commands in dependency-order.
 std::vector<abstract_command*> sort_topologically(command_set unmarked);
 
 } // namespace celerity::detail
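
Note: apart from the new identifiers, the renamed class is driven exactly as before. The following is a usage sketch (not part of this commit), mirroring src/scheduler.cc further below; it assumes the surrounding celerity::detail objects (num_nodes, local_nid, task_mngr, recorder, tsk, bid, buf_range) are provided by the caller.

// Sketch only, inside namespace celerity::detail:
command_graph cdag;
command_graph_generator cggen(num_nodes, local_nid, cdag, task_mngr, &recorder); // default policy_set
cggen.notify_buffer_created(bid, buf_range, /* host_initialized= */ false);
// build_task() returns an unordered command_set; sort it so that sequential
// execution satisfies all dependencies (see sort_topologically above).
const std::vector<abstract_command*> commands = sort_topologically(cggen.build_task(tsk));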

include/scheduler.h

Lines changed: 3 additions & 3 deletions
@@ -3,7 +3,7 @@
 #include <thread>
 #include <variant>
 
-#include "distributed_graph_generator.h"
+#include "command_graph_generator.h"
 #include "double_buffered_queue.h"
 #include "instruction_graph_generator.h"
 #include "ranges.h"
@@ -30,7 +30,7 @@ namespace detail {
   using delegate = instruction_graph_generator::delegate;
 
   struct policy_set {
-    detail::distributed_graph_generator::policy_set command_graph_generator;
+    detail::command_graph_generator::policy_set command_graph_generator;
     detail::instruction_graph_generator::policy_set instruction_graph_generator;
   };
 
@@ -106,7 +106,7 @@ namespace detail {
 
   std::unique_ptr<command_graph> m_cdag;
   command_recorder* m_crec;
-  std::unique_ptr<distributed_graph_generator> m_dggen;
+  std::unique_ptr<command_graph_generator> m_cggen;
   std::unique_ptr<instruction_graph> m_idag;
   instruction_recorder* m_irec;
   std::unique_ptr<instruction_graph_generator> m_iggen;
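
Note that the scheduler's policy_set member was already named command_graph_generator before this commit; only its type changes, so brace-initializing call sites keep working. A small sketch of overriding one generator policy (assumptions: policy_set remains an aggregate and error_policy is visible in this scope; ignore and panic are the enum values that appear elsewhere in this commit):

// Sketch only, inside namespace celerity::detail:
command_graph_generator::policy_set cggen_policy;            // overlapping_write_error defaults to panic (see header above)
cggen_policy.overlapping_write_error = error_policy::ignore; // illustrative override
// The scheduler then receives this value through its own policy_set member `command_graph_generator`.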

src/{distributed_graph_generator.cc → command_graph_generator.cc}

Lines changed: 19 additions & 19 deletions
@@ -1,4 +1,4 @@
-#include "distributed_graph_generator.h"
+#include "command_graph_generator.h"
 
 #include "access_modes.h"
 #include "command.h"
@@ -10,7 +10,7 @@
 
 namespace celerity::detail {
 
-distributed_graph_generator::distributed_graph_generator(
+command_graph_generator::command_graph_generator(
     const size_t num_nodes, const node_id local_nid, command_graph& cdag, const task_manager& tm, detail::command_recorder* recorder, const policy_set& policy)
     : m_num_nodes(num_nodes), m_local_nid(local_nid), m_policy(policy), m_cdag(cdag), m_task_mngr(tm), m_recorder(recorder) {
   if(m_num_nodes > max_num_nodes) {
@@ -28,7 +28,7 @@ distributed_graph_generator::distributed_graph_generator(
   m_epoch_for_new_commands = epoch_cmd->get_cid();
 }
 
-void distributed_graph_generator::notify_buffer_created(const buffer_id bid, const range<3>& range, bool host_initialized) {
+void command_graph_generator::notify_buffer_created(const buffer_id bid, const range<3>& range, bool host_initialized) {
   m_buffers.emplace(std::piecewise_construct, std::tuple{bid}, std::tuple{range, range});
   if(host_initialized && m_policy.uninitialized_read_error != error_policy::ignore) { m_buffers.at(bid).initialized_region = box(subrange({}, range)); }
   // Mark contents as available locally (= don't generate await push commands) and fully replicated (= don't generate push commands).
@@ -37,21 +37,21 @@ void distributed_graph_generator::notify_buffer_created(const buffer_id bid, con
   m_buffers.at(bid).replicated_regions.update_region(subrange<3>({}, range), node_bitset{}.set());
 }
 
-void distributed_graph_generator::notify_buffer_debug_name_changed(const buffer_id bid, const std::string& debug_name) {
+void command_graph_generator::notify_buffer_debug_name_changed(const buffer_id bid, const std::string& debug_name) {
   m_buffers.at(bid).debug_name = debug_name;
 }
 
-void distributed_graph_generator::notify_buffer_destroyed(const buffer_id bid) {
+void command_graph_generator::notify_buffer_destroyed(const buffer_id bid) {
   assert(m_buffers.count(bid) != 0);
   m_buffers.erase(bid);
 }
 
-void distributed_graph_generator::notify_host_object_created(const host_object_id hoid) {
+void command_graph_generator::notify_host_object_created(const host_object_id hoid) {
   assert(m_host_objects.count(hoid) == 0);
   m_host_objects.emplace(hoid, host_object_state{m_epoch_for_new_commands});
 }
 
-void distributed_graph_generator::notify_host_object_destroyed(const host_object_id hoid) {
+void command_graph_generator::notify_host_object_destroyed(const host_object_id hoid) {
   assert(m_host_objects.count(hoid) != 0);
   m_host_objects.erase(hoid);
 }
@@ -98,7 +98,7 @@ std::vector<abstract_command*> sort_topologically(command_set unmarked) {
   return sorted;
 }
 
-command_set distributed_graph_generator::build_task(const task& tsk) {
+command_set command_graph_generator::build_task(const task& tsk) {
   assert(m_current_cmd_batch.empty());
   [[maybe_unused]] const auto cmd_count_before = m_cdag.command_count();
 
@@ -147,7 +147,7 @@ command_set distributed_graph_generator::build_task(const task& tsk) {
   return std::move(m_current_cmd_batch);
 }
 
-void distributed_graph_generator::report_overlapping_writes(const task& tsk, const box_vector<3>& local_chunks) const {
+void command_graph_generator::report_overlapping_writes(const task& tsk, const box_vector<3>& local_chunks) const {
   const chunk<3> full_chunk{tsk.get_global_offset(), tsk.get_global_size(), tsk.get_global_size()};
 
   // Since this check is run distributed on every node, we avoid quadratic behavior by only checking for conflicts between all local chunks and the
@@ -170,7 +170,7 @@ void distributed_graph_generator::report_overlapping_writes(const task& tsk, con
   }
 }
 
-void distributed_graph_generator::generate_distributed_commands(const task& tsk) {
+void command_graph_generator::generate_distributed_commands(const task& tsk) {
   const chunk<3> full_chunk{tsk.get_global_offset(), tsk.get_global_size(), tsk.get_global_size()};
   const size_t num_chunks = m_num_nodes * 1; // TODO Make configurable
   const auto chunks = ([&] {
@@ -585,7 +585,7 @@ void distributed_graph_generator::generate_distributed_commands(const task& tsk)
   process_task_side_effect_requirements(tsk);
 }
 
-void distributed_graph_generator::generate_anti_dependencies(
+void command_graph_generator::generate_anti_dependencies(
     task_id tid, buffer_id bid, const region_map<write_command_state>& last_writers_map, const region<3>& write_req, abstract_command* write_cmd) {
   const auto last_writers = last_writers_map.get_region_values(write_req);
   for(const auto& [box, wcs] : last_writers) {
@@ -622,7 +622,7 @@ void distributed_graph_generator::generate_anti_dependencies(
   }
 }
 
-void distributed_graph_generator::process_task_side_effect_requirements(const task& tsk) {
+void command_graph_generator::process_task_side_effect_requirements(const task& tsk) {
   const task_id tid = tsk.get_id();
   if(tsk.get_side_effect_map().empty()) return; // skip the loop in the common case
   if(m_cdag.task_command_count(tid) == 0) return;
@@ -644,7 +644,7 @@ void distributed_graph_generator::process_task_side_effect_requirements(const ta
   }
 }
 
-void distributed_graph_generator::set_epoch_for_new_commands(const abstract_command* const epoch_or_horizon) {
+void command_graph_generator::set_epoch_for_new_commands(const abstract_command* const epoch_or_horizon) {
   // both an explicit epoch command and an applied horizon can be effective epochs
   assert(utils::isa<epoch_command>(epoch_or_horizon) || utils::isa<horizon_command>(epoch_or_horizon));
 
@@ -665,15 +665,15 @@ void distributed_graph_generator::set_epoch_for_new_commands(const abstract_comm
   m_epoch_for_new_commands = epoch_or_horizon->get_cid();
 }
 
-void distributed_graph_generator::reduce_execution_front_to(abstract_command* const new_front) {
+void command_graph_generator::reduce_execution_front_to(abstract_command* const new_front) {
   const auto previous_execution_front = m_cdag.get_execution_front();
   for(auto* const front_cmd : previous_execution_front) {
     if(front_cmd != new_front) { m_cdag.add_dependency(new_front, front_cmd, dependency_kind::true_dep, dependency_origin::execution_front); }
   }
   assert(m_cdag.get_execution_front().size() == 1 && *m_cdag.get_execution_front().begin() == new_front);
 }
 
-void distributed_graph_generator::generate_epoch_command(const task& tsk) {
+void command_graph_generator::generate_epoch_command(const task& tsk) {
   assert(tsk.get_type() == task_type::epoch);
   auto* const epoch = create_command<epoch_command>(tsk.get_id(), tsk.get_epoch_action(), std::move(m_completed_reductions));
   set_epoch_for_new_commands(epoch);
@@ -682,7 +682,7 @@ void distributed_graph_generator::generate_epoch_command(const task& tsk) {
   reduce_execution_front_to(epoch);
 }
 
-void distributed_graph_generator::generate_horizon_command(const task& tsk) {
+void command_graph_generator::generate_horizon_command(const task& tsk) {
   assert(tsk.get_type() == task_type::horizon);
   auto* const horizon = create_command<horizon_command>(tsk.get_id(), std::move(m_completed_reductions));
 
@@ -696,7 +696,7 @@ void distributed_graph_generator::generate_horizon_command(const task& tsk) {
   reduce_execution_front_to(horizon);
 }
 
-void distributed_graph_generator::generate_epoch_dependencies(abstract_command* cmd) {
+void command_graph_generator::generate_epoch_dependencies(abstract_command* cmd) {
   // No command must be re-ordered before its last preceding epoch to enforce the barrier semantics of epochs.
   // To guarantee that each node has a transitive true dependency (=temporal dependency) on the epoch, it is enough to add an epoch -> command dependency
   // to any command that has no other true dependencies itself and no graph traversal is necessary. This can be verified by a simple induction proof.
@@ -714,7 +714,7 @@ void distributed_graph_generator::generate_epoch_dependencies(abstract_command*
   }
 }
 
-void distributed_graph_generator::prune_commands_before(const command_id epoch) {
+void command_graph_generator::prune_commands_before(const command_id epoch) {
   if(epoch > m_epoch_last_pruned_before) {
     m_cdag.erase_if([&](abstract_command* cmd) {
       if(cmd->get_cid() < epoch) {
@@ -727,7 +727,7 @@ void distributed_graph_generator::prune_commands_before(const command_id epoch)
   }
 }
 
-std::string distributed_graph_generator::print_buffer_debug_label(const buffer_id bid) const {
+std::string command_graph_generator::print_buffer_debug_label(const buffer_id bid) const {
   return utils::make_buffer_debug_label(bid, m_buffers.at(bid).debug_name);
 }
 
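
The comment in generate_epoch_dependencies above argues that adding an epoch -> command edge only to commands that have no other true dependency is sufficient: every other command reaches the epoch transitively. A self-contained toy illustration of that argument (plain C++, not Celerity code):

#include <cassert>
#include <map>
#include <set>
#include <vector>

int main() {
    // Toy command graph: node 0 is the epoch; command 1 has no true dependencies; command 2 depends on 1.
    std::map<int, std::set<int>> true_deps{{1, {}}, {2, {1}}};
    const int epoch = 0;
    // The rule from generate_epoch_dependencies: only dependency-free commands get an epoch edge.
    for(auto& [cmd, deps] : true_deps) {
        if(deps.empty()) deps.insert(epoch);
    }
    // Every command now reaches the epoch transitively.
    for(const auto& [cmd, deps] : true_deps) {
        std::vector<int> stack(deps.begin(), deps.end());
        bool reaches_epoch = false;
        while(!stack.empty()) {
            const int d = stack.back();
            stack.pop_back();
            if(d == epoch) {
                reaches_epoch = true;
                break;
            }
            const auto& more = true_deps.at(d);
            stack.insert(stack.end(), more.begin(), more.end());
        }
        assert(reaches_epoch);
    }
    return 0;
}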

src/runtime.cc

Lines changed: 1 addition & 1 deletion
@@ -19,8 +19,8 @@
 #include "affinity.h"
 #include "backend/sycl_backend.h"
 #include "cgf_diagnostics.h"
+#include "command_graph_generator.h"
 #include "device_selection.h"
-#include "distributed_graph_generator.h"
 #include "dry_run_executor.h"
 #include "host_object.h"
 #include "instruction_graph_generator.h"

src/scheduler.cc

Lines changed: 9 additions & 9 deletions
@@ -1,6 +1,6 @@
 #include "scheduler.h"
 
-#include "distributed_graph_generator.h"
+#include "command_graph_generator.h"
 #include "instruction_graph_generator.h"
 #include "log.h"
 #include "named_threads.h"
@@ -16,7 +16,7 @@ namespace detail {
 abstract_scheduler::abstract_scheduler(const size_t num_nodes, const node_id local_node_id, const system_info& system, const task_manager& tm,
     delegate* const delegate, command_recorder* const crec, instruction_recorder* const irec, const policy_set& policy)
     : m_cdag(std::make_unique<command_graph>()), m_crec(crec),
-      m_dggen(std::make_unique<distributed_graph_generator>(num_nodes, local_node_id, *m_cdag, tm, crec, policy.command_graph_generator)),
+      m_cggen(std::make_unique<command_graph_generator>(num_nodes, local_node_id, *m_cdag, tm, crec, policy.command_graph_generator)),
       m_idag(std::make_unique<instruction_graph>()), m_irec(irec), //
       m_iggen(std::make_unique<instruction_graph_generator>(
           tm, num_nodes, local_node_id, system, *m_idag, delegate, irec, policy.instruction_graph_generator)) {}
@@ -44,7 +44,7 @@ namespace detail {
   CELERITY_DETAIL_TRACY_ZONE_SCOPED_V("scheduler::build_task", WebMaroon, "T{} build", tsk.get_id());
   CELERITY_DETAIL_TRACY_ZONE_TEXT(utils::make_task_debug_label(tsk.get_type(), tsk.get_id(), tsk.get_debug_name()));
 
-  commands = sort_topologically(m_dggen->build_task(tsk));
+  commands = sort_topologically(m_cggen->build_task(tsk));
 }
 
 for(const auto cmd : commands) {
@@ -65,37 +65,37 @@ namespace detail {
 [&](const event_buffer_created& e) {
   assert(!shutdown_epoch_emitted && !shutdown_epoch_reached);
   CELERITY_DETAIL_TRACY_ZONE_SCOPED_V("scheduler::buffer_created", DarkGreen, "B{} create", e.bid);
-  m_dggen->notify_buffer_created(e.bid, e.range, e.user_allocation_id != null_allocation_id);
+  m_cggen->notify_buffer_created(e.bid, e.range, e.user_allocation_id != null_allocation_id);
   m_iggen->notify_buffer_created(e.bid, e.range, e.elem_size, e.elem_align, e.user_allocation_id);
 },
 [&](const event_buffer_debug_name_changed& e) {
   assert(!shutdown_epoch_emitted && !shutdown_epoch_reached);
   CELERITY_DETAIL_TRACY_ZONE_SCOPED_V("scheduler::buffer_name_changed", DarkGreen, "B{} set name", e.bid);
-  m_dggen->notify_buffer_debug_name_changed(e.bid, e.debug_name);
+  m_cggen->notify_buffer_debug_name_changed(e.bid, e.debug_name);
   m_iggen->notify_buffer_debug_name_changed(e.bid, e.debug_name);
 },
 [&](const event_buffer_destroyed& e) {
   assert(!shutdown_epoch_emitted && !shutdown_epoch_reached);
   CELERITY_DETAIL_TRACY_ZONE_SCOPED_V("scheduler::buffer_destroyed", DarkGreen, "B{} destroy", e.bid);
-  m_dggen->notify_buffer_destroyed(e.bid);
+  m_cggen->notify_buffer_destroyed(e.bid);
   m_iggen->notify_buffer_destroyed(e.bid);
 },
 [&](const event_host_object_created& e) {
   assert(!shutdown_epoch_emitted && !shutdown_epoch_reached);
   CELERITY_DETAIL_TRACY_ZONE_SCOPED_V("scheduler::host_object_created", DarkGreen, "H{} create", e.hoid);
-  m_dggen->notify_host_object_created(e.hoid);
+  m_cggen->notify_host_object_created(e.hoid);
   m_iggen->notify_host_object_created(e.hoid, e.owns_instance);
 },
 [&](const event_host_object_destroyed& e) {
   assert(!shutdown_epoch_emitted && !shutdown_epoch_reached);
   CELERITY_DETAIL_TRACY_ZONE_SCOPED_V("scheduler::host_object_destroyed", DarkGreen, "H{} destroy", e.hoid);
-  m_dggen->notify_host_object_destroyed(e.hoid);
+  m_cggen->notify_host_object_destroyed(e.hoid);
   m_iggen->notify_host_object_destroyed(e.hoid);
 },
 [&](const event_epoch_reached& e) { //
   assert(!shutdown_epoch_reached);
   {
-    // The dggen automatically prunes the CDAG on generation, which is safe because commands are not shared across threads.
+    // The cggen automatically prunes the CDAG on generation, which is safe because commands are not shared across threads.
     // We might want to refactor this to match the IDAG behavior in the future.
     CELERITY_DETAIL_TRACY_ZONE_SCOPED("scheduler::prune_idag", Gray);
     m_idag->prune_before_epoch(e.tid);
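
The renamed comment ("the cggen automatically prunes the CDAG on generation") refers to prune_commands_before in the source diff above, which calls m_cdag.erase_if to drop every command whose id lies below the effective epoch. A standalone sketch of that erase-below-epoch pattern, using standard containers rather than the Celerity CDAG:

#include <algorithm>
#include <cstdint>
#include <vector>

using command_id = std::uint64_t;
struct toy_command { command_id cid; };

// Mirrors the idea of command_graph_generator::prune_commands_before():
// commands older than the effective epoch are removed from the graph.
void prune_before(std::vector<toy_command>& cdag, const command_id epoch) {
    cdag.erase(std::remove_if(cdag.begin(), cdag.end(),
                   [epoch](const toy_command& cmd) { return cmd.cid < epoch; }),
        cdag.end());
}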
