@@ -137,11 +137,6 @@ static u64 div_round_up(u64 dividend, u64 divisor)
 	return (dividend + divisor - 1) / divisor;
 }
 
-static bool vtime_before(u64 a, u64 b)
-{
-	return (s64)(a - b) < 0;
-}
-
 static bool cgv_node_less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 {
 	struct cgv_node *cgc_a, *cgc_b;
@@ -271,7 +266,7 @@ static void cgrp_cap_budget(struct cgv_node *cgv_node, struct fcg_cgrp_ctx *cgc)
 	 */
 	max_budget = (cgrp_slice_ns * nr_cpus * cgc->hweight) /
 		(2 * FCG_HWEIGHT_ONE);
-	if (vtime_before(cvtime, cvtime_now - max_budget))
+	if (time_before(cvtime, cvtime_now - max_budget))
 		cvtime = cvtime_now - max_budget;
 
 	cgv_node->cvtime = cvtime;
@@ -401,7 +396,7 @@ void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags)
 		 * Limit the amount of budget that an idling task can accumulate
 		 * to one slice.
 		 */
-		if (vtime_before(tvtime, cgc->tvtime_now - SCX_SLICE_DFL))
+		if (time_before(tvtime, cgc->tvtime_now - SCX_SLICE_DFL))
 			tvtime = cgc->tvtime_now - SCX_SLICE_DFL;
 
 		scx_bpf_dsq_insert_vtime(p, cgrp->kn->id, SCX_SLICE_DFL,
@@ -535,7 +530,7 @@ void BPF_STRUCT_OPS(fcg_running, struct task_struct *p)
 		 * from multiple CPUs and thus racy. Any error should be
 		 * contained and temporary. Let's just live with it.
 		 */
-		if (vtime_before(cgc->tvtime_now, p->scx.dsq_vtime))
+		if (time_before(cgc->tvtime_now, p->scx.dsq_vtime))
 			cgc->tvtime_now = p->scx.dsq_vtime;
 	}
 	bpf_cgroup_release(cgrp);
@@ -645,7 +640,7 @@ static bool try_pick_next_cgroup(u64 *cgidp)
 	cgv_node = container_of(rb_node, struct cgv_node, rb_node);
 	cgid = cgv_node->cgid;
 
-	if (vtime_before(cvtime_now, cgv_node->cvtime))
+	if (time_before(cvtime_now, cgv_node->cvtime))
 		cvtime_now = cgv_node->cvtime;
 
 	/*
@@ -744,7 +739,7 @@ void BPF_STRUCT_OPS(fcg_dispatch, s32 cpu, struct task_struct *prev)
 	if (!cpuc->cur_cgid)
 		goto pick_next_cgroup;
 
-	if (vtime_before(now, cpuc->cur_at + cgrp_slice_ns)) {
+	if (time_before(now, cpuc->cur_at + cgrp_slice_ns)) {
 		if (scx_bpf_dsq_move_to_local(cpuc->cur_cgid)) {
 			stat_inc(FCG_STAT_CNS_KEEP);
 			return;
@@ -920,14 +915,14 @@ void BPF_STRUCT_OPS(fcg_cgroup_move, struct task_struct *p,
 		    struct cgroup *from, struct cgroup *to)
 {
 	struct fcg_cgrp_ctx *from_cgc, *to_cgc;
-	s64 vtime_delta;
+	s64 delta;
 
 	/* find_cgrp_ctx() triggers scx_ops_error() on lookup failures */
 	if (!(from_cgc = find_cgrp_ctx(from)) || !(to_cgc = find_cgrp_ctx(to)))
 		return;
 
-	vtime_delta = p->scx.dsq_vtime - from_cgc->tvtime_now;
-	p->scx.dsq_vtime = to_cgc->tvtime_now + vtime_delta;
+	delta = time_delta(p->scx.dsq_vtime, from_cgc->tvtime_now);
+	p->scx.dsq_vtime = to_cgc->tvtime_now + delta;
 }
 
 s32 BPF_STRUCT_OPS_SLEEPABLE(fcg_init)
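
Note: the diff assumes wrap-safe time helpers shared across the schedulers, presumably time_before() and time_delta() from a common header such as include/scx/common.bpf.h, whose definitions are not part of these hunks. A minimal sketch of what such helpers typically look like, written here as standalone C with stdint typedefs purely for illustration (in BPF code u64/s64 would come from vmlinux.h instead):

	#include <stdint.h>
	#include <stdbool.h>

	typedef uint64_t u64;
	typedef int64_t s64;

	/* True if @a is before @b on a monotonically increasing u64 clock;
	 * the signed subtraction keeps the comparison correct across wraparound. */
	static inline bool time_before(u64 a, u64 b)
	{
		return (s64)(a - b) < 0;
	}

	/* Signed distance from @before to @after on the same clock. */
	static inline s64 time_delta(u64 after, u64 before)
	{
		return (s64)(after - before);
	}

With helpers along these lines, the scheduler can drop its local open-coded (s64)(a - b) < 0 comparison and the raw vtime subtraction in fcg_cgroup_move() in favor of the shared versions, which is what the hunks above do.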