@@ -376,8 +376,6 @@ struct workqueue_struct {
 	struct wq_node_nr_active *node_nr_active[];	/* I: per-node nr_active */
 };
 
-static struct kmem_cache *pwq_cache;
-
 /*
  * Each pod type describes how CPUs should be grouped for unbound workqueues.
  * See the comment above workqueue_attrs->affn_scope.
@@ -389,20 +387,15 @@ struct wq_pod_type {
 	int			*cpu_pod;	/* cpu -> pod */
 };
 
-static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES];
-static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;
-
 static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = {
-	[WQ_AFFN_DFL]			= "default",
-	[WQ_AFFN_CPU]			= "cpu",
-	[WQ_AFFN_SMT]			= "smt",
-	[WQ_AFFN_CACHE]			= "cache",
-	[WQ_AFFN_NUMA]			= "numa",
-	[WQ_AFFN_SYSTEM]		= "system",
+	[WQ_AFFN_DFL]		= "default",
+	[WQ_AFFN_CPU]		= "cpu",
+	[WQ_AFFN_SMT]		= "smt",
+	[WQ_AFFN_CACHE]		= "cache",
+	[WQ_AFFN_NUMA]		= "numa",
+	[WQ_AFFN_SYSTEM]	= "system",
 };
 
-static bool wq_topo_initialized __read_mostly = false;
-
 /*
  * Per-cpu work items which run for longer than the following threshold are
  * automatically considered CPU intensive and excluded from concurrency
@@ -418,6 +411,12 @@ static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
 module_param_named(power_efficient, wq_power_efficient, bool, 0444);
 
 static bool wq_online;			/* can kworkers be created yet? */
+static bool wq_topo_initialized __read_mostly = false;
+
+static struct kmem_cache *pwq_cache;
+
+static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES];
+static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;
 
 /* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */
 static struct workqueue_attrs *wq_update_pod_attrs_buf;
@@ -2231,7 +2230,6 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 	 */
 	lockdep_assert_irqs_disabled();
 
-
 	/*
 	 * For a draining wq, only works from the same workqueue are
 	 * allowed. The __WQ_DESTROYING helps to spot the issue that
@@ -4121,8 +4119,8 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 	local_irq_restore(flags);
 
 	/*
-	 * This allows canceling during early boot. We know that @work
-	 * isn't executing.
+	 * Skip __flush_work() during early boot when we know that @work isn't
+	 * executing. This allows canceling during early boot.
 	 */
 	if (wq_online)
 		__flush_work(work, true);
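Taken together, the first three hunks only regroup global declarations: pwq_cache, wq_pod_types[], wq_affn_dfl and wq_topo_initialized move from their scattered spots near struct wq_pod_type and wq_affn_names down to the block that follows wq_online. A sketch of the resulting layout, reconstructed from the added lines above (exact alignment and surrounding context may differ from the tree):

static bool wq_online;			/* can kworkers be created yet? */
static bool wq_topo_initialized __read_mostly = false;

static struct kmem_cache *pwq_cache;

static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES];
static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;

/* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */
static struct workqueue_attrs *wq_update_pod_attrs_buf;

The remaining two hunks are cosmetic as well: a duplicate blank line in __queue_work() is dropped, and the early-boot comment in __cancel_work_timer() is reworded without changing the wq_online check.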