Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-only
2 : /*
3 : * kernel/workqueue.c - generic async execution with shared worker pool
4 : *
5 : * Copyright (C) 2002 Ingo Molnar
6 : *
7 : * Derived from the taskqueue/keventd code by:
8 : * David Woodhouse <dwmw2@infradead.org>
9 : * Andrew Morton
10 : * Kai Petzke <wpp@marie.physik.tu-berlin.de>
11 : * Theodore Ts'o <tytso@mit.edu>
12 : *
13 : * Made to use alloc_percpu by Christoph Lameter.
14 : *
15 : * Copyright (C) 2010 SUSE Linux Products GmbH
16 : * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
17 : *
18 : * This is the generic async execution mechanism. Work items are
19 : * executed in process context. The worker pool is shared and
20 : * automatically managed. There are two worker pools for each CPU (one for
21 : * normal work items and the other for high priority ones) and some extra
22 : * pools for workqueues which are not bound to any specific CPU - the
23 : * number of these backing pools is dynamic.
24 : *
25 : * Please read Documentation/core-api/workqueue.rst for details.
26 : */
27 :
28 : #include <linux/export.h>
29 : #include <linux/kernel.h>
30 : #include <linux/sched.h>
31 : #include <linux/init.h>
32 : #include <linux/signal.h>
33 : #include <linux/completion.h>
34 : #include <linux/workqueue.h>
35 : #include <linux/slab.h>
36 : #include <linux/cpu.h>
37 : #include <linux/notifier.h>
38 : #include <linux/kthread.h>
39 : #include <linux/hardirq.h>
40 : #include <linux/mempolicy.h>
41 : #include <linux/freezer.h>
42 : #include <linux/debug_locks.h>
43 : #include <linux/lockdep.h>
44 : #include <linux/idr.h>
45 : #include <linux/jhash.h>
46 : #include <linux/hashtable.h>
47 : #include <linux/rculist.h>
48 : #include <linux/nodemask.h>
49 : #include <linux/moduleparam.h>
50 : #include <linux/uaccess.h>
51 : #include <linux/sched/isolation.h>
52 : #include <linux/nmi.h>
53 :
54 : #include "workqueue_internal.h"
55 :
56 : enum {
57 : /*
58 : * worker_pool flags
59 : *
60 : * A bound pool is either associated or disassociated with its CPU.
61 : * While associated (!DISASSOCIATED), all workers are bound to the
62 : * CPU and none has %WORKER_UNBOUND set and concurrency management
63 : * is in effect.
64 : *
65 : * While DISASSOCIATED, the cpu may be offline and all workers have
66 : * %WORKER_UNBOUND set and concurrency management disabled, and may
67 : * be executing on any CPU. The pool behaves as an unbound one.
68 : *
69 : * Note that DISASSOCIATED should be flipped only while holding
70 : * wq_pool_attach_mutex to avoid changing binding state while
71 : * worker_attach_to_pool() is in progress.
72 : */
73 : POOL_MANAGER_ACTIVE = 1 << 0, /* being managed */
74 : POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
75 :
76 : /* worker flags */
77 : WORKER_DIE = 1 << 1, /* die die die */
78 : WORKER_IDLE = 1 << 2, /* is idle */
79 : WORKER_PREP = 1 << 3, /* preparing to run works */
80 : WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */
81 : WORKER_UNBOUND = 1 << 7, /* worker is unbound */
82 : WORKER_REBOUND = 1 << 8, /* worker was rebound */
83 :
84 : WORKER_NOT_RUNNING = WORKER_PREP | WORKER_CPU_INTENSIVE |
85 : WORKER_UNBOUND | WORKER_REBOUND,
86 :
87 : NR_STD_WORKER_POOLS = 2, /* # standard pools per cpu */
88 :
89 : UNBOUND_POOL_HASH_ORDER = 6, /* hashed by pool->attrs */
90 : BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
91 :
92 : MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */
93 : IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */
94 :
95 : MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? HZ / 100 : 2,
96 : /* call for help after 10ms
97 : (min two ticks) */
98 : MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */
99 : CREATE_COOLDOWN = HZ, /* time to breathe after fail */
100 :
101 : /*
102 : * Rescue workers are used only in emergencies and shared by
103 : * all cpus. Give MIN_NICE.
104 : */
105 : RESCUER_NICE_LEVEL = MIN_NICE,
106 : HIGHPRI_NICE_LEVEL = MIN_NICE,
107 :
108 : WQ_NAME_LEN = 24,
109 : };
110 :
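/*
 * Illustrative arithmetic, not part of the original source: with
 * HZ == 1000, MAYDAY_INITIAL_TIMEOUT evaluates to HZ / 100 == 10
 * ticks (10ms); with HZ == 100, HZ / 100 == 1 falls below the
 * two-tick minimum, so the constant becomes 2 ticks (20ms).
 */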
111 : /*
112 : * Structure fields follow one of the following exclusion rules.
113 : *
114 : * I: Modifiable by initialization/destruction paths and read-only for
115 : * everyone else.
116 : *
117 : * P: Preemption protected. Disabling preemption is enough and should
118 : * only be modified and accessed from the local cpu.
119 : *
120 : * L: pool->lock protected. Access with pool->lock held.
121 : *
122 : * X: During normal operation, modification requires pool->lock and should
123 : * be done only from local cpu. Either disabling preemption on local
124 : * cpu or grabbing pool->lock is enough for read access. If
125 : * POOL_DISASSOCIATED is set, it's identical to L.
126 : *
127 : * A: wq_pool_attach_mutex protected.
128 : *
129 : * PL: wq_pool_mutex protected.
130 : *
131 : * PR: wq_pool_mutex protected for writes. RCU protected for reads.
132 : *
133 : * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
134 : *
135 : * PWR: wq_pool_mutex and wq->mutex protected for writes. Either or
136 : * RCU for reads.
137 : *
138 : * WQ: wq->mutex protected.
139 : *
140 : * WR: wq->mutex protected for writes. RCU protected for reads.
141 : *
142 : * MD: wq_mayday_lock protected.
143 : */
144 :
145 : /* struct worker is defined in workqueue_internal.h */
146 :
147 : struct worker_pool {
148 : raw_spinlock_t lock; /* the pool lock */
149 : int cpu; /* I: the associated cpu */
150 : int node; /* I: the associated node ID */
151 : int id; /* I: pool ID */
152 : unsigned int flags; /* X: flags */
153 :
154 : unsigned long watchdog_ts; /* L: watchdog timestamp */
155 :
156 : struct list_head worklist; /* L: list of pending works */
157 :
158 : int nr_workers; /* L: total number of workers */
159 : int nr_idle; /* L: currently idle workers */
160 :
161 : struct list_head idle_list; /* X: list of idle workers */
162 : struct timer_list idle_timer; /* L: worker idle timeout */
163 : struct timer_list mayday_timer; /* L: SOS timer for workers */
164 :
165 : /* a worker is either on busy_hash or idle_list, or the manager */
166 : DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
167 : /* L: hash of busy workers */
168 :
169 : struct worker *manager; /* L: purely informational */
170 : struct list_head workers; /* A: attached workers */
171 : struct completion *detach_completion; /* all workers detached */
172 :
173 : struct ida worker_ida; /* worker IDs for task name */
174 :
175 : struct workqueue_attrs *attrs; /* I: worker attributes */
176 : struct hlist_node hash_node; /* PL: unbound_pool_hash node */
177 : int refcnt; /* PL: refcnt for unbound pools */
178 :
179 : /*
180 : * The current concurrency level. As it's likely to be accessed
181 : * from other CPUs during try_to_wake_up(), put it in a separate
182 : * cacheline.
183 : */
184 : atomic_t nr_running ____cacheline_aligned_in_smp;
185 :
186 : /*
187 : * Destruction of pool is RCU protected to allow dereferences
188 : * from get_work_pool().
189 : */
190 : struct rcu_head rcu;
191 : } ____cacheline_aligned_in_smp;
192 :
193 : /*
194 : * The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS
195 : * of work_struct->data are used for flags and the remaining high bits
196 : * point to the pwq; thus, pwqs need to be aligned at two's power of the
197 : * number of flag bits.
198 : */
199 : struct pool_workqueue {
200 : struct worker_pool *pool; /* I: the associated pool */
201 : struct workqueue_struct *wq; /* I: the owning workqueue */
202 : int work_color; /* L: current color */
203 : int flush_color; /* L: flushing color */
204 : int refcnt; /* L: reference count */
205 : int nr_in_flight[WORK_NR_COLORS];
206 : /* L: nr of in_flight works */
207 : int nr_active; /* L: nr of active works */
208 : int max_active; /* L: max active works */
209 : struct list_head delayed_works; /* L: delayed works */
210 : struct list_head pwqs_node; /* WR: node on wq->pwqs */
211 : struct list_head mayday_node; /* MD: node on wq->maydays */
212 :
213 : /*
214 : * Release of unbound pwq is punted to system_wq. See put_pwq()
215 : * and pwq_unbound_release_workfn() for details. pool_workqueue
216 : * itself is also RCU protected so that the first pwq can be
217 : * determined without grabbing wq->mutex.
218 : */
219 : struct work_struct unbound_release_work;
220 : struct rcu_head rcu;
221 : } __aligned(1 << WORK_STRUCT_FLAG_BITS);
222 :
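/*
 * Illustrative sketch, not part of the original source: the
 * __aligned(1 << WORK_STRUCT_FLAG_BITS) above guarantees that the low
 * WORK_STRUCT_FLAG_BITS of every pwq address are zero, so a queued
 * work item can pack both the pwq pointer and its flag bits into
 * work->data:
 *
 *	data = (unsigned long)pwq | WORK_STRUCT_PENDING | WORK_STRUCT_PWQ;
 *	pwq  = (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK);
 */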
223 : /*
224 : * Structure used to wait for workqueue flush.
225 : */
226 : struct wq_flusher {
227 : struct list_head list; /* WQ: list of flushers */
228 : int flush_color; /* WQ: flush color waiting for */
229 : struct completion done; /* flush completion */
230 : };
231 :
232 : struct wq_device;
233 :
234 : /*
235 : * The externally visible workqueue. It relays the issued work items to
236 : * the appropriate worker_pool through its pool_workqueues.
237 : */
238 : struct workqueue_struct {
239 : struct list_head pwqs; /* WR: all pwqs of this wq */
240 : struct list_head list; /* PR: list of all workqueues */
241 :
242 : struct mutex mutex; /* protects this wq */
243 : int work_color; /* WQ: current work color */
244 : int flush_color; /* WQ: current flush color */
245 : atomic_t nr_pwqs_to_flush; /* flush in progress */
246 : struct wq_flusher *first_flusher; /* WQ: first flusher */
247 : struct list_head flusher_queue; /* WQ: flush waiters */
248 : struct list_head flusher_overflow; /* WQ: flush overflow list */
249 :
250 : struct list_head maydays; /* MD: pwqs requesting rescue */
251 : struct worker *rescuer; /* MD: rescue worker */
252 :
253 : int nr_drainers; /* WQ: drain in progress */
254 : int saved_max_active; /* WQ: saved pwq max_active */
255 :
256 : struct workqueue_attrs *unbound_attrs; /* PW: only for unbound wqs */
257 : struct pool_workqueue *dfl_pwq; /* PW: only for unbound wqs */
258 :
259 : #ifdef CONFIG_SYSFS
260 : struct wq_device *wq_dev; /* I: for sysfs interface */
261 : #endif
262 : #ifdef CONFIG_LOCKDEP
263 : char *lock_name;
264 : struct lock_class_key key;
265 : struct lockdep_map lockdep_map;
266 : #endif
267 : char name[WQ_NAME_LEN]; /* I: workqueue name */
268 :
269 : /*
270 : * Destruction of workqueue_struct is RCU protected to allow walking
271 : * the workqueues list without grabbing wq_pool_mutex.
272 : * This is used to dump all workqueues from sysrq.
273 : */
274 : struct rcu_head rcu;
275 :
276 : /* hot fields used during command issue, aligned to cacheline */
277 : unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */
278 : struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
279 : struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */
280 : };
281 :
282 : static struct kmem_cache *pwq_cache;
283 :
284 : static cpumask_var_t *wq_numa_possible_cpumask;
285 : /* possible CPUs of each node */
286 :
287 : static bool wq_disable_numa;
288 : module_param_named(disable_numa, wq_disable_numa, bool, 0444);
289 :
290 : /* see the comment above the definition of WQ_POWER_EFFICIENT */
291 : static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
292 : module_param_named(power_efficient, wq_power_efficient, bool, 0444);
293 :
294 : static bool wq_online; /* can kworkers be created yet? */
295 :
296 : static bool wq_numa_enabled; /* unbound NUMA affinity enabled */
297 :
298 : /* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
299 : static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
300 :
301 : static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
302 : static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
303 : static DEFINE_RAW_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
304 : /* wait for manager to go away */
305 : static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);
306 :
307 : static LIST_HEAD(workqueues); /* PR: list of all workqueues */
308 : static bool workqueue_freezing; /* PL: have wqs started freezing? */
309 :
310 : /* PL: allowable cpus for unbound wqs and work items */
311 : static cpumask_var_t wq_unbound_cpumask;
312 :
313 : /* CPU where unbound work was last round robin scheduled from this CPU */
314 : static DEFINE_PER_CPU(int, wq_rr_cpu_last);
315 :
316 : /*
317 : * Local execution of unbound work items is no longer guaranteed. The
318 : * following always forces round-robin CPU selection on unbound work items
319 : * to uncover usages which depend on it.
320 : */
321 : #ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
322 : static bool wq_debug_force_rr_cpu = true;
323 : #else
324 : static bool wq_debug_force_rr_cpu = false;
325 : #endif
326 : module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
327 :
328 : /* the per-cpu worker pools */
329 : static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);
330 :
331 : static DEFINE_IDR(worker_pool_idr); /* PR: idr of all pools */
332 :
333 : /* PL: hash of all unbound pools keyed by pool->attrs */
334 : static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
335 :
336 : /* I: attributes used when instantiating standard unbound pools on demand */
337 : static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
338 :
339 : /* I: attributes used when instantiating ordered pools on demand */
340 : static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
341 :
342 : struct workqueue_struct *system_wq __read_mostly;
343 : EXPORT_SYMBOL(system_wq);
344 : struct workqueue_struct *system_highpri_wq __read_mostly;
345 : EXPORT_SYMBOL_GPL(system_highpri_wq);
346 : struct workqueue_struct *system_long_wq __read_mostly;
347 : EXPORT_SYMBOL_GPL(system_long_wq);
348 : struct workqueue_struct *system_unbound_wq __read_mostly;
349 : EXPORT_SYMBOL_GPL(system_unbound_wq);
350 : struct workqueue_struct *system_freezable_wq __read_mostly;
351 : EXPORT_SYMBOL_GPL(system_freezable_wq);
352 : struct workqueue_struct *system_power_efficient_wq __read_mostly;
353 : EXPORT_SYMBOL_GPL(system_power_efficient_wq);
354 : struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
355 : EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
356 :
357 : static int worker_thread(void *__worker);
358 : static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
359 : static void show_pwq(struct pool_workqueue *pwq);
360 :
361 : #define CREATE_TRACE_POINTS
362 : #include <trace/events/workqueue.h>
363 :
364 : #define assert_rcu_or_pool_mutex() \
365 : RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
366 : !lockdep_is_held(&wq_pool_mutex), \
367 : "RCU or wq_pool_mutex should be held")
368 :
369 : #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \
370 : RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
371 : !lockdep_is_held(&wq->mutex) && \
372 : !lockdep_is_held(&wq_pool_mutex), \
373 : "RCU, wq->mutex or wq_pool_mutex should be held")
374 :
375 : #define for_each_cpu_worker_pool(pool, cpu) \
376 : for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
377 : (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
378 : (pool)++)
379 :
380 : /**
381 : * for_each_pool - iterate through all worker_pools in the system
382 : * @pool: iteration cursor
383 : * @pi: integer used for iteration
384 : *
385 : * This must be called either with wq_pool_mutex held or RCU read
386 : * locked. If the pool needs to be used beyond the locking in effect, the
387 : * caller is responsible for guaranteeing that the pool stays online.
388 : *
389 : * The if/else clause exists only for the lockdep assertion and can be
390 : * ignored.
391 : */
392 : #define for_each_pool(pool, pi) \
393 : idr_for_each_entry(&worker_pool_idr, pool, pi) \
394 : if (({ assert_rcu_or_pool_mutex(); false; })) { } \
395 : else
396 :
397 : /**
398 : * for_each_pool_worker - iterate through all workers of a worker_pool
399 : * @worker: iteration cursor
400 : * @pool: worker_pool to iterate workers of
401 : *
402 : * This must be called with wq_pool_attach_mutex.
403 : *
404 : * The if/else clause exists only for the lockdep assertion and can be
405 : * ignored.
406 : */
407 : #define for_each_pool_worker(worker, pool) \
408 : list_for_each_entry((worker), &(pool)->workers, node) \
409 : if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
410 : else
411 :
412 : /**
413 : * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
414 : * @pwq: iteration cursor
415 : * @wq: the target workqueue
416 : *
417 : * This must be called either with wq->mutex held or RCU read locked.
418 : * If the pwq needs to be used beyond the locking in effect, the caller is
419 : * responsible for guaranteeing that the pwq stays online.
420 : *
421 : * The if/else clause exists only for the lockdep assertion and can be
422 : * ignored.
423 : */
424 : #define for_each_pwq(pwq, wq) \
425 : list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node, \
426 : lockdep_is_held(&(wq->mutex)))
427 :
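/*
 * Usage sketch, not part of the original source: a read-only walk of a
 * workqueue's pwqs under RCU, per the locking rule documented above
 * (nr_pwqs is a hypothetical local counter):
 *
 *	rcu_read_lock();
 *	for_each_pwq(pwq, wq)
 *		nr_pwqs++;
 *	rcu_read_unlock();
 */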
428 : #ifdef CONFIG_DEBUG_OBJECTS_WORK
429 :
430 : static const struct debug_obj_descr work_debug_descr;
431 :
432 0 : static void *work_debug_hint(void *addr)
433 : {
434 0 : return ((struct work_struct *) addr)->func;
435 : }
436 :
437 11 : static bool work_is_static_object(void *addr)
438 : {
439 11 : struct work_struct *work = addr;
440 :
441 11 : return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
442 : }
443 :
444 : /*
445 : * fixup_init is called when:
446 : * - an active object is initialized
447 : */
448 0 : static bool work_fixup_init(void *addr, enum debug_obj_state state)
449 : {
450 0 : struct work_struct *work = addr;
451 :
452 0 : switch (state) {
453 : case ODEBUG_STATE_ACTIVE:
454 0 : cancel_work_sync(work);
455 0 : debug_object_init(work, &work_debug_descr);
456 0 : return true;
457 : default:
458 : return false;
459 : }
460 : }
461 :
462 : /*
463 : * fixup_free is called when:
464 : * - an active object is freed
465 : */
466 0 : static bool work_fixup_free(void *addr, enum debug_obj_state state)
467 : {
468 0 : struct work_struct *work = addr;
469 :
470 0 : switch (state) {
471 : case ODEBUG_STATE_ACTIVE:
472 0 : cancel_work_sync(work);
473 0 : debug_object_free(work, &work_debug_descr);
474 0 : return true;
475 : default:
476 : return false;
477 : }
478 : }
479 :
480 : static const struct debug_obj_descr work_debug_descr = {
481 : .name = "work_struct",
482 : .debug_hint = work_debug_hint,
483 : .is_static_object = work_is_static_object,
484 : .fixup_init = work_fixup_init,
485 : .fixup_free = work_fixup_free,
486 : };
487 :
488 1905 : static inline void debug_work_activate(struct work_struct *work)
489 : {
490 1905 : debug_object_activate(work, &work_debug_descr);
491 : }
492 :
493 1906 : static inline void debug_work_deactivate(struct work_struct *work)
494 : {
495 1906 : debug_object_deactivate(work, &work_debug_descr);
496 : }
497 :
498 6662 : void __init_work(struct work_struct *work, int onstack)
499 : {
500 6586 : if (onstack)
501 159 : debug_object_init_on_stack(work, &work_debug_descr);
502 : else
503 6427 : debug_object_init(work, &work_debug_descr);
504 6586 : }
505 : EXPORT_SYMBOL_GPL(__init_work);
506 :
507 170 : void destroy_work_on_stack(struct work_struct *work)
508 : {
509 159 : debug_object_free(work, &work_debug_descr);
510 159 : }
511 : EXPORT_SYMBOL_GPL(destroy_work_on_stack);
512 :
513 0 : void destroy_delayed_work_on_stack(struct delayed_work *work)
514 : {
515 0 : destroy_timer_on_stack(&work->timer);
516 0 : debug_object_free(&work->work, &work_debug_descr);
517 0 : }
518 : EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
519 :
520 : #else
521 : static inline void debug_work_activate(struct work_struct *work) { }
522 : static inline void debug_work_deactivate(struct work_struct *work) { }
523 : #endif
524 :
525 : /**
526 : * worker_pool_assign_id - allocate ID and assign it to @pool
527 : * @pool: the pool pointer of interest
528 : *
529 : * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
530 : * successfully, -errno on failure.
531 : */
532 9 : static int worker_pool_assign_id(struct worker_pool *pool)
533 : {
534 9 : int ret;
535 :
536 27 : lockdep_assert_held(&wq_pool_mutex);
537 :
538 9 : ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
539 : GFP_KERNEL);
540 9 : if (ret >= 0) {
541 9 : pool->id = ret;
542 9 : return 0;
543 : }
544 : return ret;
545 : }
546 :
547 : /**
548 : * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
549 : * @wq: the target workqueue
550 : * @node: the node ID
551 : *
552 : * This must be called with any of wq_pool_mutex, wq->mutex or RCU
553 : * read locked.
554 : * If the pwq needs to be used beyond the locking in effect, the caller is
555 : * responsible for guaranteeing that the pwq stays online.
556 : *
557 : * Return: The unbound pool_workqueue for @node.
558 : */
559 117 : static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
560 : int node)
561 : {
562 117 : assert_rcu_or_wq_mutex_or_pool_mutex(wq);
563 :
564 : /*
565 : * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
566 : * delayed item is pending. The plan is to keep CPU -> NODE
567 : * mapping valid and stable across CPU on/offlines. Once that
568 : * happens, this workaround can be removed.
569 : */
570 117 : if (unlikely(node == NUMA_NO_NODE))
571 0 : return wq->dfl_pwq;
572 :
573 117 : return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
574 : }
575 :
576 1905 : static unsigned int work_color_to_flags(int color)
577 : {
578 1905 : return color << WORK_STRUCT_COLOR_SHIFT;
579 : }
580 :
581 1906 : static int get_work_color(struct work_struct *work)
582 : {
583 1906 : return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
584 : ((1 << WORK_STRUCT_COLOR_BITS) - 1);
585 : }
586 :
587 242 : static int work_next_color(int color)
588 : {
589 242 : return (color + 1) % WORK_NR_COLORS;
590 : }
591 :
592 : /*
593 : * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
594 : * contain the pointer to the queued pwq. Once execution starts, the flag
595 : * is cleared and the high bits contain OFFQ flags and pool ID.
596 : *
597 : * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
598 : * and clear_work_data() can be used to set the pwq, pool or clear
599 : * work->data. These functions should only be called while the work is
600 : * owned - ie. while the PENDING bit is set.
601 : *
602 : * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
603 : * corresponding to a work. Pool is available once the work has been
604 : * queued anywhere after initialization until it is sync canceled. pwq is
605 : * available only while the work item is queued.
606 : *
607 : * %WORK_OFFQ_CANCELING is used to mark a work item which is being
608 : * canceled. While being canceled, a work item may have its PENDING set
609 : * but stay off timer and worklist for arbitrarily long and nobody should
610 : * try to steal the PENDING bit.
611 : */
612 4178 : static inline void set_work_data(struct work_struct *work, unsigned long data,
613 : unsigned long flags)
614 : {
615 4178 : WARN_ON_ONCE(!work_pending(work));
616 4181 : atomic_long_set(&work->data, data | flags | work_static(work));
617 4182 : }
618 :
619 1905 : static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
620 : unsigned long extra_flags)
621 : {
622 1905 : set_work_data(work, (unsigned long)pwq,
623 : WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
624 : }
625 :
626 0 : static void set_work_pool_and_keep_pending(struct work_struct *work,
627 : int pool_id)
628 : {
629 0 : set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
630 : WORK_STRUCT_PENDING);
631 : }
632 :
633 1908 : static void set_work_pool_and_clear_pending(struct work_struct *work,
634 : int pool_id)
635 : {
636 : /*
637 : * The following wmb is paired with the implied mb in
638 : * test_and_set_bit(PENDING) and ensures all updates to @work made
639 : * here are visible to and precede any updates by the next PENDING
640 : * owner.
641 : */
642 1908 : smp_wmb();
643 1908 : set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
644 : /*
645 : * The following mb guarantees that previous clear of a PENDING bit
646 : * will not be reordered with any speculative LOADS or STORES from
647 : * work->current_func, which is executed afterwards. This possible
648 : * reordering can lead to a missed execution on attempt to queue
649 : * the same @work. E.g. consider this case:
650 : *
651 : * CPU#0 CPU#1
652 : * ---------------------------- --------------------------------
653 : *
654 : * 1 STORE event_indicated
655 : * 2 queue_work_on() {
656 : * 3 test_and_set_bit(PENDING)
657 : * 4 } set_..._and_clear_pending() {
658 : * 5 set_work_data() # clear bit
659 : * 6 smp_mb()
660 : * 7 work->current_func() {
661 : * 8 LOAD event_indicated
662 : * }
663 : *
664 : * Without an explicit full barrier speculative LOAD on line 8 can
665 : * be executed before CPU#0 does STORE on line 1. If that happens,
666 : * CPU#0 observes the PENDING bit is still set and new execution of
667 : * a @work is not queued in the hope that CPU#1 will eventually
668 : * finish the queued @work. Meanwhile CPU#1 does not see
669 : * event_indicated is set, because speculative LOAD was executed
670 : * before actual STORE.
671 : */
672 1908 : smp_mb();
673 1908 : }
674 :
675 184 : static void clear_work_data(struct work_struct *work)
676 : {
677 184 : smp_wmb(); /* see set_work_pool_and_clear_pending() */
678 184 : set_work_data(work, WORK_STRUCT_NO_POOL, 0);
679 : }
680 :
681 1963 : static struct pool_workqueue *get_work_pwq(struct work_struct *work)
682 : {
683 1963 : unsigned long data = atomic_long_read(&work->data);
684 :
685 1963 : if (data & WORK_STRUCT_PWQ)
686 1954 : return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
687 : else
688 : return NULL;
689 : }
690 :
691 : /**
692 : * get_work_pool - return the worker_pool a given work was associated with
693 : * @work: the work item of interest
694 : *
695 : * Pools are created and destroyed under wq_pool_mutex, and allow read
696 : * access under RCU read lock. As such, this function should be
697 : * called under wq_pool_mutex or inside of a rcu_read_lock() region.
698 : *
699 : * All fields of the returned pool are accessible as long as the above
700 : * mentioned locking is in effect. If the returned pool needs to be used
701 : * beyond the critical section, the caller is responsible for ensuring the
702 : * returned pool is and stays online.
703 : *
704 : * Return: The worker_pool @work was last associated with. %NULL if none.
705 : */
706 2365 : static struct worker_pool *get_work_pool(struct work_struct *work)
707 : {
708 2365 : unsigned long data = atomic_long_read(&work->data);
709 2365 : int pool_id;
710 :
711 2365 : assert_rcu_or_pool_mutex();
712 :
713 2368 : if (data & WORK_STRUCT_PWQ)
714 9 : return ((struct pool_workqueue *)
715 9 : (data & WORK_STRUCT_WQ_DATA_MASK))->pool;
716 :
717 2359 : pool_id = data >> WORK_OFFQ_POOL_SHIFT;
718 2359 : if (pool_id == WORK_OFFQ_POOL_NONE)
719 : return NULL;
720 :
721 1172 : return idr_find(&worker_pool_idr, pool_id);
722 : }
723 :
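/*
 * Illustrative sketch, not part of the original source: once a work
 * item is off queue, the high bits of work->data hold the pool ID
 * rather than the pwq pointer, which is how the lookup above recovers
 * the last pool:
 *
 *	data    = (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT;
 *	pool_id = data >> WORK_OFFQ_POOL_SHIFT;
 *	pool    = idr_find(&worker_pool_idr, pool_id);
 */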
724 : /**
725 : * get_work_pool_id - return the worker pool ID a given work is associated with
726 : * @work: the work item of interest
727 : *
728 : * Return: The worker_pool ID @work was last associated with.
729 : * %WORK_OFFQ_POOL_NONE if none.
730 : */
731 186 : static int get_work_pool_id(struct work_struct *work)
732 : {
733 186 : unsigned long data = atomic_long_read(&work->data);
734 :
735 186 : if (data & WORK_STRUCT_PWQ)
736 0 : return ((struct pool_workqueue *)
737 0 : (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
738 :
739 186 : return data >> WORK_OFFQ_POOL_SHIFT;
740 : }
741 :
742 184 : static void mark_work_canceling(struct work_struct *work)
743 : {
744 184 : unsigned long pool_id = get_work_pool_id(work);
745 :
746 184 : pool_id <<= WORK_OFFQ_POOL_SHIFT;
747 184 : set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
748 184 : }
749 :
750 0 : static bool work_is_canceling(struct work_struct *work)
751 : {
752 0 : unsigned long data = atomic_long_read(&work->data);
753 :
754 0 : return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
755 : }
756 :
757 : /*
758 : * Policy functions. These define the policies on how the global worker
759 : * pools are managed. Unless noted otherwise, these functions assume that
760 : * they're being called with pool->lock held.
761 : */
762 :
763 3724 : static bool __need_more_worker(struct worker_pool *pool)
764 : {
765 1818 : return !atomic_read(&pool->nr_running);
766 : }
767 :
768 : /*
769 : * Need to wake up a worker? Called from anything but currently
770 : * running workers.
771 : *
772 : * Note that, because unbound workers never contribute to nr_running, this
773 : * function will always return %true for unbound pools as long as the
774 : * worklist isn't empty.
775 : */
776 3601 : static bool need_more_worker(struct worker_pool *pool)
777 : {
778 5419 : return !list_empty(&pool->worklist) && __need_more_worker(pool);
779 : }
780 :
781 : /* Can I start working? Called from busy but !running workers. */
782 1661 : static bool may_start_working(struct worker_pool *pool)
783 : {
784 1661 : return pool->nr_idle;
785 : }
786 :
787 : /* Do I need to keep working? Called from currently running workers. */
788 1895 : static bool keep_working(struct worker_pool *pool)
789 : {
790 1895 : return !list_empty(&pool->worklist) &&
791 256 : atomic_read(&pool->nr_running) <= 1;
792 : }
793 :
794 : /* Do we need a new worker? Called from manager. */
795 13 : static bool need_to_create_worker(struct worker_pool *pool)
796 : {
797 13 : return need_more_worker(pool) && !may_start_working(pool);
798 : }
799 :
800 : /* Do we have too many workers and should some go away? */
801 1691 : static bool too_many_workers(struct worker_pool *pool)
802 : {
803 1691 : bool managing = pool->flags & POOL_MANAGER_ACTIVE;
804 1691 : int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
805 1691 : int nr_busy = pool->nr_workers - nr_idle;
806 :
807 1691 : return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
808 : }
809 :
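/*
 * Illustrative example, not part of the original source: with
 * MAX_IDLE_WORKERS_RATIO == 4 and 8 busy workers, up to 3 idle workers
 * are tolerated ((3 - 2) * 4 == 4 < 8); a 4th idle worker makes
 * too_many_workers() return true ((4 - 2) * 4 == 8 >= 8), after which
 * the surplus is reaped once IDLE_WORKER_TIMEOUT expires.
 */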
810 : /*
811 : * Wake up functions.
812 : */
813 :
814 : /* Return the first idle worker. Safe with preemption disabled */
815 1838 : static struct worker *first_idle_worker(struct worker_pool *pool)
816 : {
817 1838 : if (unlikely(list_empty(&pool->idle_list)))
818 : return NULL;
819 :
820 1836 : return list_first_entry(&pool->idle_list, struct worker, entry);
821 : }
822 :
823 : /**
824 : * wake_up_worker - wake up an idle worker
825 : * @pool: worker pool to wake worker from
826 : *
827 : * Wake up the first idle worker of @pool.
828 : *
829 : * CONTEXT:
830 : * raw_spin_lock_irq(pool->lock).
831 : */
832 1837 : static void wake_up_worker(struct worker_pool *pool)
833 : {
834 1837 : struct worker *worker = first_idle_worker(pool);
835 :
836 1837 : if (likely(worker))
837 1836 : wake_up_process(worker->task);
838 1841 : }
839 :
840 : /**
841 : * wq_worker_running - a worker is running again
842 : * @task: task waking up
843 : *
844 : * This function is called when a worker returns from schedule()
845 : */
846 1869 : void wq_worker_running(struct task_struct *task)
847 : {
848 1869 : struct worker *worker = kthread_data(task);
849 :
850 1870 : if (!worker->sleeping)
851 : return;
852 151 : if (!(worker->flags & WORKER_NOT_RUNNING))
853 151 : atomic_inc(&worker->pool->nr_running);
854 151 : worker->sleeping = 0;
855 : }
856 :
857 : /**
858 : * wq_worker_sleeping - a worker is going to sleep
859 : * @task: task going to sleep
860 : *
861 : * This function is called from schedule() when a busy worker is
862 : * going to sleep. Preemption needs to be disabled to protect ->sleeping
863 : * assignment.
864 : */
865 1898 : void wq_worker_sleeping(struct task_struct *task)
866 : {
867 1898 : struct worker *next, *worker = kthread_data(task);
868 1898 : struct worker_pool *pool;
869 :
870 : /*
871 : * Rescuers, which may not have all the fields set up like normal
872 : * workers, also reach here, let's not access anything before
873 : * checking NOT_RUNNING.
874 : */
875 1898 : if (worker->flags & WORKER_NOT_RUNNING)
876 : return;
877 :
878 151 : pool = worker->pool;
879 :
880 : /* Return if preempted before wq_worker_running() was reached */
881 151 : if (worker->sleeping)
882 : return;
883 :
884 151 : worker->sleeping = 1;
885 151 : raw_spin_lock_irq(&pool->lock);
886 :
887 : /*
888 : * The counterpart of the following dec_and_test, implied mb,
889 : * worklist not empty test sequence is in insert_work().
890 : * Please read comment there.
891 : *
892 : * NOT_RUNNING is clear. This means that we're bound to and
893 : * running on the local cpu w/ rq lock held and preemption
894 : * disabled, which in turn means that no one else could be
895 : * manipulating idle_list, so dereferencing idle_list without pool
896 : * lock is safe.
897 : */
898 302 : if (atomic_dec_and_test(&pool->nr_running) &&
899 151 : !list_empty(&pool->worklist)) {
900 1 : next = first_idle_worker(pool);
901 1 : if (next)
902 1 : wake_up_process(next->task);
903 : }
904 151 : raw_spin_unlock_irq(&pool->lock);
905 : }
906 :
907 : /**
908 : * wq_worker_last_func - retrieve worker's last work function
909 : * @task: Task to retrieve last work function of.
910 : *
911 : * Determine the last function a worker executed. This is called from
912 : * the scheduler to get a worker's last known identity.
913 : *
914 : * CONTEXT:
915 : * raw_spin_lock_irq(rq->lock)
916 : *
917 : * This function is called during schedule() when a kworker is going
918 : * to sleep. It's used by psi to identify aggregation workers during
919 : * dequeuing, to allow periodic aggregation to shut off when that
920 : * worker is the last task in the system or cgroup to go to sleep.
921 : *
922 : * As this function doesn't involve any workqueue-related locking, it
923 : * only returns stable values when called from inside the scheduler's
924 : * queuing and dequeuing paths, when @task, which must be a kworker,
925 : * is guaranteed to not be processing any works.
926 : *
927 : * Return:
928 : * The last work function %current executed as a worker, NULL if it
929 : * hasn't executed any work yet.
930 : */
931 0 : work_func_t wq_worker_last_func(struct task_struct *task)
932 : {
933 0 : struct worker *worker = kthread_data(task);
934 :
935 0 : return worker->last_func;
936 : }
937 :
938 : /**
939 : * worker_set_flags - set worker flags and adjust nr_running accordingly
940 : * @worker: self
941 : * @flags: flags to set
942 : *
943 : * Set @flags in @worker->flags and adjust nr_running accordingly.
944 : *
945 : * CONTEXT:
946 : * raw_spin_lock_irq(pool->lock)
947 : */
948 1639 : static inline void worker_set_flags(struct worker *worker, unsigned int flags)
949 : {
950 1639 : struct worker_pool *pool = worker->pool;
951 :
952 1639 : WARN_ON_ONCE(worker->task != current);
953 :
954 : /* If transitioning into NOT_RUNNING, adjust nr_running. */
955 1639 : if ((flags & WORKER_NOT_RUNNING) &&
956 1639 : !(worker->flags & WORKER_NOT_RUNNING)) {
957 1525 : atomic_dec(&pool->nr_running);
958 : }
959 :
960 1639 : worker->flags |= flags;
961 1639 : }
962 :
963 : /**
964 : * worker_clr_flags - clear worker flags and adjust nr_running accordingly
965 : * @worker: self
966 : * @flags: flags to clear
967 : *
968 : * Clear @flags in @worker->flags and adjust nr_running accordingly.
969 : *
970 : * CONTEXT:
971 : * raw_spin_lock_irq(pool->lock)
972 : */
973 3308 : static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
974 : {
975 3308 : struct worker_pool *pool = worker->pool;
976 3308 : unsigned int oflags = worker->flags;
977 :
978 3308 : WARN_ON_ONCE(worker->task != current);
979 :
980 3308 : worker->flags &= ~flags;
981 :
982 : /*
983 : * If transitioning out of NOT_RUNNING, increment nr_running. Note
984 : * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is mask
985 : * of multiple flags, not a single flag.
986 : */
987 3308 : if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
988 1639 : if (!(worker->flags & WORKER_NOT_RUNNING))
989 1525 : atomic_inc(&pool->nr_running);
990 3308 : }
991 :
992 : /**
993 : * find_worker_executing_work - find worker which is executing a work
994 : * @pool: pool of interest
995 : * @work: work to find worker for
996 : *
997 : * Find a worker which is executing @work on @pool by searching
998 : * @pool->busy_hash which is keyed by the address of @work. For a worker
999 : * to match, its current execution should match the address of @work and
1000 : * its work function. This is to avoid unwanted dependency between
1001 : * unrelated work executions through a work item being recycled while still
1002 : * being executed.
1003 : *
1004 : * This is a bit tricky. A work item may be freed once its execution
1005 : * starts and nothing prevents the freed area from being recycled for
1006 : * another work item. If the same work item address ends up being reused
1007 : * before the original execution finishes, workqueue will identify the
1008 : * recycled work item as currently executing and make it wait until the
1009 : * current execution finishes, introducing an unwanted dependency.
1010 : *
1011 : * This function checks the work item address and work function to avoid
1012 : * false positives. Note that this isn't complete as one may construct a
1013 : * work function which can introduce dependency onto itself through a
1014 : * recycled work item. Well, if somebody wants to shoot oneself in the
1015 : * foot that badly, there's only so much we can do, and if such deadlock
1016 : * actually occurs, it should be easy to locate the culprit work function.
1017 : *
1018 : * CONTEXT:
1019 : * raw_spin_lock_irq(pool->lock).
1020 : *
1021 : * Return:
1022 : * Pointer to worker which is executing @work if found, %NULL
1023 : * otherwise.
1024 : */
1025 2194 : static struct worker *find_worker_executing_work(struct worker_pool *pool,
1026 : struct work_struct *work)
1027 : {
1028 2194 : struct worker *worker;
1029 :
1030 4388 : hash_for_each_possible(pool->busy_hash, worker, hentry,
1031 : (unsigned long)work)
1032 3 : if (worker->current_work == work &&
1033 3 : worker->current_func == work->func)
1034 3 : return worker;
1035 :
1036 : return NULL;
1037 : }
1038 :
1039 : /**
1040 : * move_linked_works - move linked works to a list
1041 : * @work: start of series of works to be scheduled
1042 : * @head: target list to append @work to
1043 : * @nextp: out parameter for nested worklist walking
1044 : *
1045 : * Schedule linked works starting from @work to @head. Work series to
1046 : * be scheduled starts at @work and includes any consecutive work with
1047 : * WORK_STRUCT_LINKED set in its predecessor.
1048 : *
1049 : * If @nextp is not NULL, it's updated to point to the next work of
1050 : * the last scheduled work. This allows move_linked_works() to be
1051 : * nested inside outer list_for_each_entry_safe().
1052 : *
1053 : * CONTEXT:
1054 : * raw_spin_lock_irq(pool->lock).
1055 : */
1056 48 : static void move_linked_works(struct work_struct *work, struct list_head *head,
1057 : struct work_struct **nextp)
1058 : {
1059 48 : struct work_struct *n;
1060 :
1061 : /*
1062 : * Linked worklist will always end before the end of the list,
1063 : * use NULL for list head.
1064 : */
1065 56 : list_for_each_entry_safe_from(work, n, NULL, entry) {
1066 56 : list_move_tail(&work->entry, head);
1067 56 : if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1068 : break;
1069 : }
1070 :
1071 : /*
1072 : * If we're already inside safe list traversal and have moved
1073 : * multiple works to the scheduled queue, the next position
1074 : * needs to be updated.
1075 : */
1076 48 : if (nextp)
1077 0 : *nextp = n;
1078 48 : }
1079 :
1080 : /**
1081 : * get_pwq - get an extra reference on the specified pool_workqueue
1082 : * @pwq: pool_workqueue to get
1083 : *
1084 : * Obtain an extra reference on @pwq. The caller should guarantee that
1085 : * @pwq has positive refcnt and be holding the matching pool->lock.
1086 : */
1087 1903 : static void get_pwq(struct pool_workqueue *pwq)
1088 : {
1089 3809 : lockdep_assert_held(&pwq->pool->lock);
1090 1905 : WARN_ON_ONCE(pwq->refcnt <= 0);
1091 1905 : pwq->refcnt++;
1092 1905 : }
1093 :
1094 : /**
1095 : * put_pwq - put a pool_workqueue reference
1096 : * @pwq: pool_workqueue to put
1097 : *
1098 : * Drop a reference of @pwq. If its refcnt reaches zero, schedule its
1099 : * destruction. The caller should be holding the matching pool->lock.
1100 : */
1101 1908 : static void put_pwq(struct pool_workqueue *pwq)
1102 : {
1103 3816 : lockdep_assert_held(&pwq->pool->lock);
1104 1908 : if (likely(--pwq->refcnt))
1105 : return;
1106 1 : if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
1107 : return;
1108 : /*
1109 : * @pwq can't be released under pool->lock, bounce to
1110 : * pwq_unbound_release_workfn(). This never recurses on the same
1111 : * pool->lock as this path is taken only for unbound workqueues and
1112 : * the release work item is scheduled on a per-cpu workqueue. To
1113 : * avoid lockdep warning, unbound pool->locks are given lockdep
1114 : * subclass of 1 in get_unbound_pool().
1115 : */
1116 1 : schedule_work(&pwq->unbound_release_work);
1117 : }
1118 :
1119 : /**
1120 : * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
1121 : * @pwq: pool_workqueue to put (can be %NULL)
1122 : *
1123 : * put_pwq() with locking. This function also allows %NULL @pwq.
1124 : */
1125 12 : static void put_pwq_unlocked(struct pool_workqueue *pwq)
1126 : {
1127 12 : if (pwq) {
1128 : /*
1129 : * As both pwqs and pools are RCU protected, the
1130 : * following lock operations are safe.
1131 : */
1132 2 : raw_spin_lock_irq(&pwq->pool->lock);
1133 2 : put_pwq(pwq);
1134 2 : raw_spin_unlock_irq(&pwq->pool->lock);
1135 : }
1136 12 : }
1137 :
1138 40 : static void pwq_activate_delayed_work(struct work_struct *work)
1139 : {
1140 40 : struct pool_workqueue *pwq = get_work_pwq(work);
1141 :
1142 40 : trace_workqueue_activate_work(work);
1143 40 : if (list_empty(&pwq->pool->worklist))
1144 37 : pwq->pool->watchdog_ts = jiffies;
1145 40 : move_linked_works(work, &pwq->pool->worklist, NULL);
1146 40 : __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
1147 40 : pwq->nr_active++;
1148 40 : }
1149 :
1150 40 : static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
1151 : {
1152 40 : struct work_struct *work = list_first_entry(&pwq->delayed_works,
1153 : struct work_struct, entry);
1154 :
1155 40 : pwq_activate_delayed_work(work);
1156 40 : }
1157 :
1158 : /**
1159 : * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
1160 : * @pwq: pwq of interest
1161 : * @color: color of work which left the queue
1162 : *
1163 : * A work has either completed or been removed from the pending queue;
1164 : * decrement nr_in_flight of its pwq and handle workqueue flushing.
1165 : *
1166 : * CONTEXT:
1167 : * raw_spin_lock_irq(pool->lock).
1168 : */
1169 1906 : static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
1170 : {
1171 : /* uncolored work items don't participate in flushing or nr_active */
1172 1906 : if (color == WORK_NO_COLOR)
1173 11 : goto out_put;
1174 :
1175 1895 : pwq->nr_in_flight[color]--;
1176 :
1177 1895 : pwq->nr_active--;
1178 1895 : if (!list_empty(&pwq->delayed_works)) {
1179 : /* one down, submit a delayed one */
1180 40 : if (pwq->nr_active < pwq->max_active)
1181 40 : pwq_activate_first_delayed(pwq);
1182 : }
1183 :
1184 : /* is flush in progress and are we at the flushing tip? */
1185 1895 : if (likely(pwq->flush_color != color))
1186 1895 : goto out_put;
1187 :
1188 : /* are there still in-flight works? */
1189 0 : if (pwq->nr_in_flight[color])
1190 0 : goto out_put;
1191 :
1192 : /* this pwq is done, clear flush_color */
1193 0 : pwq->flush_color = -1;
1194 :
1195 : /*
1196 : * If this was the last pwq, wake up the first flusher. It
1197 : * will handle the rest.
1198 : */
1199 0 : if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
1200 0 : complete(&pwq->wq->first_flusher->done);
1201 0 : out_put:
1202 1906 : put_pwq(pwq);
1203 1906 : }
1204 :
1205 : /**
1206 : * try_to_grab_pending - steal work item from worklist and disable irq
1207 : * @work: work item to steal
1208 : * @is_dwork: @work is a delayed_work
1209 : * @flags: place to store irq state
1210 : *
1211 : * Try to grab PENDING bit of @work. This function can handle @work in any
1212 : * stable state - idle, on timer or on worklist.
1213 : *
1214 : * Return:
1215 : *
1216 : * ======== ================================================================
1217 : * 1 if @work was pending and we successfully stole PENDING
1218 : * 0 if @work was idle and we claimed PENDING
1219 : * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry
1220 : * -ENOENT if someone else is canceling @work, this state may persist
1221 : * for arbitrarily long
1222 : * ======== ================================================================
1223 : *
1224 : * Note:
1225 : * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting
1226 : * interrupted while holding PENDING and @work off queue, irq must be
1227 : * disabled on entry. This, combined with delayed_work->timer being
1228 : * irqsafe, ensures that we return -EAGAIN for finite short period of time.
1229 : *
1230 : * On successful return, >= 0, irq is disabled and the caller is
1231 : * responsible for releasing it using local_irq_restore(*@flags).
1232 : *
1233 : * This function is safe to call from any context including IRQ handler.
1234 : */
1235 433 : static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
1236 : unsigned long *flags)
1237 : {
1238 433 : struct worker_pool *pool;
1239 433 : struct pool_workqueue *pwq;
1240 :
1241 866 : local_irq_save(*flags);
1242 :
1243 : /* try to steal the timer if it exists */
1244 433 : if (is_dwork) {
1245 249 : struct delayed_work *dwork = to_delayed_work(work);
1246 :
1247 : /*
1248 : * dwork->timer is irqsafe. If del_timer() fails, it's
1249 : * guaranteed that the timer is not queued anywhere and not
1250 : * running on the local CPU.
1251 : */
1252 249 : if (likely(del_timer(&dwork->timer)))
1253 : return 1;
1254 : }
1255 :
1256 : /* try to claim PENDING the normal way */
1257 431 : if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
1258 : return 0;
1259 :
1260 0 : rcu_read_lock();
1261 : /*
1262 : * The queueing is in progress, or it is already queued. Try to
1263 : * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
1264 : */
1265 0 : pool = get_work_pool(work);
1266 0 : if (!pool)
1267 0 : goto fail;
1268 :
1269 0 : raw_spin_lock(&pool->lock);
1270 : /*
1271 : * work->data is guaranteed to point to pwq only while the work
1272 : * item is queued on pwq->wq, and both updating work->data to point
1273 : * to pwq on queueing and to pool on dequeueing are done under
1274 : * pwq->pool->lock. This in turn guarantees that, if work->data
1275 : * points to pwq which is associated with a locked pool, the work
1276 : * item is currently queued on that pool.
1277 : */
1278 0 : pwq = get_work_pwq(work);
1279 0 : if (pwq && pwq->pool == pool) {
1280 0 : debug_work_deactivate(work);
1281 :
1282 : /*
1283 : * A delayed work item cannot be grabbed directly because
1284 : * it might have linked NO_COLOR work items which, if left
1285 : * on the delayed_list, will confuse pwq->nr_active
1286 : * management later on and cause stall. Make sure the work
1287 : * item is activated before grabbing.
1288 : */
1289 0 : if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
1290 0 : pwq_activate_delayed_work(work);
1291 :
1292 0 : list_del_init(&work->entry);
1293 0 : pwq_dec_nr_in_flight(pwq, get_work_color(work));
1294 :
1295 : /* work->data points to pwq iff queued, point to pool */
1296 0 : set_work_pool_and_keep_pending(work, pool->id);
1297 :
1298 0 : raw_spin_unlock(&pool->lock);
1299 0 : rcu_read_unlock();
1300 0 : return 1;
1301 : }
1302 0 : raw_spin_unlock(&pool->lock);
1303 0 : fail:
1304 0 : rcu_read_unlock();
1305 0 : local_irq_restore(*flags);
1306 0 : if (work_is_canceling(work))
1307 : return -ENOENT;
1308 0 : cpu_relax();
1309 0 : return -EAGAIN;
1310 : }
1311 :
1312 : /**
1313 : * insert_work - insert a work into a pool
1314 : * @pwq: pwq @work belongs to
1315 : * @work: work to insert
1316 : * @head: insertion point
1317 : * @extra_flags: extra WORK_STRUCT_* flags to set
1318 : *
1319 : * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to
1320 : * work_struct flags.
1321 : *
1322 : * CONTEXT:
1323 : * raw_spin_lock_irq(pool->lock).
1324 : */
1325 1905 : static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
1326 : struct list_head *head, unsigned int extra_flags)
1327 : {
1328 1905 : struct worker_pool *pool = pwq->pool;
1329 :
1330 : /* record the work call stack in order to print it in KASAN reports */
1331 1905 : kasan_record_aux_stack(work);
1332 :
1333 : /* we own @work, set data and link */
1334 1905 : set_work_pwq(work, pwq, extra_flags);
1335 1906 : list_add_tail(&work->entry, head);
1336 1906 : get_pwq(pwq);
1337 :
1338 : /*
1339 : * Ensure either wq_worker_sleeping() sees the above
1340 : * list_add_tail() or we see zero nr_running to avoid workers lying
1341 : * around lazily while there are works to be processed.
1342 : */
1343 1906 : smp_mb();
1344 :
1345 1906 : if (__need_more_worker(pool))
1346 1839 : wake_up_worker(pool);
1347 1906 : }
1348 :
1349 : /*
1350 : * Test whether @work is being queued from another work executing on the
1351 : * same workqueue.
1352 : */
1353 0 : static bool is_chained_work(struct workqueue_struct *wq)
1354 : {
1355 0 : struct worker *worker;
1356 :
1357 0 : worker = current_wq_worker();
1358 : /*
1359 : * Return %true iff I'm a worker executing a work item on @wq. If
1360 : * I'm @worker, it's safe to dereference it without locking.
1361 : */
1362 0 : return worker && worker->current_pwq->wq == wq;
1363 : }
1364 :
1365 : /*
1366 : * When queueing an unbound work item to a wq, prefer local CPU if allowed
1367 : * by wq_unbound_cpumask. Otherwise, round robin among the allowed ones to
1368 : * avoid perturbing sensitive tasks.
1369 : */
1370 117 : static int wq_select_unbound_cpu(int cpu)
1371 : {
1372 117 : static bool printed_dbg_warning;
1373 117 : int new_cpu;
1374 :
1375 117 : if (likely(!wq_debug_force_rr_cpu)) {
1376 117 : if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
1377 : return cpu;
1378 0 : } else if (!printed_dbg_warning) {
1379 0 : pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
1380 0 : printed_dbg_warning = true;
1381 : }
1382 :
1383 0 : if (cpumask_empty(wq_unbound_cpumask))
1384 : return cpu;
1385 :
1386 0 : new_cpu = __this_cpu_read(wq_rr_cpu_last);
1387 0 : new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
1388 0 : if (unlikely(new_cpu >= nr_cpu_ids)) {
1389 0 : new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
1390 0 : if (unlikely(new_cpu >= nr_cpu_ids))
1391 : return cpu;
1392 : }
1393 0 : __this_cpu_write(wq_rr_cpu_last, new_cpu);
1394 :
1395 0 : return new_cpu;
1396 : }
1397 :
1398 1894 : static void __queue_work(int cpu, struct workqueue_struct *wq,
1399 : struct work_struct *work)
1400 : {
1401 1894 : struct pool_workqueue *pwq;
1402 1894 : struct worker_pool *last_pool;
1403 1894 : struct list_head *worklist;
1404 1894 : unsigned int work_flags;
1405 1894 : unsigned int req_cpu = cpu;
1406 :
1407 : /*
1408 : * While a work item is PENDING && off queue, a task trying to
1409 : * steal the PENDING will busy-loop waiting for it to either get
1410 : * queued or lose PENDING. Grabbing PENDING and queueing should
1411 : * happen with IRQ disabled.
1412 : */
1413 3788 : lockdep_assert_irqs_disabled();
1414 :
1415 1894 : debug_work_activate(work);
1416 :
1417 : /* if draining, only works from the same workqueue are allowed */
1418 1894 : if (unlikely(wq->flags & __WQ_DRAINING) &&
1419 0 : WARN_ON_ONCE(!is_chained_work(wq)))
1420 : return;
1421 1894 : rcu_read_lock();
1422 1894 : retry:
1423 : /* pwq which will be used unless @work is executing elsewhere */
1424 1894 : if (wq->flags & WQ_UNBOUND) {
1425 117 : if (req_cpu == WORK_CPU_UNBOUND)
1426 117 : cpu = wq_select_unbound_cpu(raw_smp_processor_id());
1427 117 : pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
1428 : } else {
1429 1777 : if (req_cpu == WORK_CPU_UNBOUND)
1430 1594 : cpu = raw_smp_processor_id();
1431 1777 : pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
1432 : }
1433 :
1434 : /*
1435 : * If @work was previously on a different pool, it might still be
1436 : * running there, in which case the work needs to be queued on that
1437 : * pool to guarantee non-reentrancy.
1438 : */
1439 1894 : last_pool = get_work_pool(work);
1440 1894 : if (last_pool && last_pool != pwq->pool) {
1441 279 : struct worker *worker;
1442 :
1443 279 : raw_spin_lock(&last_pool->lock);
1444 :
1445 279 : worker = find_worker_executing_work(last_pool, work);
1446 :
1447 279 : if (worker && worker->current_pwq->wq == wq) {
1448 : pwq = worker->current_pwq;
1449 : } else {
1450 : /* meh... not running there, queue here */
1451 279 : raw_spin_unlock(&last_pool->lock);
1452 279 : raw_spin_lock(&pwq->pool->lock);
1453 : }
1454 : } else {
1455 1615 : raw_spin_lock(&pwq->pool->lock);
1456 : }
1457 :
1458 : /*
1459 : * pwq is determined and locked. For unbound pools, we could have
1460 : * raced with pwq release and it could already be dead. If its
1461 : * refcnt is zero, repeat pwq selection. Note that pwqs never die
1462 : * without another pwq replacing it in the numa_pwq_tbl or while
1463 : * work items are executing on it, so the retrying is guaranteed to
1464 : * make forward-progress.
1465 : */
1466 1895 : if (unlikely(!pwq->refcnt)) {
1467 0 : if (wq->flags & WQ_UNBOUND) {
1468 0 : raw_spin_unlock(&pwq->pool->lock);
1469 0 : cpu_relax();
1470 0 : goto retry;
1471 : }
1472 : /* oops */
1473 0 : WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
1474 : wq->name, cpu);
1475 : }
1476 :
1477 : /* pwq determined, queue */
1478 1895 : trace_workqueue_queue_work(req_cpu, pwq, work);
1479 :
1480 1894 : if (WARN_ON(!list_empty(&work->entry)))
1481 0 : goto out;
1482 :
1483 1894 : pwq->nr_in_flight[pwq->work_color]++;
1484 1894 : work_flags = work_color_to_flags(pwq->work_color);
1485 :
1486 1894 : if (likely(pwq->nr_active < pwq->max_active)) {
1487 1854 : trace_workqueue_activate_work(work);
1488 1854 : pwq->nr_active++;
1489 1854 : worklist = &pwq->pool->worklist;
1490 1854 : if (list_empty(worklist))
1491 1701 : pwq->pool->watchdog_ts = jiffies;
1492 : } else {
1493 40 : work_flags |= WORK_STRUCT_DELAYED;
1494 40 : worklist = &pwq->delayed_works;
1495 : }
1496 :
1497 1894 : insert_work(pwq, work, worklist, work_flags);
1498 :
1499 1895 : out:
1500 1895 : raw_spin_unlock(&pwq->pool->lock);
1501 1895 : rcu_read_unlock();
1502 : }
1503 :
1504 : /**
1505 : * queue_work_on - queue work on specific cpu
1506 : * @cpu: CPU number to execute work on
1507 : * @wq: workqueue to use
1508 : * @work: work to queue
1509 : *
1510 : * We queue the work to a specific CPU, the caller must ensure it
1511 : * can't go away.
1512 : *
1513 : * Return: %false if @work was already on a queue, %true otherwise.
1514 : */
1515 848 : bool queue_work_on(int cpu, struct workqueue_struct *wq,
1516 : struct work_struct *work)
1517 : {
1518 848 : bool ret = false;
1519 848 : unsigned long flags;
1520 :
1521 1696 : local_irq_save(flags);
1522 :
1523 848 : if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1524 837 : __queue_work(cpu, wq, work);
1525 837 : ret = true;
1526 : }
1527 :
1528 848 : local_irq_restore(flags);
1529 848 : return ret;
1530 : }
1531 : EXPORT_SYMBOL(queue_work_on);
1532 :
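/*
 * Usage sketch, not part of the original source: a typical caller
 * embeds a work_struct in its own context, initializes it once and
 * queues it; the callback then runs later in process context on a
 * kworker. The names my_data and my_work_fn below are hypothetical:
 *
 *	struct my_data {
 *		struct work_struct work;
 *		int value;
 *	};
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		struct my_data *d = container_of(work, struct my_data, work);
 *		pr_info("processing %d\n", d->value);
 *	}
 *
 *	INIT_WORK(&d->work, my_work_fn);
 *	queue_work(system_wq, &d->work);  /* or queue_work_on(cpu, wq, ...) */
 */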
1533 : /**
1534 : * workqueue_select_cpu_near - Select a CPU based on NUMA node
1535 : * @node: NUMA node ID that we want to select a CPU from
1536 : *
1537 : * This function will attempt to find a "random" cpu available on a given
1538 : * node. If there are no CPUs available on the given node it will return
1539 : * WORK_CPU_UNBOUND indicating that we should just schedule to any
1540 : * available CPU if we need to schedule this work.
1541 : */
1542 0 : static int workqueue_select_cpu_near(int node)
1543 : {
1544 0 : int cpu;
1545 :
1546 : /* No point in doing this if NUMA isn't enabled for workqueues */
1547 0 : if (!wq_numa_enabled)
1548 : return WORK_CPU_UNBOUND;
1549 :
1550 : /* Delay binding to CPU if node is not valid or online */
1551 0 : if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
1552 0 : return WORK_CPU_UNBOUND;
1553 :
1554 : /* Use local node/cpu if we are already there */
1555 0 : cpu = raw_smp_processor_id();
1556 0 : if (node == cpu_to_node(cpu))
1557 : return cpu;
1558 :
1559 : /* Use "random", otherwise known as the "first" online CPU of the node */
1560 0 : cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
1561 :
1562 : /* If CPU is valid return that, otherwise just defer */
1563 0 : return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
1564 : }
1565 :
1566 : /**
1567 : * queue_work_node - queue work on a "random" cpu for a given NUMA node
1568 : * @node: NUMA node that we are targeting the work for
1569 : * @wq: workqueue to use
1570 : * @work: work to queue
1571 : *
1572 : * We queue the work to a "random" CPU within a given NUMA node. The basic
1573 : * idea here is to provide a way to associate work with a given
1574 : * NUMA node.
1575 : *
1576 : * This function will only make a best effort attempt at getting this onto
1577 : * the right NUMA node. If no node is requested or the requested node is
1578 : * offline then we just fall back to standard queue_work behavior.
1579 : *
1580 : * Currently the "random" CPU ends up being the first available CPU in the
1581 : * intersection of cpu_online_mask and the cpumask of the node, unless we
1582 : * are running on the node. In that case we just use the current CPU.
1583 : *
1584 : * Return: %false if @work was already on a queue, %true otherwise.
1585 : */
1586 0 : bool queue_work_node(int node, struct workqueue_struct *wq,
1587 : struct work_struct *work)
1588 : {
1589 0 : unsigned long flags;
1590 0 : bool ret = false;
1591 :
1592 : /*
1593 : * This current implementation is specific to unbound workqueues.
1594 : * Specifically we only return the first available CPU for a given
1595 : * node instead of cycling through individual CPUs within the node.
1596 : *
1597 : * If this is used with a per-cpu workqueue then the logic in
1598 : * workqueue_select_cpu_near would need to be updated to allow for
1599 : * some round robin type logic.
1600 : */
1601 0 : WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));
1602 :
1603 0 : local_irq_save(flags);
1604 :
1605 0 : if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1606 0 : int cpu = workqueue_select_cpu_near(node);
1607 :
1608 0 : __queue_work(cpu, wq, work);
1609 0 : ret = true;
1610 : }
1611 :
1612 0 : local_irq_restore(flags);
1613 0 : return ret;
1614 : }
1615 : EXPORT_SYMBOL_GPL(queue_work_node);
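/*
 * Illustrative sketch, assuming an unbound workqueue: queue work near the
 * NUMA node of a device. The example_* names are hypothetical; dev_to_node()
 * and queue_work_node() are the real APIs.
 */
static void example_queue_near_device(struct device *dev,
				      struct workqueue_struct *unbound_wq,
				      struct work_struct *work)
{
	/* best effort: falls back to any CPU if the node has no online CPU */
	queue_work_node(dev_to_node(dev), unbound_wq, work);
}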
1616 :
1617 539 : void delayed_work_timer_fn(struct timer_list *t)
1618 : {
1619 539 : struct delayed_work *dwork = from_timer(dwork, t, timer);
1620 :
1621 : /* should have been called from irqsafe timer with irq already off */
1622 539 : __queue_work(dwork->cpu, dwork->wq, &dwork->work);
1623 541 : }
1624 : EXPORT_SYMBOL(delayed_work_timer_fn);
1625 :
1626 879 : static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1627 : struct delayed_work *dwork, unsigned long delay)
1628 : {
1629 879 : struct timer_list *timer = &dwork->timer;
1630 879 : struct work_struct *work = &dwork->work;
1631 :
1632 879 : WARN_ON_ONCE(!wq);
1633 879 : WARN_ON_ONCE(timer->function != delayed_work_timer_fn);
1634 879 : WARN_ON_ONCE(timer_pending(timer));
1635 879 : WARN_ON_ONCE(!list_empty(&work->entry));
1636 :
1637 : /*
1638 : * If @delay is 0, queue @dwork->work immediately. This is for
1639 : * both optimization and correctness. The earliest @timer can
1640 : * expire is on the closest next tick and delayed_work users depend
1641 : * on there being no such delay when @delay is 0.
1642 : */
1643 879 : if (!delay) {
1644 320 : __queue_work(cpu, wq, &dwork->work);
1645 320 : return;
1646 : }
1647 :
1648 559 : dwork->wq = wq;
1649 559 : dwork->cpu = cpu;
1650 559 : timer->expires = jiffies + delay;
1651 :
1652 559 : if (unlikely(cpu != WORK_CPU_UNBOUND))
1653 110 : add_timer_on(timer, cpu);
1654 : else
1655 449 : add_timer(timer);
1656 : }
1657 :
1658 : /**
1659 : * queue_delayed_work_on - queue work on specific CPU after delay
1660 : * @cpu: CPU number to execute work on
1661 : * @wq: workqueue to use
1662 : * @dwork: work to queue
1663 : * @delay: number of jiffies to wait before queueing
1664 : *
1665 : * Return: %false if @dwork was already on a queue, %true otherwise. If
1666 : * @delay is zero and @dwork is idle, it will be scheduled for immediate
1667 : * execution.
1668 : */
1669 635 : bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1670 : struct delayed_work *dwork, unsigned long delay)
1671 : {
1672 635 : struct work_struct *work = &dwork->work;
1673 635 : bool ret = false;
1674 635 : unsigned long flags;
1675 :
1676 : /* read the comment in __queue_work() */
1677 1270 : local_irq_save(flags);
1678 :
1679 635 : if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1680 632 : __queue_delayed_work(cpu, wq, dwork, delay);
1681 632 : ret = true;
1682 : }
1683 :
1684 635 : local_irq_restore(flags);
1685 635 : return ret;
1686 : }
1687 : EXPORT_SYMBOL(queue_delayed_work_on);
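/*
 * Illustrative sketch, not part of this file: arm a delayed work item to run
 * roughly one second from now on any CPU. poll_fn, poll_work and
 * example_start_polling() are hypothetical names.
 */
static void poll_fn(struct work_struct *work)
{
	/* ... poll hardware, then optionally re-arm ... */
}
static DECLARE_DELAYED_WORK(poll_work, poll_fn);

static void example_start_polling(void)
{
	queue_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &poll_work,
			      msecs_to_jiffies(1000));
}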
1688 :
1689 : /**
1690 : * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
1691 : * @cpu: CPU number to execute work on
1692 : * @wq: workqueue to use
1693 : * @dwork: work to queue
1694 : * @delay: number of jiffies to wait before queueing
1695 : *
1696 : * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
1697 : * modify @dwork's timer so that it expires after @delay. If @delay is
1698 : * zero, @dwork is guaranteed to be scheduled immediately regardless of its
1699 : * current state.
1700 : *
1701 : * Return: %false if @dwork was idle and queued, %true if @dwork was
1702 : * pending and its timer was modified.
1703 : *
1704 : * This function is safe to call from any context including IRQ handler.
1705 : * See try_to_grab_pending() for details.
1706 : */
1707 247 : bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
1708 : struct delayed_work *dwork, unsigned long delay)
1709 : {
1710 247 : unsigned long flags;
1711 247 : int ret;
1712 :
1713 247 : do {
1714 247 : ret = try_to_grab_pending(&dwork->work, true, &flags);
1715 247 : } while (unlikely(ret == -EAGAIN));
1716 :
1717 247 : if (likely(ret >= 0)) {
1718 247 : __queue_delayed_work(cpu, wq, dwork, delay);
1719 247 : local_irq_restore(flags);
1720 : }
1721 :
1722 : /* -ENOENT from try_to_grab_pending() becomes %true */
1723 247 : return ret;
1724 : }
1725 : EXPORT_SYMBOL_GPL(mod_delayed_work_on);
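/*
 * Illustrative sketch: a simple debounce pattern. Each call pushes the
 * deadline back by 200ms whether or not the work is already pending.
 * mod_delayed_work() is the WORK_CPU_UNBOUND wrapper around
 * mod_delayed_work_on(); example_debounce() is a hypothetical helper.
 */
static void example_debounce(struct delayed_work *dwork)
{
	mod_delayed_work(system_wq, dwork, msecs_to_jiffies(200));
}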
1726 :
1727 191 : static void rcu_work_rcufn(struct rcu_head *rcu)
1728 : {
1729 191 : struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
1730 :
1731 : /* read the comment in __queue_work() */
1732 191 : local_irq_disable();
1733 191 : __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
1734 191 : local_irq_enable();
1735 191 : }
1736 :
1737 : /**
1738 : * queue_rcu_work - queue work after a RCU grace period
1739 : * @wq: workqueue to use
1740 : * @rwork: work to queue
1741 : *
1742 : * Return: %false if @rwork was already pending, %true otherwise. Note
1743 : * that a full RCU grace period is guaranteed only after a %true return.
1744 : * While @rwork is guaranteed to be executed after a %false return, the
1745 : * execution may happen before a full RCU grace period has passed.
1746 : */
1747 191 : bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
1748 : {
1749 191 : struct work_struct *work = &rwork->work;
1750 :
1751 191 : if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1752 191 : rwork->wq = wq;
1753 191 : call_rcu(&rwork->rcu, rcu_work_rcufn);
1754 191 : return true;
1755 : }
1756 :
1757 : return false;
1758 : }
1759 : EXPORT_SYMBOL(queue_rcu_work);
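/*
 * Illustrative sketch: free an object in process context only after an RCU
 * grace period. struct example_obj and the example_* helpers are
 * hypothetical; to_rcu_work(), INIT_RCU_WORK() and queue_rcu_work() are the
 * real APIs.
 */
struct example_obj {
	struct rcu_work rwork;
	/* ... payload that RCU readers may still reference ... */
};

static void example_free_fn(struct work_struct *work)
{
	struct example_obj *obj =
		container_of(to_rcu_work(work), struct example_obj, rwork);

	kfree(obj);
}

static void example_defer_free(struct example_obj *obj)
{
	INIT_RCU_WORK(&obj->rwork, example_free_fn);
	queue_rcu_work(system_wq, &obj->rwork);
}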
1760 :
1761 : /**
1762 : * worker_enter_idle - enter idle state
1763 : * @worker: worker which is entering idle state
1764 : *
1765 : * @worker is entering idle state. Update stats and idle timer if
1766 : * necessary.
1767 : *
1768 : * LOCKING:
1769 : * raw_spin_lock_irq(pool->lock).
1770 : */
1771 1691 : static void worker_enter_idle(struct worker *worker)
1772 : {
1773 1691 : struct worker_pool *pool = worker->pool;
1774 :
1775 1691 : if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
1776 3382 : WARN_ON_ONCE(!list_empty(&worker->entry) &&
1777 : (worker->hentry.next || worker->hentry.pprev)))
1778 : return;
1779 :
1780 : /* can't use worker_set_flags(), also called from create_worker() */
1781 1691 : worker->flags |= WORKER_IDLE;
1782 1691 : pool->nr_idle++;
1783 1691 : worker->last_active = jiffies;
1784 :
1785 : /* idle_list is LIFO */
1786 1691 : list_add(&worker->entry, &pool->idle_list);
1787 :
1788 1691 : if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
1789 3 : mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
1790 :
1791 : /*
1792 : * Sanity check nr_running. Because unbind_workers() releases
1793 : * pool->lock between setting %WORKER_UNBOUND and zapping
1794 : * nr_running, the warning may trigger spuriously. Check iff
1795 : * unbind is not in progress.
1796 : */
1797 3234 : WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
1798 : pool->nr_workers == pool->nr_idle &&
1799 : atomic_read(&pool->nr_running));
1800 : }
1801 :
1802 : /**
1803 : * worker_leave_idle - leave idle state
1804 : * @worker: worker which is leaving idle state
1805 : *
1806 : * @worker is leaving idle state. Update stats.
1807 : *
1808 : * LOCKING:
1809 : * raw_spin_lock_irq(pool->lock).
1810 : */
1811 1669 : static void worker_leave_idle(struct worker *worker)
1812 : {
1813 1669 : struct worker_pool *pool = worker->pool;
1814 :
1815 1669 : if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
1816 : return;
1817 1669 : worker_clr_flags(worker, WORKER_IDLE);
1818 1670 : pool->nr_idle--;
1819 1670 : list_del_init(&worker->entry);
1820 : }
1821 :
1822 28 : static struct worker *alloc_worker(int node)
1823 : {
1824 28 : struct worker *worker;
1825 :
1826 28 : worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
1827 28 : if (worker) {
1828 28 : INIT_LIST_HEAD(&worker->entry);
1829 28 : INIT_LIST_HEAD(&worker->scheduled);
1830 28 : INIT_LIST_HEAD(&worker->node);
1831 : /* on creation a worker is in !idle && prep state */
1832 28 : worker->flags = WORKER_PREP;
1833 : }
1834 28 : return worker;
1835 : }
1836 :
1837 : /**
1838 : * worker_attach_to_pool() - attach a worker to a pool
1839 : * @worker: worker to be attached
1840 : * @pool: the target pool
1841 : *
1842 : * Attach @worker to @pool. Once attached, the %WORKER_UNBOUND flag and
1843 : * cpu-binding of @worker are kept coordinated with the pool across
1844 : * cpu-[un]hotplugs.
1845 : */
1846 21 : static void worker_attach_to_pool(struct worker *worker,
1847 : struct worker_pool *pool)
1848 : {
1849 21 : mutex_lock(&wq_pool_attach_mutex);
1850 :
1851 : /*
1852 : * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
1853 : * stable across this function. See the comments above the flag
1854 : * definition for details.
1855 : */
1856 21 : if (pool->flags & POOL_DISASSOCIATED)
1857 9 : worker->flags |= WORKER_UNBOUND;
1858 : else
1859 12 : kthread_set_per_cpu(worker->task, pool->cpu);
1860 :
1861 21 : if (worker->rescue_wq)
1862 0 : set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
1863 :
1864 21 : list_add_tail(&worker->node, &pool->workers);
1865 21 : worker->pool = pool;
1866 :
1867 21 : mutex_unlock(&wq_pool_attach_mutex);
1868 21 : }
1869 :
1870 : /**
1871 : * worker_detach_from_pool() - detach a worker from its pool
1872 : * @worker: worker which is attached to its pool
1873 : *
1874 : * Undo the attaching which had been done in worker_attach_to_pool(). The
1875 : * caller worker shouldn't access the pool after detaching unless it holds
1876 : * another reference to the pool.
1877 : */
1878 0 : static void worker_detach_from_pool(struct worker *worker)
1879 : {
1880 0 : struct worker_pool *pool = worker->pool;
1881 0 : struct completion *detach_completion = NULL;
1882 :
1883 0 : mutex_lock(&wq_pool_attach_mutex);
1884 :
1885 0 : kthread_set_per_cpu(worker->task, -1);
1886 0 : list_del(&worker->node);
1887 0 : worker->pool = NULL;
1888 :
1889 0 : if (list_empty(&pool->workers))
1890 0 : detach_completion = pool->detach_completion;
1891 0 : mutex_unlock(&wq_pool_attach_mutex);
1892 :
1893 : /* clear leftover flags without pool->lock after it is detached */
1894 0 : worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
1895 :
1896 0 : if (detach_completion)
1897 0 : complete(detach_completion);
1898 0 : }
1899 :
1900 : /**
1901 : * create_worker - create a new workqueue worker
1902 : * @pool: pool the new worker will belong to
1903 : *
1904 : * Create and start a new worker which is attached to @pool.
1905 : *
1906 : * CONTEXT:
1907 : * Might sleep. Does GFP_KERNEL allocations.
1908 : *
1909 : * Return:
1910 : * Pointer to the newly created worker.
1911 : */
1912 21 : static struct worker *create_worker(struct worker_pool *pool)
1913 : {
1914 21 : struct worker *worker = NULL;
1915 21 : int id = -1;
1916 21 : char id_buf[16];
1917 :
1918 : /* ID is needed to determine kthread name */
1919 21 : id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL);
1920 21 : if (id < 0)
1921 0 : goto fail;
1922 :
1923 21 : worker = alloc_worker(pool->node);
1924 21 : if (!worker)
1925 0 : goto fail;
1926 :
1927 21 : worker->id = id;
1928 :
1929 21 : if (pool->cpu >= 0)
1930 18 : snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
1931 18 : pool->attrs->nice < 0 ? "H" : "");
1932 : else
1933 3 : snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
1934 :
1935 21 : worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
1936 : "kworker/%s", id_buf);
1937 21 : if (IS_ERR(worker->task))
1938 0 : goto fail;
1939 :
1940 21 : set_user_nice(worker->task, pool->attrs->nice);
1941 21 : kthread_bind_mask(worker->task, pool->attrs->cpumask);
1942 :
1943 : /* successful, attach the worker to the pool */
1944 21 : worker_attach_to_pool(worker, pool);
1945 :
1946 : /* start the newly created worker */
1947 21 : raw_spin_lock_irq(&pool->lock);
1948 21 : worker->pool->nr_workers++;
1949 21 : worker_enter_idle(worker);
1950 21 : wake_up_process(worker->task);
1951 21 : raw_spin_unlock_irq(&pool->lock);
1952 :
1953 21 : return worker;
1954 :
1955 0 : fail:
1956 0 : if (id >= 0)
1957 0 : ida_simple_remove(&pool->worker_ida, id);
1958 0 : kfree(worker);
1959 0 : return NULL;
1960 : }
1961 :
1962 : /**
1963 : * destroy_worker - destroy a workqueue worker
1964 : * @worker: worker to be destroyed
1965 : *
1966 : * Destroy @worker and adjust @pool stats accordingly. The worker should
1967 : * be idle.
1968 : *
1969 : * CONTEXT:
1970 : * raw_spin_lock_irq(pool->lock).
1971 : */
1972 0 : static void destroy_worker(struct worker *worker)
1973 : {
1974 0 : struct worker_pool *pool = worker->pool;
1975 :
1976 0 : lockdep_assert_held(&pool->lock);
1977 :
1978 : /* sanity check frenzy */
1979 0 : if (WARN_ON(worker->current_work) ||
1980 0 : WARN_ON(!list_empty(&worker->scheduled)) ||
1981 0 : WARN_ON(!(worker->flags & WORKER_IDLE)))
1982 : return;
1983 :
1984 0 : pool->nr_workers--;
1985 0 : pool->nr_idle--;
1986 :
1987 0 : list_del_init(&worker->entry);
1988 0 : worker->flags |= WORKER_DIE;
1989 0 : wake_up_process(worker->task);
1990 : }
1991 :
1992 0 : static void idle_worker_timeout(struct timer_list *t)
1993 : {
1994 0 : struct worker_pool *pool = from_timer(pool, t, idle_timer);
1995 :
1996 0 : raw_spin_lock_irq(&pool->lock);
1997 :
1998 0 : while (too_many_workers(pool)) {
1999 0 : struct worker *worker;
2000 0 : unsigned long expires;
2001 :
2002 : /* idle_list is kept in LIFO order, check the last one */
2003 0 : worker = list_entry(pool->idle_list.prev, struct worker, entry);
2004 0 : expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2005 :
2006 0 : if (time_before(jiffies, expires)) {
2007 0 : mod_timer(&pool->idle_timer, expires);
2008 0 : break;
2009 : }
2010 :
2011 0 : destroy_worker(worker);
2012 : }
2013 :
2014 0 : raw_spin_unlock_irq(&pool->lock);
2015 0 : }
2016 :
2017 0 : static void send_mayday(struct work_struct *work)
2018 : {
2019 0 : struct pool_workqueue *pwq = get_work_pwq(work);
2020 0 : struct workqueue_struct *wq = pwq->wq;
2021 :
2022 0 : lockdep_assert_held(&wq_mayday_lock);
2023 :
2024 0 : if (!wq->rescuer)
2025 : return;
2026 :
2027 : /* mayday mayday mayday */
2028 0 : if (list_empty(&pwq->mayday_node)) {
2029 : /*
2030 : * If @pwq is for an unbound wq, its base ref may be put at
2031 : * any time due to an attribute change. Pin @pwq until the
2032 : * rescuer is done with it.
2033 : */
2034 0 : get_pwq(pwq);
2035 0 : list_add_tail(&pwq->mayday_node, &wq->maydays);
2036 0 : wake_up_process(wq->rescuer->task);
2037 : }
2038 : }
2039 :
2040 1 : static void pool_mayday_timeout(struct timer_list *t)
2041 : {
2042 1 : struct worker_pool *pool = from_timer(pool, t, mayday_timer);
2043 1 : struct work_struct *work;
2044 :
2045 1 : raw_spin_lock_irq(&pool->lock);
2046 1 : raw_spin_lock(&wq_mayday_lock); /* for wq->maydays */
2047 :
2048 1 : if (need_to_create_worker(pool)) {
2049 : /*
2050 : * We've been trying to create a new worker but
2051 : * haven't been successful. We might be hitting an
2052 : * allocation deadlock. Send distress signals to
2053 : * rescuers.
2054 : */
2055 0 : list_for_each_entry(work, &pool->worklist, entry)
2056 0 : send_mayday(work);
2057 : }
2058 :
2059 1 : raw_spin_unlock(&wq_mayday_lock);
2060 1 : raw_spin_unlock_irq(&pool->lock);
2061 :
2062 1 : mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
2063 1 : }
2064 :
2065 : /**
2066 : * maybe_create_worker - create a new worker if necessary
2067 : * @pool: pool to create a new worker for
2068 : *
2069 : * Create a new worker for @pool if necessary. @pool is guaranteed to
2070 : * have at least one idle worker on return from this function. If
2071 : * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
2072 : * sent to all rescuers with works scheduled on @pool to resolve
2073 : * possible allocation deadlock.
2074 : *
2075 : * On return, need_to_create_worker() is guaranteed to be %false and
2076 : * may_start_working() %true.
2077 : *
2078 : * LOCKING:
2079 : * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2080 : * multiple times. Does GFP_KERNEL allocations. Called only from
2081 : * manager.
2082 : */
2083 12 : static void maybe_create_worker(struct worker_pool *pool)
2084 : __releases(&pool->lock)
2085 : __acquires(&pool->lock)
2086 : {
2087 12 : restart:
2088 12 : raw_spin_unlock_irq(&pool->lock);
2089 :
2090 : /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
2091 12 : mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
2092 :
2093 12 : while (true) {
2094 12 : if (create_worker(pool) || !need_to_create_worker(pool))
2095 : break;
2096 :
2097 0 : schedule_timeout_interruptible(CREATE_COOLDOWN);
2098 :
2099 0 : if (!need_to_create_worker(pool))
2100 : break;
2101 : }
2102 :
2103 12 : del_timer_sync(&pool->mayday_timer);
2104 12 : raw_spin_lock_irq(&pool->lock);
2105 : /*
2106 : * This is necessary even after a new worker was just successfully
2107 : * created as @pool->lock was dropped and the new worker might have
2108 : * already become busy.
2109 : */
2110 12 : if (need_to_create_worker(pool))
2111 0 : goto restart;
2112 12 : }
2113 :
2114 : /**
2115 : * manage_workers - manage worker pool
2116 : * @worker: self
2117 : *
2118 : * Assume the manager role and manage the worker pool @worker belongs
2119 : * to. At any given time, there can be only zero or one manager per
2120 : * pool. The exclusion is handled automatically by this function.
2121 : *
2122 : * The caller can safely start processing works on false return. On
2123 : * true return, it's guaranteed that need_to_create_worker() is false
2124 : * and may_start_working() is true.
2125 : *
2126 : * CONTEXT:
2127 : * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2128 : * multiple times. Does GFP_KERNEL allocations.
2129 : *
2130 : * Return:
2131 : * %false if the pool doesn't need management and the caller can safely
2132 : * start processing works, %true if management function was performed and
2133 : * the conditions that the caller verified before calling the function may
2134 : * no longer be true.
2135 : */
2136 12 : static bool manage_workers(struct worker *worker)
2137 : {
2138 12 : struct worker_pool *pool = worker->pool;
2139 :
2140 12 : if (pool->flags & POOL_MANAGER_ACTIVE)
2141 : return false;
2142 :
2143 12 : pool->flags |= POOL_MANAGER_ACTIVE;
2144 12 : pool->manager = worker;
2145 :
2146 12 : maybe_create_worker(pool);
2147 :
2148 12 : pool->manager = NULL;
2149 12 : pool->flags &= ~POOL_MANAGER_ACTIVE;
2150 12 : rcuwait_wake_up(&manager_wait);
2151 12 : return true;
2152 : }
2153 :
2154 : /**
2155 : * process_one_work - process single work
2156 : * @worker: self
2157 : * @work: work to process
2158 : *
2159 : * Process @work. This function contains all the logic necessary to
2160 : * process a single work item, including synchronization against and
2161 : * interaction with other workers on the same cpu, queueing and
2162 : * flushing. As long as the context requirement is met, any worker can
2163 : * call this function to process a work item.
2164 : *
2165 : * CONTEXT:
2166 : * raw_spin_lock_irq(pool->lock) which is released and regrabbed.
2167 : */
2168 1906 : static void process_one_work(struct worker *worker, struct work_struct *work)
2169 : __releases(&pool->lock)
2170 : __acquires(&pool->lock)
2171 : {
2172 1906 : struct pool_workqueue *pwq = get_work_pwq(work);
2173 1906 : struct worker_pool *pool = worker->pool;
2174 1906 : bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
2175 1906 : int work_color;
2176 1906 : struct worker *collision;
2177 : #ifdef CONFIG_LOCKDEP
2178 : /*
2179 : * It is permissible to free the struct work_struct from
2180 : * inside the function that is called from it, this we need to
2181 : * take into account for lockdep too. To avoid bogus "held
2182 : * lock freed" warnings as well as problems when looking into
2183 : * work->lockdep_map, make a copy and use that here.
2184 : */
2185 1906 : struct lockdep_map lockdep_map;
2186 :
2187 1906 : lockdep_copy_map(&lockdep_map, &work->lockdep_map);
2188 : #endif
2189 : /* ensure we're on the correct CPU */
2190 3812 : WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
2191 : raw_smp_processor_id() != pool->cpu);
2192 :
2193 : /*
2194 : * A single work shouldn't be executed concurrently by
2195 : * multiple workers on a single cpu. Check whether anyone is
2196 : * already processing the work. If so, defer the work to the
2197 : * currently executing one.
2198 : */
2199 1906 : collision = find_worker_executing_work(pool, work);
2200 1906 : if (unlikely(collision)) {
2201 0 : move_linked_works(work, &collision->scheduled, NULL);
2202 0 : return;
2203 : }
2204 :
2205 : /* claim and dequeue */
2206 1906 : debug_work_deactivate(work);
2207 1906 : hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
2208 1906 : worker->current_work = work;
2209 1906 : worker->current_func = work->func;
2210 1906 : worker->current_pwq = pwq;
2211 1906 : work_color = get_work_color(work);
2212 :
2213 : /*
2214 : * Record wq name for cmdline and debug reporting, may get
2215 : * overridden through set_worker_desc().
2216 : */
2217 1906 : strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
2218 :
2219 1906 : list_del_init(&work->entry);
2220 :
2221 : /*
2222 : * CPU intensive works don't participate in concurrency management.
2223 : * They're the scheduler's responsibility. This takes @worker out
2224 : * of concurrency management and the next code block will chain
2225 : * execution of the pending work items.
2226 : */
2227 1906 : if (unlikely(cpu_intensive))
2228 0 : worker_set_flags(worker, WORKER_CPU_INTENSIVE);
2229 :
2230 : /*
2231 : * Wake up another worker if necessary. The condition is always
2232 : * false for normal per-cpu workers since nr_running would always
2233 : * be >= 1 at this point. This is used to chain execution of the
2234 : * pending work items for WORKER_NOT_RUNNING workers such as the
2235 : * UNBOUND and CPU_INTENSIVE ones.
2236 : */
2237 1906 : if (need_more_worker(pool))
2238 2 : wake_up_worker(pool);
2239 :
2240 : /*
2241 : * Record the last pool and clear PENDING which should be the last
2242 : * update to @work. Also, do this inside @pool->lock so that
2243 : * PENDING and queued state changes happen together while IRQ is
2244 : * disabled.
2245 : */
2246 1905 : set_work_pool_and_clear_pending(work, pool->id);
2247 :
2248 1906 : raw_spin_unlock_irq(&pool->lock);
2249 :
2250 1906 : lock_map_acquire(&pwq->wq->lockdep_map);
2251 1906 : lock_map_acquire(&lockdep_map);
2252 : /*
2253 : * Strictly speaking we should mark the invariant state without holding
2254 : * any locks, that is, before these two lock_map_acquire()'s.
2255 : *
2256 : * However, that would result in:
2257 : *
2258 : * A(W1)
2259 : * WFC(C)
2260 : * A(W1)
2261 : * C(C)
2262 : *
2263 : * Which would create W1->C->W1 dependencies, even though there is no
2264 : * actual deadlock possible. There are two solutions, using a
2265 : * read-recursive acquire on the work(queue) 'locks', but this will then
2266 : * hit the lockdep limitation on recursive locks, or simply discard
2267 : * these locks.
2268 : *
2269 : * AFAICT there is no possible deadlock scenario between the
2270 : * flush_work() and complete() primitives (except for single-threaded
2271 : * workqueues), so hiding them isn't a problem.
2272 : */
2273 1906 : lockdep_invariant_state(true);
2274 1906 : trace_workqueue_execute_start(work);
2275 1905 : worker->current_func(work);
2276 : /*
2277 : * While we must be careful to not use "work" after this, the trace
2278 : * point will only record its address.
2279 : */
2280 1906 : trace_workqueue_execute_end(work, worker->current_func);
2281 1906 : lock_map_release(&lockdep_map);
2282 1906 : lock_map_release(&pwq->wq->lockdep_map);
2283 :
2284 1906 : if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
2285 0 : pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
2286 : " last function: %ps\n",
2287 : current->comm, preempt_count(), task_pid_nr(current),
2288 : worker->current_func);
2289 0 : debug_show_held_locks(current);
2290 0 : dump_stack();
2291 : }
2292 :
2293 : /*
2294 : * The following prevents a kworker from hogging CPU on !PREEMPTION
2295 : * kernels, where a requeueing work item waiting for something to
2296 : * happen could deadlock with stop_machine as such work item could
2297 : * indefinitely requeue itself while all other CPUs are trapped in
2298 : * stop_machine. At the same time, report a quiescent RCU state so
2299 : * the same condition doesn't freeze RCU.
2300 : */
2301 1906 : cond_resched();
2302 :
2303 1906 : raw_spin_lock_irq(&pool->lock);
2304 :
2305 : /* clear cpu intensive status */
2306 1906 : if (unlikely(cpu_intensive))
2307 0 : worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2308 :
2309 : /* tag the worker for identification in schedule() */
2310 1906 : worker->last_func = worker->current_func;
2311 :
2312 : /* we're done with it, release */
2313 1906 : hash_del(&worker->hentry);
2314 1906 : worker->current_work = NULL;
2315 1906 : worker->current_func = NULL;
2316 1906 : worker->current_pwq = NULL;
2317 1906 : pwq_dec_nr_in_flight(pwq, work_color);
2318 : }
2319 :
2320 : /**
2321 : * process_scheduled_works - process scheduled works
2322 : * @worker: self
2323 : *
2324 : * Process all scheduled works. Please note that the scheduled list
2325 : * may change while processing a work, so this function repeatedly
2326 : * fetches a work from the top and executes it.
2327 : *
2328 : * CONTEXT:
2329 : * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2330 : * multiple times.
2331 : */
2332 11 : static void process_scheduled_works(struct worker *worker)
2333 : {
2334 30 : while (!list_empty(&worker->scheduled)) {
2335 19 : struct work_struct *work = list_first_entry(&worker->scheduled,
2336 : struct work_struct, entry);
2337 19 : process_one_work(worker, work);
2338 : }
2339 11 : }
2340 :
2341 28 : static void set_pf_worker(bool val)
2342 : {
2343 28 : mutex_lock(&wq_pool_attach_mutex);
2344 28 : if (val)
2345 28 : current->flags |= PF_WQ_WORKER;
2346 : else
2347 0 : current->flags &= ~PF_WQ_WORKER;
2348 28 : mutex_unlock(&wq_pool_attach_mutex);
2349 28 : }
2350 :
2351 : /**
2352 : * worker_thread - the worker thread function
2353 : * @__worker: self
2354 : *
2355 : * The worker thread function. All workers belong to a worker_pool -
2356 : * either a per-cpu one or dynamic unbound one. These workers process all
2357 : * work items regardless of their specific target workqueue. The only
2358 : * exception is work items which belong to workqueues with a rescuer which
2359 : * will be explained in rescuer_thread().
2360 : *
2361 : * Return: 0
2362 : */
2363 21 : static int worker_thread(void *__worker)
2364 : {
2365 21 : struct worker *worker = __worker;
2366 21 : struct worker_pool *pool = worker->pool;
2367 :
2368 : /* tell the scheduler that this is a workqueue worker */
2369 21 : set_pf_worker(true);
2370 1669 : woke_up:
2371 1669 : raw_spin_lock_irq(&pool->lock);
2372 :
2373 : /* am I supposed to die? */
2374 1670 : if (unlikely(worker->flags & WORKER_DIE)) {
2375 0 : raw_spin_unlock_irq(&pool->lock);
2376 0 : WARN_ON_ONCE(!list_empty(&worker->entry));
2377 0 : set_pf_worker(false);
2378 :
2379 0 : set_task_comm(worker->task, "kworker/dying");
2380 0 : ida_simple_remove(&pool->worker_ida, worker->id);
2381 0 : worker_detach_from_pool(worker);
2382 0 : kfree(worker);
2383 0 : return 0;
2384 : }
2385 :
2386 1670 : worker_leave_idle(worker);
2387 1682 : recheck:
2388 : /* no more worker necessary? */
2389 1682 : if (!need_more_worker(pool))
2390 31 : goto sleep;
2391 :
2392 : /* do we need to manage? */
2393 1651 : if (unlikely(!may_start_working(pool)) && manage_workers(worker))
2394 12 : goto recheck;
2395 :
2396 : /*
2397 : * ->scheduled list can only be filled while a worker is
2398 : * preparing to process a work or actually processing it.
2399 : * Make sure nobody diddled with it while I was sleeping.
2400 : */
2401 1639 : WARN_ON_ONCE(!list_empty(&worker->scheduled));
2402 :
2403 : /*
2404 : * Finish PREP stage. We're guaranteed to have at least one idle
2405 : * worker or that someone else has already assumed the manager
2406 : * role. This is where @worker starts participating in concurrency
2407 : * management if applicable and concurrency management is restored
2408 : * after being rebound. See rebind_workers() for details.
2409 : */
2410 1639 : worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
2411 :
2412 1895 : do {
2413 1895 : struct work_struct *work =
2414 1895 : list_first_entry(&pool->worklist,
2415 : struct work_struct, entry);
2416 :
2417 1895 : pool->watchdog_ts = jiffies;
2418 :
2419 1895 : if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
2420 : /* optimization path, not strictly necessary */
2421 1887 : process_one_work(worker, work);
2422 1887 : if (unlikely(!list_empty(&worker->scheduled)))
2423 3 : process_scheduled_works(worker);
2424 : } else {
2425 8 : move_linked_works(work, &worker->scheduled, NULL);
2426 8 : process_scheduled_works(worker);
2427 : }
2428 1895 : } while (keep_working(pool));
2429 :
2430 1639 : worker_set_flags(worker, WORKER_PREP);
2431 1670 : sleep:
2432 : /*
2433 : * pool->lock is held and there's no work to process and no need to
2434 : * manage, sleep. Workers are woken up only while holding
2435 : * pool->lock or from local cpu, so setting the current state
2436 : * before releasing pool->lock is enough to prevent losing any
2437 : * event.
2438 : */
2439 1670 : worker_enter_idle(worker);
2440 1670 : __set_current_state(TASK_IDLE);
2441 1670 : raw_spin_unlock_irq(&pool->lock);
2442 1670 : schedule();
2443 1648 : goto woke_up;
2444 : }
2445 :
2446 : /**
2447 : * rescuer_thread - the rescuer thread function
2448 : * @__rescuer: self
2449 : *
2450 : * Workqueue rescuer thread function. There's one rescuer for each
2451 : * workqueue which has WQ_MEM_RECLAIM set.
2452 : *
2453 : * Regular work processing on a pool may block trying to create a new
2454 : * worker, which uses a GFP_KERNEL allocation that has a slight chance of
2455 : * developing into a deadlock if some work items already on the same queue
2456 : * need to be processed to satisfy that allocation. This is
2457 : * the problem the rescuer solves.
2458 : *
2459 : * When such a condition is possible, the pool summons the rescuers of all
2460 : * workqueues which have work items queued on the pool and lets them process
2461 : * those work items so that forward progress can be guaranteed.
2462 : *
2463 : * This should happen rarely.
2464 : *
2465 : * Return: 0
2466 : */
2467 7 : static int rescuer_thread(void *__rescuer)
2468 : {
2469 7 : struct worker *rescuer = __rescuer;
2470 7 : struct workqueue_struct *wq = rescuer->rescue_wq;
2471 7 : struct list_head *scheduled = &rescuer->scheduled;
2472 7 : bool should_stop;
2473 :
2474 7 : set_user_nice(current, RESCUER_NICE_LEVEL);
2475 :
2476 : /*
2477 : * Mark rescuer as worker too. As WORKER_PREP is never cleared, it
2478 : * doesn't participate in concurrency management.
2479 : */
2480 7 : set_pf_worker(true);
2481 7 : repeat:
2482 7 : set_current_state(TASK_IDLE);
2483 :
2484 : /*
2485 : * By the time the rescuer is requested to stop, the workqueue
2486 : * shouldn't have any work pending, but @wq->maydays may still have
2487 : * pwq(s) queued. This can happen if non-rescuer workers consume
2488 : * all the work items before the rescuer gets to them. Go through
2489 : * @wq->maydays processing before acting on should_stop so that the
2490 : * list is always empty on exit.
2491 : */
2492 7 : should_stop = kthread_should_stop();
2493 :
2494 : /* see whether any pwq is asking for help */
2495 7 : raw_spin_lock_irq(&wq_mayday_lock);
2496 :
2497 7 : while (!list_empty(&wq->maydays)) {
2498 0 : struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
2499 : struct pool_workqueue, mayday_node);
2500 0 : struct worker_pool *pool = pwq->pool;
2501 0 : struct work_struct *work, *n;
2502 0 : bool first = true;
2503 :
2504 0 : __set_current_state(TASK_RUNNING);
2505 0 : list_del_init(&pwq->mayday_node);
2506 :
2507 0 : raw_spin_unlock_irq(&wq_mayday_lock);
2508 :
2509 0 : worker_attach_to_pool(rescuer, pool);
2510 :
2511 0 : raw_spin_lock_irq(&pool->lock);
2512 :
2513 : /*
2514 : * Slurp in all works issued via this workqueue and
2515 : * process'em.
2516 : */
2517 0 : WARN_ON_ONCE(!list_empty(scheduled));
2518 0 : list_for_each_entry_safe(work, n, &pool->worklist, entry) {
2519 0 : if (get_work_pwq(work) == pwq) {
2520 0 : if (first)
2521 0 : pool->watchdog_ts = jiffies;
2522 0 : move_linked_works(work, scheduled, &n);
2523 : }
2524 0 : first = false;
2525 : }
2526 :
2527 0 : if (!list_empty(scheduled)) {
2528 0 : process_scheduled_works(rescuer);
2529 :
2530 : /*
2531 : * The above execution of rescued work items could
2532 : * have created more to rescue through
2533 : * pwq_activate_first_delayed() or chained
2534 : * queueing. Let's put @pwq back on mayday list so
2535 : * that such back-to-back work items, which may be
2536 : * being used to relieve memory pressure, don't
2537 : * incur MAYDAY_INTERVAL delay in between.
2538 : */
2539 0 : if (pwq->nr_active && need_to_create_worker(pool)) {
2540 0 : raw_spin_lock(&wq_mayday_lock);
2541 : /*
2542 : * Queue iff we aren't racing destruction
2543 : * and somebody else hasn't queued it already.
2544 : */
2545 0 : if (wq->rescuer && list_empty(&pwq->mayday_node)) {
2546 0 : get_pwq(pwq);
2547 0 : list_add_tail(&pwq->mayday_node, &wq->maydays);
2548 : }
2549 0 : raw_spin_unlock(&wq_mayday_lock);
2550 : }
2551 : }
2552 :
2553 : /*
2554 : * Put the reference grabbed by send_mayday(). @pool won't
2555 : * go away while we're still attached to it.
2556 : */
2557 0 : put_pwq(pwq);
2558 :
2559 : /*
2560 : * Leave this pool. If need_more_worker() is %true, notify a
2561 : * regular worker; otherwise, we end up with 0 concurrency
2562 : * and stalling the execution.
2563 : */
2564 0 : if (need_more_worker(pool))
2565 0 : wake_up_worker(pool);
2566 :
2567 0 : raw_spin_unlock_irq(&pool->lock);
2568 :
2569 0 : worker_detach_from_pool(rescuer);
2570 :
2571 0 : raw_spin_lock_irq(&wq_mayday_lock);
2572 : }
2573 :
2574 7 : raw_spin_unlock_irq(&wq_mayday_lock);
2575 :
2576 7 : if (should_stop) {
2577 0 : __set_current_state(TASK_RUNNING);
2578 0 : set_pf_worker(false);
2579 0 : return 0;
2580 : }
2581 :
2582 : /* rescuers should never participate in concurrency management */
2583 7 : WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
2584 7 : schedule();
2585 0 : goto repeat;
2586 : }
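/*
 * Illustrative sketch: a workqueue that participates in memory reclaim gets
 * a rescuer by being allocated with WQ_MEM_RECLAIM. The example_* names are
 * hypothetical; alloc_workqueue() is the real API.
 */
static struct workqueue_struct *example_reclaim_wq;

static int __init example_reclaim_init(void)
{
	example_reclaim_wq = alloc_workqueue("example_reclaim",
					     WQ_MEM_RECLAIM, 0);
	return example_reclaim_wq ? 0 : -ENOMEM;
}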
2587 :
2588 : /**
2589 : * check_flush_dependency - check for flush dependency sanity
2590 : * @target_wq: workqueue being flushed
2591 : * @target_work: work item being flushed (NULL for workqueue flushes)
2592 : *
2593 : * %current is trying to flush the whole @target_wq or @target_work on it.
2594 : * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
2595 : * reclaiming memory or running on a workqueue which doesn't have
2596 : * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
2597 : * a deadlock.
2598 : */
2599 11 : static void check_flush_dependency(struct workqueue_struct *target_wq,
2600 : struct work_struct *target_work)
2601 : {
2602 11 : work_func_t target_func = target_work ? target_work->func : NULL;
2603 11 : struct worker *worker;
2604 :
2605 11 : if (target_wq->flags & WQ_MEM_RECLAIM)
2606 : return;
2607 :
2608 8 : worker = current_wq_worker();
2609 :
2610 8 : WARN_ONCE(current->flags & PF_MEMALLOC,
2611 : "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
2612 : current->pid, current->comm, target_wq->name, target_func);
2613 16 : WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
2614 : (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
2615 : "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps",
2616 : worker->current_pwq->wq->name, worker->current_func,
2617 : target_wq->name, target_func);
2618 : }
2619 :
2620 : struct wq_barrier {
2621 : struct work_struct work;
2622 : struct completion done;
2623 : struct task_struct *task; /* purely informational */
2624 : };
2625 :
2626 11 : static void wq_barrier_func(struct work_struct *work)
2627 : {
2628 11 : struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2629 11 : complete(&barr->done);
2630 11 : }
2631 :
2632 : /**
2633 : * insert_wq_barrier - insert a barrier work
2634 : * @pwq: pwq to insert barrier into
2635 : * @barr: wq_barrier to insert
2636 : * @target: target work to attach @barr to
2637 : * @worker: worker currently executing @target, NULL if @target is not executing
2638 : *
2639 : * @barr is linked to @target such that @barr is completed only after
2640 : * @target finishes execution. Please note that the ordering
2641 : * guarantee is observed only with respect to @target and on the local
2642 : * cpu.
2643 : *
2644 : * Currently, a queued barrier can't be canceled. This is because
2645 : * try_to_grab_pending() can't determine whether the work to be
2646 : * grabbed is at the head of the queue and thus can't clear LINKED
2647 : * flag of the previous work while there must be a valid next work
2648 : * after a work with LINKED flag set.
2649 : *
2650 : * Note that when @worker is non-NULL, @target may be modified
2651 : * underneath us, so we can't reliably determine pwq from @target.
2652 : *
2653 : * CONTEXT:
2654 : * raw_spin_lock_irq(pool->lock).
2655 : */
2656 11 : static void insert_wq_barrier(struct pool_workqueue *pwq,
2657 : struct wq_barrier *barr,
2658 : struct work_struct *target, struct worker *worker)
2659 : {
2660 11 : struct list_head *head;
2661 11 : unsigned int linked = 0;
2662 :
2663 : /*
2664 : * debugobject calls are safe here even with pool->lock locked
2665 : * as we know for sure that this will not trigger any of the
2666 : * checks and call back into the fixup functions where we
2667 : * might deadlock.
2668 : */
2669 11 : INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
2670 11 : __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2671 :
2672 11 : init_completion_map(&barr->done, &target->lockdep_map);
2673 :
2674 11 : barr->task = current;
2675 :
2676 : /*
2677 : * If @target is currently being executed, schedule the
2678 : * barrier to the worker; otherwise, put it after @target.
2679 : */
2680 11 : if (worker)
2681 3 : head = worker->scheduled.next;
2682 : else {
2683 8 : unsigned long *bits = work_data_bits(target);
2684 :
2685 8 : head = target->entry.next;
2686 : /* there can already be other linked works, inherit and set */
2687 8 : linked = *bits & WORK_STRUCT_LINKED;
2688 8 : __set_bit(WORK_STRUCT_LINKED_BIT, bits);
2689 : }
2690 :
2691 11 : debug_work_activate(&barr->work);
2692 11 : insert_work(pwq, &barr->work, head,
2693 11 : work_color_to_flags(WORK_NO_COLOR) | linked);
2694 11 : }
2695 :
2696 : /**
2697 : * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
2698 : * @wq: workqueue being flushed
2699 : * @flush_color: new flush color, < 0 for no-op
2700 : * @work_color: new work color, < 0 for no-op
2701 : *
2702 : * Prepare pwqs for workqueue flushing.
2703 : *
2704 : * If @flush_color is non-negative, flush_color on all pwqs should be
2705 : * -1. If no pwq has in-flight commands at the specified color, all
2706 : * pwq->flush_color's stay at -1 and %false is returned. If any pwq
2707 : * has in flight commands, its pwq->flush_color is set to
2708 : * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
2709 : * wakeup logic is armed and %true is returned.
2710 : *
2711 : * The caller should have initialized @wq->first_flusher prior to
2712 : * calling this function with non-negative @flush_color. If
2713 : * @flush_color is negative, no flush color update is done and %false
2714 : * is returned.
2715 : *
2716 : * If @work_color is non-negative, all pwqs should have the same
2717 : * work_color which is previous to @work_color and all will be
2718 : * advanced to @work_color.
2719 : *
2720 : * CONTEXT:
2721 : * mutex_lock(wq->mutex).
2722 : *
2723 : * Return:
2724 : * %true if @flush_color >= 0 and there's something to flush. %false
2725 : * otherwise.
2726 : */
2727 49 : static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
2728 : int flush_color, int work_color)
2729 : {
2730 49 : bool wait = false;
2731 49 : struct pool_workqueue *pwq;
2732 :
2733 49 : if (flush_color >= 0) {
2734 49 : WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
2735 49 : atomic_set(&wq->nr_pwqs_to_flush, 1);
2736 : }
2737 :
2738 242 : for_each_pwq(pwq, wq) {
2739 193 : struct worker_pool *pool = pwq->pool;
2740 :
2741 193 : raw_spin_lock_irq(&pool->lock);
2742 :
2743 193 : if (flush_color >= 0) {
2744 193 : WARN_ON_ONCE(pwq->flush_color != -1);
2745 :
2746 193 : if (pwq->nr_in_flight[flush_color]) {
2747 0 : pwq->flush_color = flush_color;
2748 0 : atomic_inc(&wq->nr_pwqs_to_flush);
2749 0 : wait = true;
2750 : }
2751 : }
2752 :
2753 193 : if (work_color >= 0) {
2754 193 : WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
2755 193 : pwq->work_color = work_color;
2756 : }
2757 :
2758 193 : raw_spin_unlock_irq(&pool->lock);
2759 : }
2760 :
2761 98 : if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
2762 49 : complete(&wq->first_flusher->done);
2763 :
2764 49 : return wait;
2765 : }
2766 :
2767 : /**
2768 : * flush_workqueue - ensure that any scheduled work has run to completion.
2769 : * @wq: workqueue to flush
2770 : *
2771 : * This function sleeps until all work items which were queued on entry
2772 : * have finished execution, but it is not livelocked by new incoming ones.
2773 : */
2774 49 : void flush_workqueue(struct workqueue_struct *wq)
2775 : {
2776 98 : struct wq_flusher this_flusher = {
2777 : .list = LIST_HEAD_INIT(this_flusher.list),
2778 : .flush_color = -1,
2779 49 : .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
2780 : };
2781 49 : int next_color;
2782 :
2783 49 : if (WARN_ON(!wq_online))
2784 0 : return;
2785 :
2786 49 : lock_map_acquire(&wq->lockdep_map);
2787 49 : lock_map_release(&wq->lockdep_map);
2788 :
2789 49 : mutex_lock(&wq->mutex);
2790 :
2791 : /*
2792 : * Start-to-wait phase
2793 : */
2794 49 : next_color = work_next_color(wq->work_color);
2795 :
2796 49 : if (next_color != wq->flush_color) {
2797 : /*
2798 : * Color space is not full. The current work_color
2799 : * becomes our flush_color and work_color is advanced
2800 : * by one.
2801 : */
2802 49 : WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
2803 49 : this_flusher.flush_color = wq->work_color;
2804 49 : wq->work_color = next_color;
2805 :
2806 49 : if (!wq->first_flusher) {
2807 : /* no flush in progress, become the first flusher */
2808 49 : WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2809 :
2810 49 : wq->first_flusher = &this_flusher;
2811 :
2812 49 : if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
2813 : wq->work_color)) {
2814 : /* nothing to flush, done */
2815 49 : wq->flush_color = next_color;
2816 49 : wq->first_flusher = NULL;
2817 49 : goto out_unlock;
2818 : }
2819 : } else {
2820 : /* wait in queue */
2821 0 : WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
2822 0 : list_add_tail(&this_flusher.list, &wq->flusher_queue);
2823 0 : flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2824 : }
2825 : } else {
2826 : /*
2827 : * Oops, color space is full, wait on overflow queue.
2828 : * The next flush completion will assign us
2829 : * flush_color and transfer to flusher_queue.
2830 : */
2831 0 : list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2832 : }
2833 :
2834 0 : check_flush_dependency(wq, NULL);
2835 :
2836 0 : mutex_unlock(&wq->mutex);
2837 :
2838 0 : wait_for_completion(&this_flusher.done);
2839 :
2840 : /*
2841 : * Wake-up-and-cascade phase
2842 : *
2843 : * First flushers are responsible for cascading flushes and
2844 : * handling overflow. Non-first flushers can simply return.
2845 : */
2846 0 : if (READ_ONCE(wq->first_flusher) != &this_flusher)
2847 : return;
2848 :
2849 0 : mutex_lock(&wq->mutex);
2850 :
2851 : /* we might have raced, check again with mutex held */
2852 0 : if (wq->first_flusher != &this_flusher)
2853 0 : goto out_unlock;
2854 :
2855 0 : WRITE_ONCE(wq->first_flusher, NULL);
2856 :
2857 0 : WARN_ON_ONCE(!list_empty(&this_flusher.list));
2858 0 : WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2859 :
2860 0 : while (true) {
2861 0 : struct wq_flusher *next, *tmp;
2862 :
2863 : /* complete all the flushers sharing the current flush color */
2864 0 : list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2865 0 : if (next->flush_color != wq->flush_color)
2866 : break;
2867 0 : list_del_init(&next->list);
2868 0 : complete(&next->done);
2869 : }
2870 :
2871 0 : WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
2872 : wq->flush_color != work_next_color(wq->work_color));
2873 :
2874 : /* this flush_color is finished, advance by one */
2875 0 : wq->flush_color = work_next_color(wq->flush_color);
2876 :
2877 : /* one color has been freed, handle overflow queue */
2878 0 : if (!list_empty(&wq->flusher_overflow)) {
2879 : /*
2880 : * Assign the same color to all overflowed
2881 : * flushers, advance work_color and append to
2882 : * flusher_queue. This is the start-to-wait
2883 : * phase for these overflowed flushers.
2884 : */
2885 0 : list_for_each_entry(tmp, &wq->flusher_overflow, list)
2886 0 : tmp->flush_color = wq->work_color;
2887 :
2888 0 : wq->work_color = work_next_color(wq->work_color);
2889 :
2890 0 : list_splice_tail_init(&wq->flusher_overflow,
2891 : &wq->flusher_queue);
2892 0 : flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2893 : }
2894 :
2895 0 : if (list_empty(&wq->flusher_queue)) {
2896 0 : WARN_ON_ONCE(wq->flush_color != wq->work_color);
2897 : break;
2898 : }
2899 :
2900 : /*
2901 : * Need to flush more colors. Make the next flusher
2902 : * the new first flusher and arm pwqs.
2903 : */
2904 0 : WARN_ON_ONCE(wq->flush_color == wq->work_color);
2905 0 : WARN_ON_ONCE(wq->flush_color != next->flush_color);
2906 :
2907 0 : list_del_init(&next->list);
2908 0 : wq->first_flusher = next;
2909 :
2910 0 : if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
2911 : break;
2912 :
2913 : /*
2914 : * Meh... this color is already done, clear first
2915 : * flusher and repeat cascading.
2916 : */
2917 0 : wq->first_flusher = NULL;
2918 : }
2919 :
2920 0 : out_unlock:
2921 49 : mutex_unlock(&wq->mutex);
2922 : }
2923 : EXPORT_SYMBOL(flush_workqueue);
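/*
 * Illustrative sketch: wait for everything already queued on a workqueue.
 * example_wq, example_work and example_sync_point() are hypothetical.
 */
static void example_sync_point(struct workqueue_struct *example_wq,
			       struct work_struct *example_work)
{
	queue_work(example_wq, example_work);
	/* returns only after example_work and everything queued before it ran */
	flush_workqueue(example_wq);
}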
2924 :
2925 : /**
2926 : * drain_workqueue - drain a workqueue
2927 : * @wq: workqueue to drain
2928 : *
2929 : * Wait until the workqueue becomes empty. While draining is in progress,
2930 : * only chain queueing is allowed. IOW, only currently pending or running
2931 : * work items on @wq can queue further work items on it. @wq is flushed
2932 : * repeatedly until it becomes empty. The number of flushes is determined
2933 : * by the depth of chaining and should be relatively small. Whine if it
2934 : * takes too long.
2935 : */
2936 1 : void drain_workqueue(struct workqueue_struct *wq)
2937 : {
2938 1 : unsigned int flush_cnt = 0;
2939 1 : struct pool_workqueue *pwq;
2940 :
2941 : /*
2942 : * __queue_work() needs to test whether there are drainers, is much
2943 : * hotter than drain_workqueue() and already looks at @wq->flags.
2944 : * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
2945 : */
2946 1 : mutex_lock(&wq->mutex);
2947 1 : if (!wq->nr_drainers++)
2948 1 : wq->flags |= __WQ_DRAINING;
2949 1 : mutex_unlock(&wq->mutex);
2950 1 : reflush:
2951 1 : flush_workqueue(wq);
2952 :
2953 1 : mutex_lock(&wq->mutex);
2954 :
2955 2 : for_each_pwq(pwq, wq) {
2956 1 : bool drained;
2957 :
2958 1 : raw_spin_lock_irq(&pwq->pool->lock);
2959 1 : drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
2960 1 : raw_spin_unlock_irq(&pwq->pool->lock);
2961 :
2962 1 : if (drained)
2963 1 : continue;
2964 :
2965 0 : if (++flush_cnt == 10 ||
2966 0 : (flush_cnt % 100 == 0 && flush_cnt <= 1000))
2967 0 : pr_warn("workqueue %s: %s() isn't complete after %u tries\n",
2968 : wq->name, __func__, flush_cnt);
2969 :
2970 0 : mutex_unlock(&wq->mutex);
2971 0 : goto reflush;
2972 : }
2973 :
2974 1 : if (!--wq->nr_drainers)
2975 1 : wq->flags &= ~__WQ_DRAINING;
2976 1 : mutex_unlock(&wq->mutex);
2977 1 : }
2978 : EXPORT_SYMBOL_GPL(drain_workqueue);
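/*
 * Illustrative sketch: drain before tearing down a workqueue whose work
 * items may chain-queue more work. Note that destroy_workqueue() drains
 * internally, so an explicit drain is only needed when the queue must be
 * empty before other teardown steps. example_teardown() is a hypothetical
 * helper.
 */
static void example_teardown(struct workqueue_struct *wq)
{
	drain_workqueue(wq);
	destroy_workqueue(wq);
}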
2979 :
2980 475 : static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
2981 : bool from_cancel)
2982 : {
2983 475 : struct worker *worker = NULL;
2984 475 : struct worker_pool *pool;
2985 475 : struct pool_workqueue *pwq;
2986 :
2987 475 : might_sleep();
2988 :
2989 475 : rcu_read_lock();
2990 475 : pool = get_work_pool(work);
2991 475 : if (!pool) {
2992 458 : rcu_read_unlock();
2993 458 : return false;
2994 : }
2995 :
2996 17 : raw_spin_lock_irq(&pool->lock);
2997 : /* see the comment in try_to_grab_pending() with the same code */
2998 17 : pwq = get_work_pwq(work);
2999 17 : if (pwq) {
3000 8 : if (unlikely(pwq->pool != pool))
3001 0 : goto already_gone;
3002 : } else {
3003 9 : worker = find_worker_executing_work(pool, work);
3004 9 : if (!worker)
3005 6 : goto already_gone;
3006 3 : pwq = worker->current_pwq;
3007 : }
3008 :
3009 11 : check_flush_dependency(pwq->wq, work);
3010 :
3011 11 : insert_wq_barrier(pwq, barr, work, worker);
3012 11 : raw_spin_unlock_irq(&pool->lock);
3013 :
3014 : /*
3015 : * Force a lock recursion deadlock when using flush_work() inside a
3016 : * single-threaded or rescuer equipped workqueue.
3017 : *
3018 : * For single threaded workqueues the deadlock happens when the work
3019 : * is after the work issuing the flush_work(). For rescuer equipped
3020 : * workqueues the deadlock happens when the rescuer stalls, blocking
3021 : * forward progress.
3022 : */
3023 11 : if (!from_cancel &&
3024 11 : (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
3025 3 : lock_map_acquire(&pwq->wq->lockdep_map);
3026 3 : lock_map_release(&pwq->wq->lockdep_map);
3027 : }
3028 11 : rcu_read_unlock();
3029 11 : return true;
3030 6 : already_gone:
3031 6 : raw_spin_unlock_irq(&pool->lock);
3032 6 : rcu_read_unlock();
3033 6 : return false;
3034 : }
3035 :
3036 475 : static bool __flush_work(struct work_struct *work, bool from_cancel)
3037 : {
3038 475 : struct wq_barrier barr;
3039 :
3040 475 : if (WARN_ON(!wq_online))
3041 : return false;
3042 :
3043 475 : if (WARN_ON(!work->func))
3044 : return false;
3045 :
3046 475 : if (!from_cancel) {
3047 291 : lock_map_acquire(&work->lockdep_map);
3048 291 : lock_map_release(&work->lockdep_map);
3049 : }
3050 :
3051 475 : if (start_flush_work(work, &barr, from_cancel)) {
3052 11 : wait_for_completion(&barr.done);
3053 11 : destroy_work_on_stack(&barr.work);
3054 11 : return true;
3055 : } else {
3056 : return false;
3057 : }
3058 : }
3059 :
3060 : /**
3061 : * flush_work - wait for a work to finish executing the last queueing instance
3062 : * @work: the work to flush
3063 : *
3064 : * Wait until @work has finished execution. @work is guaranteed to be idle
3065 : * on return if it hasn't been requeued since flush started.
3066 : *
3067 : * Return:
3068 : * %true if flush_work() waited for the work to finish execution,
3069 : * %false if it was already idle.
3070 : */
3071 291 : bool flush_work(struct work_struct *work)
3072 : {
3073 285 : return __flush_work(work, false);
3074 : }
3075 : EXPORT_SYMBOL_GPL(flush_work);
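/*
 * Illustrative sketch: wait for the last queued instance of a work item to
 * finish. example_wait_for() is a hypothetical helper.
 */
static bool example_wait_for(struct work_struct *work)
{
	/* %true if we actually waited, %false if the work was already idle */
	return flush_work(work);
}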
3076 :
3077 : struct cwt_wait {
3078 : wait_queue_entry_t wait;
3079 : struct work_struct *work;
3080 : };
3081 :
3082 0 : static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
3083 : {
3084 0 : struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
3085 :
3086 0 : if (cwait->work != key)
3087 : return 0;
3088 0 : return autoremove_wake_function(wait, mode, sync, key);
3089 : }
3090 :
3091 184 : static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
3092 : {
3093 184 : static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
3094 184 : unsigned long flags;
3095 184 : int ret;
3096 :
3097 184 : do {
3098 184 : ret = try_to_grab_pending(work, is_dwork, &flags);
3099 : /*
3100 : * If someone else is already canceling, wait for it to
3101 : * finish. flush_work() doesn't work for PREEMPT_NONE
3102 : * because we may get scheduled between @work's completion
3103 : * and the other canceling task resuming and clearing
3104 : * CANCELING - flush_work() will return false immediately
3105 : * as @work is no longer busy, try_to_grab_pending() will
3106 : * return -ENOENT as @work is still being canceled and the
3107 : * other canceling task won't be able to clear CANCELING as
3108 : * we're hogging the CPU.
3109 : *
3110 : * Let's wait for completion using a waitqueue. As this
3111 : * may lead to the thundering herd problem, use a custom
3112 : * wake function which matches @work along with exclusive
3113 : * wait and wakeup.
3114 : */
3115 184 : if (unlikely(ret == -ENOENT)) {
3116 0 : struct cwt_wait cwait;
3117 :
3118 0 : init_wait(&cwait.wait);
3119 0 : cwait.wait.func = cwt_wakefn;
3120 0 : cwait.work = work;
3121 :
3122 0 : prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
3123 : TASK_UNINTERRUPTIBLE);
3124 0 : if (work_is_canceling(work))
3125 0 : schedule();
3126 0 : finish_wait(&cancel_waitq, &cwait.wait);
3127 : }
3128 184 : } while (unlikely(ret < 0));
3129 :
3130 : /* tell other tasks trying to grab @work to back off */
3131 184 : mark_work_canceling(work);
3132 184 : local_irq_restore(flags);
3133 :
3134 : /*
3135 : * This allows canceling during early boot. We know that @work
3136 : * isn't executing.
3137 : */
3138 184 : if (wq_online)
3139 184 : __flush_work(work, true);
3140 :
3141 184 : clear_work_data(work);
3142 :
3143 : /*
3144 : * Paired with prepare_to_wait() above so that either
3145 : * waitqueue_active() is visible here or !work_is_canceling() is
3146 : * visible there.
3147 : */
3148 184 : smp_mb();
3149 184 : if (waitqueue_active(&cancel_waitq))
3150 0 : __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
3151 :
3152 184 : return ret;
3153 : }
3154 :
3155 : /**
3156 : * cancel_work_sync - cancel a work and wait for it to finish
3157 : * @work: the work to cancel
3158 : *
3159 : * Cancel @work and wait for its execution to finish. This function
3160 : * can be used even if the work re-queues itself or migrates to
3161 : * another workqueue. On return from this function, @work is
3162 : * guaranteed to be not pending or executing on any CPU.
3163 : *
3164 : * cancel_work_sync(&delayed_work->work) must not be used for
3165 : * delayed_work's. Use cancel_delayed_work_sync() instead.
3166 : *
3167 : * The caller must ensure that the workqueue on which @work was last
3168 : * queued can't be destroyed before this function returns.
3169 : *
3170 : * Return:
3171 : * %true if @work was pending, %false otherwise.
3172 : */
3173 184 : bool cancel_work_sync(struct work_struct *work)
3174 : {
3175 184 : return __cancel_work_timer(work, false);
3176 : }
3177 : EXPORT_SYMBOL_GPL(cancel_work_sync);
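
/*
 * Illustrative sketch (not part of this file): the teardown pattern
 * cancel_work_sync() exists for.  All identifiers below (my_card,
 * my_card_event_fn, my_card_remove) are hypothetical.
 */
struct my_card {
	struct work_struct event_work;	/* INIT_WORK()'d in probe */
	int last_event;
};

static void my_card_event_fn(struct work_struct *work)
{
	struct my_card *card = container_of(work, struct my_card, event_work);

	/* runs in process context and may sleep */
	pr_info("handling event %d\n", card->last_event);
}

static void my_card_remove(struct my_card *card)
{
	/*
	 * Queueing sources (IRQs, timers, ...) must already be stopped.
	 * After this returns, event_work is neither pending nor running
	 * on any CPU, so freeing @card is safe.
	 */
	cancel_work_sync(&card->event_work);
	kfree(card);
}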
3178 :
3179 : /**
3180 : * flush_delayed_work - wait for a dwork to finish executing the last queueing
3181 : * @dwork: the delayed work to flush
3182 : *
3183 : * Delayed timer is cancelled and the pending work is queued for
3184 : * immediate execution. Like flush_work(), this function only
3185 : * considers the last queueing instance of @dwork.
3186 : *
3187 : * Return:
3188 : * %true if flush_work() waited for the work to finish execution,
3189 : * %false if it was already idle.
3190 : */
3191 6 : bool flush_delayed_work(struct delayed_work *dwork)
3192 : {
3193 6 : local_irq_disable();
3194 6 : if (del_timer_sync(&dwork->timer))
3195 6 : __queue_work(dwork->cpu, dwork->wq, &dwork->work);
3196 6 : local_irq_enable();
3197 6 : return flush_work(&dwork->work);
3198 : }
3199 : EXPORT_SYMBOL(flush_delayed_work);
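
/*
 * Illustrative sketch (not part of this file): flush_delayed_work() suits
 * suspend-style paths where a lazily scheduled update must not be lost.
 * my_stats_work is hypothetical and assumed INIT_DELAYED_WORK()'d elsewhere.
 */
static struct delayed_work my_stats_work;

static int my_stats_suspend(void)
{
	/*
	 * If the timer hasn't fired yet, the work is queued immediately;
	 * in either case we wait for the last queueing to finish.
	 */
	flush_delayed_work(&my_stats_work);
	return 0;
}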
3200 :
3201 : /**
3202 : * flush_rcu_work - wait for a rwork to finish executing the last queueing
3203 : * @rwork: the rcu work to flush
3204 : *
3205 : * Return:
3206 : * %true if flush_rcu_work() waited for the work to finish execution,
3207 : * %false if it was already idle.
3208 : */
3209 0 : bool flush_rcu_work(struct rcu_work *rwork)
3210 : {
3211 0 : if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
3212 0 : rcu_barrier();
3213 0 : flush_work(&rwork->work);
3214 0 : return true;
3215 : } else {
3216 0 : return flush_work(&rwork->work);
3217 : }
3218 : }
3219 : EXPORT_SYMBOL(flush_rcu_work);
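
/*
 * Illustrative sketch (not part of this file): rcu_work defers the work
 * function until after an RCU grace period, e.g. to free an object that
 * lockless readers may still reference.  struct my_node and its helpers
 * are hypothetical.
 */
struct my_node {
	struct rcu_work free_rwork;
	/* payload looked up under rcu_read_lock() */
};

static void my_node_free_workfn(struct work_struct *work)
{
	struct my_node *node = container_of(to_rcu_work(work),
					    struct my_node, free_rwork);

	kfree(node);	/* all pre-existing RCU readers are done by now */
}

static void my_node_release(struct my_node *node)
{
	INIT_RCU_WORK(&node->free_rwork, my_node_free_workfn);
	queue_rcu_work(system_wq, &node->free_rwork);
}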
3220 :
3221 2 : static bool __cancel_work(struct work_struct *work, bool is_dwork)
3222 : {
3223 2 : unsigned long flags;
3224 2 : int ret;
3225 :
3226 2 : do {
3227 2 : ret = try_to_grab_pending(work, is_dwork, &flags);
3228 2 : } while (unlikely(ret == -EAGAIN));
3229 :
3230 2 : if (unlikely(ret < 0))
3231 : return false;
3232 :
3233 2 : set_work_pool_and_clear_pending(work, get_work_pool_id(work));
3234 2 : local_irq_restore(flags);
3235 2 : return ret;
3236 : }
3237 :
3238 : /**
3239 : * cancel_delayed_work - cancel a delayed work
3240 : * @dwork: delayed_work to cancel
3241 : *
3242 : * Kill off a pending delayed_work.
3243 : *
3244 : * Return: %true if @dwork was pending and canceled; %false if it wasn't
3245 : * pending.
3246 : *
3247 : * Note:
3248 : * The work callback function may still be running on return, unless
3249 : * this function returns %true and the work doesn't re-arm itself. Explicitly
3250 : * flush or use cancel_delayed_work_sync() to wait on it.
3251 : *
3252 : * This function is safe to call from any context including IRQ handler.
3253 : */
3254 2 : bool cancel_delayed_work(struct delayed_work *dwork)
3255 : {
3256 2 : return __cancel_work(&dwork->work, true);
3257 : }
3258 : EXPORT_SYMBOL(cancel_delayed_work);
3259 :
3260 : /**
3261 : * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
3262 : * @dwork: the delayed work to cancel
3263 : *
3264 : * This is cancel_work_sync() for delayed works.
3265 : *
3266 : * Return:
3267 : * %true if @dwork was pending, %false otherwise.
3268 : */
3269 0 : bool cancel_delayed_work_sync(struct delayed_work *dwork)
3270 : {
3271 0 : return __cancel_work_timer(&dwork->work, true);
3272 : }
3273 : EXPORT_SYMBOL(cancel_delayed_work_sync);
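
/*
 * Illustrative sketch (not part of this file): a self re-arming poll and
 * the two ways to stop it.  my_poll_work is hypothetical and assumed
 * INIT_DELAYED_WORK()'d with my_poll_fn() at init time.
 */
static struct delayed_work my_poll_work;

static void my_poll_fn(struct work_struct *work)
{
	/* ... poll the hardware ... then re-arm for one second later */
	schedule_delayed_work(&my_poll_work, HZ);
}

static void my_poll_pause(void)
{
	/* safe from IRQ context; the callback may still be running */
	cancel_delayed_work(&my_poll_work);
}

static void my_poll_stop(void)
{
	/* handles the re-arm race; neither pending nor running afterwards */
	cancel_delayed_work_sync(&my_poll_work);
}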
3274 :
3275 : /**
3276 : * schedule_on_each_cpu - execute a function synchronously on each online CPU
3277 : * @func: the function to call
3278 : *
3279 : * schedule_on_each_cpu() executes @func on each online CPU using the
3280 : * system workqueue and blocks until all CPUs have completed.
3281 : * schedule_on_each_cpu() is very slow.
3282 : *
3283 : * Return:
3284 : * 0 on success, -errno on failure.
3285 : */
3286 0 : int schedule_on_each_cpu(work_func_t func)
3287 : {
3288 0 : int cpu;
3289 0 : struct work_struct __percpu *works;
3290 :
3291 0 : works = alloc_percpu(struct work_struct);
3292 0 : if (!works)
3293 : return -ENOMEM;
3294 :
3295 0 : get_online_cpus();
3296 :
3297 0 : for_each_online_cpu(cpu) {
3298 0 : struct work_struct *work = per_cpu_ptr(works, cpu);
3299 :
3300 0 : INIT_WORK(work, func);
3301 0 : schedule_work_on(cpu, work);
3302 : }
3303 :
3304 0 : for_each_online_cpu(cpu)
3305 0 : flush_work(per_cpu_ptr(works, cpu));
3306 :
3307 0 : put_online_cpus();
3308 0 : free_percpu(works);
3309 0 : return 0;
3310 : }
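
/*
 * Illustrative sketch (not part of this file): draining per-CPU state from
 * every online CPU in one blocking call.  my_drain_cpu() is hypothetical.
 */
static void my_drain_cpu(struct work_struct *dummy)
{
	/* runs in process context on the CPU it was queued for */
	pr_debug("draining per-CPU caches on CPU %d\n", raw_smp_processor_id());
}

static int my_drain_all_cpus(void)
{
	return schedule_on_each_cpu(my_drain_cpu);	/* 0 or -ENOMEM */
}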
3311 :
3312 : /**
3313 : * execute_in_process_context - reliably execute the routine with user context
3314 : * @fn: the function to execute
3315 : * @ew: guaranteed storage for the execute work structure (must
3316 : * be available when the work executes)
3317 : *
3318 : * Executes the function immediately if process context is available,
3319 : * otherwise schedules the function for delayed execution.
3320 : *
3321 : * Return: 0 - function was executed
3322 : * 1 - function was scheduled for execution
3323 : */
3324 0 : int execute_in_process_context(work_func_t fn, struct execute_work *ew)
3325 : {
3326 0 : if (!in_interrupt()) {
3327 0 : fn(&ew->work);
3328 0 : return 0;
3329 : }
3330 :
3331 0 : INIT_WORK(&ew->work, fn);
3332 0 : schedule_work(&ew->work);
3333 :
3334 0 : return 1;
3335 : }
3336 : EXPORT_SYMBOL_GPL(execute_in_process_context);
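
/*
 * Illustrative sketch (not part of this file): execute_in_process_context()
 * runs the callback immediately when the caller is already in process
 * context and only defers to the system workqueue from interrupt context.
 * Because execution may be deferred, @ew must not live on the caller's
 * stack; here it sits in the hypothetical my_port structure.
 */
struct my_port {
	struct execute_work shutdown_ew;
};

static void my_port_shutdown_fn(struct work_struct *work)
{
	struct my_port *port = container_of(work, struct my_port,
					    shutdown_ew.work);

	pr_info("shutting down port %p\n", port);
}

static void my_port_shutdown(struct my_port *port)	/* any context */
{
	execute_in_process_context(my_port_shutdown_fn, &port->shutdown_ew);
}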
3337 :
3338 : /**
3339 : * free_workqueue_attrs - free a workqueue_attrs
3340 : * @attrs: workqueue_attrs to free
3341 : *
3342 : * Undo alloc_workqueue_attrs().
3343 : */
3344 11 : void free_workqueue_attrs(struct workqueue_attrs *attrs)
3345 : {
3346 0 : if (attrs) {
3347 6 : free_cpumask_var(attrs->cpumask);
3348 6 : kfree(attrs);
3349 : }
3350 0 : }
3351 :
3352 : /**
3353 : * alloc_workqueue_attrs - allocate a workqueue_attrs
3354 : *
3355 : * Allocate a new workqueue_attrs, initialize with default settings and
3356 : * return it.
3357 : *
3358 : * Return: The allocated new workqueue_attrs on success. %NULL on failure.
3359 : */
3360 28 : struct workqueue_attrs *alloc_workqueue_attrs(void)
3361 : {
3362 28 : struct workqueue_attrs *attrs;
3363 :
3364 28 : attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
3365 28 : if (!attrs)
3366 0 : goto fail;
3367 28 : if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
3368 : goto fail;
3369 :
3370 28 : cpumask_copy(attrs->cpumask, cpu_possible_mask);
3371 28 : return attrs;
3372 0 : fail:
3373 0 : free_workqueue_attrs(attrs);
3374 0 : return NULL;
3375 : }
3376 :
3377 21 : static void copy_workqueue_attrs(struct workqueue_attrs *to,
3378 : const struct workqueue_attrs *from)
3379 : {
3380 21 : to->nice = from->nice;
3381 21 : cpumask_copy(to->cpumask, from->cpumask);
3382 : /*
3383 : * Unlike hash and equality test, this function doesn't ignore
3384 : * ->no_numa as it is used for both pool and wq attrs. Instead,
3385 : * get_unbound_pool() explicitly clears ->no_numa after copying.
3386 : */
3387 21 : to->no_numa = from->no_numa;
3388 : }
3389 :
3390 : /* hash value of the content of @attr */
3391 5 : static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
3392 : {
3393 5 : u32 hash = 0;
3394 :
3395 5 : hash = jhash_1word(attrs->nice, hash);
3396 5 : hash = jhash(cpumask_bits(attrs->cpumask),
3397 : BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
3398 5 : return hash;
3399 : }
3400 :
3401 : /* content equality test */
3402 4 : static bool wqattrs_equal(const struct workqueue_attrs *a,
3403 : const struct workqueue_attrs *b)
3404 : {
3405 4 : if (a->nice != b->nice)
3406 : return false;
3407 4 : if (!cpumask_equal(a->cpumask, b->cpumask))
3408 : return false;
3409 : return true;
3410 : }
3411 :
3412 : /**
3413 : * init_worker_pool - initialize a newly zalloc'd worker_pool
3414 : * @pool: worker_pool to initialize
3415 : *
3416 : * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs.
3417 : *
3418 : * Return: 0 on success, -errno on failure. Even on failure, all fields
3419 : * inside @pool proper are initialized and put_unbound_pool() can be called
3420 : * on @pool safely to release it.
3421 : */
3422 9 : static int init_worker_pool(struct worker_pool *pool)
3423 : {
3424 9 : raw_spin_lock_init(&pool->lock);
3425 9 : pool->id = -1;
3426 9 : pool->cpu = -1;
3427 9 : pool->node = NUMA_NO_NODE;
3428 9 : pool->flags |= POOL_DISASSOCIATED;
3429 9 : pool->watchdog_ts = jiffies;
3430 9 : INIT_LIST_HEAD(&pool->worklist);
3431 9 : INIT_LIST_HEAD(&pool->idle_list);
3432 9 : hash_init(pool->busy_hash);
3433 :
3434 9 : timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE);
3435 :
3436 9 : timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
3437 :
3438 9 : INIT_LIST_HEAD(&pool->workers);
3439 :
3440 9 : ida_init(&pool->worker_ida);
3441 9 : INIT_HLIST_NODE(&pool->hash_node);
3442 9 : pool->refcnt = 1;
3443 :
3444 : /* shouldn't fail above this point */
3445 9 : pool->attrs = alloc_workqueue_attrs();
3446 9 : if (!pool->attrs)
3447 0 : return -ENOMEM;
3448 : return 0;
3449 : }
3450 :
3451 : #ifdef CONFIG_LOCKDEP
3452 20 : static void wq_init_lockdep(struct workqueue_struct *wq)
3453 : {
3454 20 : char *lock_name;
3455 :
3456 20 : lockdep_register_key(&wq->key);
3457 20 : lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name);
3458 20 : if (!lock_name)
3459 0 : lock_name = wq->name;
3460 :
3461 20 : wq->lock_name = lock_name;
3462 20 : lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
3463 20 : }
3464 :
3465 1 : static void wq_unregister_lockdep(struct workqueue_struct *wq)
3466 : {
3467 1 : lockdep_unregister_key(&wq->key);
3468 : }
3469 :
3470 1 : static void wq_free_lockdep(struct workqueue_struct *wq)
3471 : {
3472 1 : if (wq->lock_name != wq->name)
3473 1 : kfree(wq->lock_name);
3474 : }
3475 : #else
3476 : static void wq_init_lockdep(struct workqueue_struct *wq)
3477 : {
3478 : }
3479 :
3480 : static void wq_unregister_lockdep(struct workqueue_struct *wq)
3481 : {
3482 : }
3483 :
3484 : static void wq_free_lockdep(struct workqueue_struct *wq)
3485 : {
3486 : }
3487 : #endif
3488 :
3489 1 : static void rcu_free_wq(struct rcu_head *rcu)
3490 : {
3491 1 : struct workqueue_struct *wq =
3492 1 : container_of(rcu, struct workqueue_struct, rcu);
3493 :
3494 1 : wq_free_lockdep(wq);
3495 :
3496 1 : if (!(wq->flags & WQ_UNBOUND))
3497 0 : free_percpu(wq->cpu_pwqs);
3498 : else
3499 1 : free_workqueue_attrs(wq->unbound_attrs);
3500 :
3501 1 : kfree(wq);
3502 1 : }
3503 :
3504 0 : static void rcu_free_pool(struct rcu_head *rcu)
3505 : {
3506 0 : struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
3507 :
3508 0 : ida_destroy(&pool->worker_ida);
3509 0 : free_workqueue_attrs(pool->attrs);
3510 0 : kfree(pool);
3511 0 : }
3512 :
3513 : /* This returns with the lock held on success (pool manager is inactive). */
3514 0 : static bool wq_manager_inactive(struct worker_pool *pool)
3515 : {
3516 0 : raw_spin_lock_irq(&pool->lock);
3517 :
3518 0 : if (pool->flags & POOL_MANAGER_ACTIVE) {
3519 0 : raw_spin_unlock_irq(&pool->lock);
3520 0 : return false;
3521 : }
3522 : return true;
3523 : }
3524 :
3525 : /**
3526 : * put_unbound_pool - put a worker_pool
3527 : * @pool: worker_pool to put
3528 : *
3529 : * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU
3530 : * safe manner. get_unbound_pool() calls this function on its failure path
3531 : * and this function should be able to release pools which went through,
3532 : * successfully or not, init_worker_pool().
3533 : *
3534 : * Should be called with wq_pool_mutex held.
3535 : */
3536 1 : static void put_unbound_pool(struct worker_pool *pool)
3537 : {
3538 1 : DECLARE_COMPLETION_ONSTACK(detach_completion);
3539 1 : struct worker *worker;
3540 :
3541 3 : lockdep_assert_held(&wq_pool_mutex);
3542 :
3543 1 : if (--pool->refcnt)
3544 1 : return;
3545 :
3546 : /* sanity checks */
3547 0 : if (WARN_ON(!(pool->cpu < 0)) ||
3548 0 : WARN_ON(!list_empty(&pool->worklist)))
3549 : return;
3550 :
3551 : /* release id and unhash */
3552 0 : if (pool->id >= 0)
3553 0 : idr_remove(&worker_pool_idr, pool->id);
3554 0 : hash_del(&pool->hash_node);
3555 :
3556 : /*
3557 : * Become the manager and destroy all workers. This prevents
3558 : * @pool's workers from blocking on attach_mutex. We're the last
3559 : * manager and @pool gets freed with the flag set.
3560 : * Because of how wq_manager_inactive() works, we will hold the
3561 : * spinlock after a successful wait.
3562 : */
3563 0 : rcuwait_wait_event(&manager_wait, wq_manager_inactive(pool),
3564 : TASK_UNINTERRUPTIBLE);
3565 0 : pool->flags |= POOL_MANAGER_ACTIVE;
3566 :
3567 0 : while ((worker = first_idle_worker(pool)))
3568 0 : destroy_worker(worker);
3569 0 : WARN_ON(pool->nr_workers || pool->nr_idle);
3570 0 : raw_spin_unlock_irq(&pool->lock);
3571 :
3572 0 : mutex_lock(&wq_pool_attach_mutex);
3573 0 : if (!list_empty(&pool->workers))
3574 0 : pool->detach_completion = &detach_completion;
3575 0 : mutex_unlock(&wq_pool_attach_mutex);
3576 :
3577 0 : if (pool->detach_completion)
3578 0 : wait_for_completion(pool->detach_completion);
3579 :
3580 : /* shut down the timers */
3581 0 : del_timer_sync(&pool->idle_timer);
3582 0 : del_timer_sync(&pool->mayday_timer);
3583 :
3584 : /* RCU protected to allow dereferences from get_work_pool() */
3585 0 : call_rcu(&pool->rcu, rcu_free_pool);
3586 : }
3587 :
3588 : /**
3589 : * get_unbound_pool - get a worker_pool with the specified attributes
3590 : * @attrs: the attributes of the worker_pool to get
3591 : *
3592 : * Obtain a worker_pool which has the same attributes as @attrs, bump the
3593 : * reference count and return it. If there already is a matching
3594 : * worker_pool, it will be used; otherwise, this function attempts to
3595 : * create a new one.
3596 : *
3597 : * Should be called with wq_pool_mutex held.
3598 : *
3599 : * Return: On success, a worker_pool with the same attributes as @attrs.
3600 : * On failure, %NULL.
3601 : */
3602 5 : static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
3603 : {
3604 5 : u32 hash = wqattrs_hash(attrs);
3605 5 : struct worker_pool *pool;
3606 5 : int node;
3607 5 : int target_node = NUMA_NO_NODE;
3608 :
3609 15 : lockdep_assert_held(&wq_pool_mutex);
3610 :
3611 : /* do we already have a matching pool? */
3612 10 : hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
3613 4 : if (wqattrs_equal(pool->attrs, attrs)) {
3614 4 : pool->refcnt++;
3615 4 : return pool;
3616 : }
3617 : }
3618 :
3619 : /* if cpumask is contained inside a NUMA node, we belong to that node */
3620 1 : if (wq_numa_enabled) {
3621 0 : for_each_node(node) {
3622 0 : if (cpumask_subset(attrs->cpumask,
3623 0 : wq_numa_possible_cpumask[node])) {
3624 : target_node = node;
3625 : break;
3626 : }
3627 : }
3628 : }
3629 :
3630 : /* nope, create a new one */
3631 1 : pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node);
3632 1 : if (!pool || init_worker_pool(pool) < 0)
3633 0 : goto fail;
3634 :
3635 1 : lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */
3636 1 : copy_workqueue_attrs(pool->attrs, attrs);
3637 1 : pool->node = target_node;
3638 :
3639 : /*
3640 : * no_numa isn't a worker_pool attribute, always clear it. See
3641 : * 'struct workqueue_attrs' comments for detail.
3642 : */
3643 1 : pool->attrs->no_numa = false;
3644 :
3645 1 : if (worker_pool_assign_id(pool) < 0)
3646 0 : goto fail;
3647 :
3648 : /* create and start the initial worker */
3649 1 : if (wq_online && !create_worker(pool))
3650 0 : goto fail;
3651 :
3652 : /* install */
3653 1 : hash_add(unbound_pool_hash, &pool->hash_node, hash);
3654 :
3655 1 : return pool;
3656 0 : fail:
3657 0 : if (pool)
3658 0 : put_unbound_pool(pool);
3659 : return NULL;
3660 : }
3661 :
3662 1 : static void rcu_free_pwq(struct rcu_head *rcu)
3663 : {
3664 1 : kmem_cache_free(pwq_cache,
3665 1 : container_of(rcu, struct pool_workqueue, rcu));
3666 1 : }
3667 :
3668 : /*
3669 : * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
3670 : * and needs to be destroyed.
3671 : */
3672 1 : static void pwq_unbound_release_workfn(struct work_struct *work)
3673 : {
3674 1 : struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
3675 : unbound_release_work);
3676 1 : struct workqueue_struct *wq = pwq->wq;
3677 1 : struct worker_pool *pool = pwq->pool;
3678 1 : bool is_last;
3679 :
3680 1 : if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
3681 : return;
3682 :
3683 1 : mutex_lock(&wq->mutex);
3684 1 : list_del_rcu(&pwq->pwqs_node);
3685 1 : is_last = list_empty(&wq->pwqs);
3686 1 : mutex_unlock(&wq->mutex);
3687 :
3688 1 : mutex_lock(&wq_pool_mutex);
3689 1 : put_unbound_pool(pool);
3690 1 : mutex_unlock(&wq_pool_mutex);
3691 :
3692 1 : call_rcu(&pwq->rcu, rcu_free_pwq);
3693 :
3694 : /*
3695 : * If we're the last pwq going away, @wq is already dead and no one
3696 : * is gonna access it anymore. Schedule RCU free.
3697 : */
3698 1 : if (is_last) {
3699 1 : wq_unregister_lockdep(wq);
3700 1 : call_rcu(&wq->rcu, rcu_free_wq);
3701 : }
3702 : }
3703 :
3704 : /**
3705 : * pwq_adjust_max_active - update a pwq's max_active to the current setting
3706 : * @pwq: target pool_workqueue
3707 : *
3708 : * If @pwq isn't freezing, set @pwq->max_active to the associated
3709 : * workqueue's saved_max_active and activate delayed work items
3710 : * accordingly. If @pwq is freezing, clear @pwq->max_active to zero.
3711 : */
3712 130 : static void pwq_adjust_max_active(struct pool_workqueue *pwq)
3713 : {
3714 130 : struct workqueue_struct *wq = pwq->wq;
3715 130 : bool freezable = wq->flags & WQ_FREEZABLE;
3716 130 : unsigned long flags;
3717 :
3718 : /* for @wq->saved_max_active */
3719 260 : lockdep_assert_held(&wq->mutex);
3720 :
3721 : /* fast exit for non-freezable wqs */
3722 130 : if (!freezable && pwq->max_active == wq->saved_max_active)
3723 : return;
3724 :
3725 : /* this function can be called during early boot w/ irq disabled */
3726 73 : raw_spin_lock_irqsave(&pwq->pool->lock, flags);
3727 :
3728 : /*
3729 : * During [un]freezing, the caller is responsible for ensuring that
3730 : * this function is called at least once after @workqueue_freezing
3731 : * is updated and visible.
3732 : */
3733 73 : if (!freezable || !workqueue_freezing) {
3734 73 : bool kick = false;
3735 :
3736 73 : pwq->max_active = wq->saved_max_active;
3737 :
3738 73 : while (!list_empty(&pwq->delayed_works) &&
3739 0 : pwq->nr_active < pwq->max_active) {
3740 0 : pwq_activate_first_delayed(pwq);
3741 0 : kick = true;
3742 : }
3743 :
3744 : /*
3745 : * Need to kick a worker after thawing or when an unbound wq's
3746 : * max_active is bumped. In realtime scenarios, always kicking a
3747 : * worker will cause interference on the isolated cpu cores, so
3748 : * let's kick iff work items were activated.
3749 : */
3750 73 : if (kick)
3751 0 : wake_up_worker(pwq->pool);
3752 : } else {
3753 0 : pwq->max_active = 0;
3754 : }
3755 :
3756 73 : raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
3757 : }
3758 :
3759 : /* initialize newly alloced @pwq which is associated with @wq and @pool */
3760 65 : static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
3761 : struct worker_pool *pool)
3762 : {
3763 65 : BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
3764 :
3765 65 : memset(pwq, 0, sizeof(*pwq));
3766 :
3767 65 : pwq->pool = pool;
3768 65 : pwq->wq = wq;
3769 65 : pwq->flush_color = -1;
3770 65 : pwq->refcnt = 1;
3771 65 : INIT_LIST_HEAD(&pwq->delayed_works);
3772 65 : INIT_LIST_HEAD(&pwq->pwqs_node);
3773 65 : INIT_LIST_HEAD(&pwq->mayday_node);
3774 65 : INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
3775 65 : }
3776 :
3777 : /* sync @pwq with the current state of its associated wq and link it */
3778 70 : static void link_pwq(struct pool_workqueue *pwq)
3779 : {
3780 70 : struct workqueue_struct *wq = pwq->wq;
3781 :
3782 140 : lockdep_assert_held(&wq->mutex);
3783 :
3784 : /* may be called multiple times, ignore if already linked */
3785 70 : if (!list_empty(&pwq->pwqs_node))
3786 : return;
3787 :
3788 : /* set the matching work_color */
3789 65 : pwq->work_color = wq->work_color;
3790 :
3791 : /* sync max_active to the current setting */
3792 65 : pwq_adjust_max_active(pwq);
3793 :
3794 : /* link in @pwq */
3795 65 : list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
3796 : }
3797 :
3798 : /* obtain a pool matching @attr and create a pwq associating the pool and @wq */
3799 5 : static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
3800 : const struct workqueue_attrs *attrs)
3801 : {
3802 5 : struct worker_pool *pool;
3803 5 : struct pool_workqueue *pwq;
3804 :
3805 15 : lockdep_assert_held(&wq_pool_mutex);
3806 :
3807 5 : pool = get_unbound_pool(attrs);
3808 5 : if (!pool)
3809 : return NULL;
3810 :
3811 5 : pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
3812 5 : if (!pwq) {
3813 0 : put_unbound_pool(pool);
3814 0 : return NULL;
3815 : }
3816 :
3817 5 : init_pwq(pwq, wq, pool);
3818 5 : return pwq;
3819 : }
3820 :
3821 : /**
3822 : * wq_calc_node_cpumask - calculate a wq_attrs' cpumask for the specified node
3823 : * @attrs: the wq_attrs of the default pwq of the target workqueue
3824 : * @node: the target NUMA node
3825 : * @cpu_going_down: if >= 0, the CPU to consider as offline
3826 : * @cpumask: outarg, the resulting cpumask
3827 : *
3828 : * Calculate the cpumask a workqueue with @attrs should use on @node. If
3829 : * @cpu_going_down is >= 0, that cpu is considered offline during
3830 : * calculation. The result is stored in @cpumask.
3831 : *
3832 : * If NUMA affinity is not enabled, @attrs->cpumask is always used. If
3833 : * enabled and @node has online CPUs requested by @attrs, the returned
3834 : * cpumask is the intersection of the possible CPUs of @node and
3835 : * @attrs->cpumask.
3836 : *
3837 : * The caller is responsible for ensuring that the cpumask of @node stays
3838 : * stable.
3839 : *
3840 : * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
3841 : * %false if equal.
3842 : */
3843 5 : static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
3844 : int cpu_going_down, cpumask_t *cpumask)
3845 : {
3846 5 : if (!wq_numa_enabled || attrs->no_numa)
3847 5 : goto use_dfl;
3848 :
3849 : /* does @node have any online CPUs @attrs wants? */
3850 0 : cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
3851 0 : if (cpu_going_down >= 0)
3852 0 : cpumask_clear_cpu(cpu_going_down, cpumask);
3853 :
3854 0 : if (cpumask_empty(cpumask))
3855 0 : goto use_dfl;
3856 :
3857 : /* yeap, return possible CPUs in @node that @attrs wants */
3858 0 : cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
3859 :
3860 0 : if (cpumask_empty(cpumask)) {
3861 0 : pr_warn_once("WARNING: workqueue cpumask: online intersect > "
3862 : "possible intersect\n");
3863 0 : return false;
3864 : }
3865 :
3866 0 : return !cpumask_equal(cpumask, attrs->cpumask);
3867 :
3868 5 : use_dfl:
3869 5 : cpumask_copy(cpumask, attrs->cpumask);
3870 5 : return false;
3871 : }
3872 :
3873 : /* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
3874 5 : static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
3875 : int node,
3876 : struct pool_workqueue *pwq)
3877 : {
3878 5 : struct pool_workqueue *old_pwq;
3879 :
3880 15 : lockdep_assert_held(&wq_pool_mutex);
3881 10 : lockdep_assert_held(&wq->mutex);
3882 :
3883 : /* link_pwq() can handle duplicate calls */
3884 5 : link_pwq(pwq);
3885 :
3886 5 : old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
3887 5 : rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
3888 5 : return old_pwq;
3889 : }
3890 :
3891 : /* context to store the prepared attrs & pwqs before applying */
3892 : struct apply_wqattrs_ctx {
3893 : struct workqueue_struct *wq; /* target workqueue */
3894 : struct workqueue_attrs *attrs; /* attrs to apply */
3895 : struct list_head list; /* queued for batching commit */
3896 : struct pool_workqueue *dfl_pwq;
3897 : struct pool_workqueue *pwq_tbl[];
3898 : };
3899 :
3900 : /* free the resources after success or abort */
3901 5 : static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
3902 : {
3903 5 : if (ctx) {
3904 5 : int node;
3905 :
3906 10 : for_each_node(node)
3907 5 : put_pwq_unlocked(ctx->pwq_tbl[node]);
3908 5 : put_pwq_unlocked(ctx->dfl_pwq);
3909 :
3910 5 : free_workqueue_attrs(ctx->attrs);
3911 :
3912 5 : kfree(ctx);
3913 : }
3914 5 : }
3915 :
3916 : /* allocate the attrs and pwqs for later installation */
3917 : static struct apply_wqattrs_ctx *
3918 5 : apply_wqattrs_prepare(struct workqueue_struct *wq,
3919 : const struct workqueue_attrs *attrs)
3920 : {
3921 5 : struct apply_wqattrs_ctx *ctx;
3922 5 : struct workqueue_attrs *new_attrs, *tmp_attrs;
3923 5 : int node;
3924 :
3925 15 : lockdep_assert_held(&wq_pool_mutex);
3926 :
3927 5 : ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);
3928 :
3929 5 : new_attrs = alloc_workqueue_attrs();
3930 5 : tmp_attrs = alloc_workqueue_attrs();
3931 5 : if (!ctx || !new_attrs || !tmp_attrs)
3932 0 : goto out_free;
3933 :
3934 : /*
3935 : * Calculate the attrs of the default pwq.
3936 : * If the user configured cpumask doesn't overlap with the
3937 : * wq_unbound_cpumask, we fallback to the wq_unbound_cpumask.
3938 : */
3939 5 : copy_workqueue_attrs(new_attrs, attrs);
3940 5 : cpumask_and(new_attrs->cpumask, new_attrs->cpumask, wq_unbound_cpumask);
3941 5 : if (unlikely(cpumask_empty(new_attrs->cpumask)))
3942 0 : cpumask_copy(new_attrs->cpumask, wq_unbound_cpumask);
3943 :
3944 : /*
3945 : * We may create multiple pwqs with differing cpumasks. Make a
3946 : * copy of @new_attrs which will be modified and used to obtain
3947 : * pools.
3948 : */
3949 5 : copy_workqueue_attrs(tmp_attrs, new_attrs);
3950 :
3951 : /*
3952 : * If something goes wrong during CPU up/down, we'll fall back to
3953 : * the default pwq covering whole @attrs->cpumask. Always create
3954 : * it even if we don't use it immediately.
3955 : */
3956 5 : ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
3957 5 : if (!ctx->dfl_pwq)
3958 0 : goto out_free;
3959 :
3960 10 : for_each_node(node) {
3961 5 : if (wq_calc_node_cpumask(new_attrs, node, -1, tmp_attrs->cpumask)) {
3962 0 : ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
3963 0 : if (!ctx->pwq_tbl[node])
3964 0 : goto out_free;
3965 : } else {
3966 5 : ctx->dfl_pwq->refcnt++;
3967 5 : ctx->pwq_tbl[node] = ctx->dfl_pwq;
3968 : }
3969 : }
3970 :
3971 : /* save the user configured attrs and sanitize it. */
3972 5 : copy_workqueue_attrs(new_attrs, attrs);
3973 5 : cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
3974 5 : ctx->attrs = new_attrs;
3975 :
3976 5 : ctx->wq = wq;
3977 5 : free_workqueue_attrs(tmp_attrs);
3978 5 : return ctx;
3979 :
3980 0 : out_free:
3981 0 : free_workqueue_attrs(tmp_attrs);
3982 0 : free_workqueue_attrs(new_attrs);
3983 0 : apply_wqattrs_cleanup(ctx);
3984 0 : return NULL;
3985 : }
3986 :
3987 : /* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
3988 5 : static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
3989 : {
3990 5 : int node;
3991 :
3992 : /* all pwqs have been created successfully, let's install'em */
3993 5 : mutex_lock(&ctx->wq->mutex);
3994 :
3995 5 : copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
3996 :
3997 : /* save the previous pwq and install the new one */
3998 10 : for_each_node(node)
3999 5 : ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node,
4000 : ctx->pwq_tbl[node]);
4001 :
4002 : /* @dfl_pwq might not have been used, ensure it's linked */
4003 5 : link_pwq(ctx->dfl_pwq);
4004 5 : swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
4005 :
4006 5 : mutex_unlock(&ctx->wq->mutex);
4007 5 : }
4008 :
4009 0 : static void apply_wqattrs_lock(void)
4010 : {
4011 : /* CPUs should stay stable across pwq creations and installations */
4012 0 : get_online_cpus();
4013 0 : mutex_lock(&wq_pool_mutex);
4014 0 : }
4015 :
4016 0 : static void apply_wqattrs_unlock(void)
4017 : {
4018 0 : mutex_unlock(&wq_pool_mutex);
4019 0 : put_online_cpus();
4020 0 : }
4021 :
4022 5 : static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
4023 : const struct workqueue_attrs *attrs)
4024 : {
4025 5 : struct apply_wqattrs_ctx *ctx;
4026 :
4027 : /* only unbound workqueues can change attributes */
4028 5 : if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
4029 : return -EINVAL;
4030 :
4031 : /* creating multiple pwqs breaks ordering guarantee */
4032 5 : if (!list_empty(&wq->pwqs)) {
4033 0 : if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4034 : return -EINVAL;
4035 :
4036 0 : wq->flags &= ~__WQ_ORDERED;
4037 : }
4038 :
4039 5 : ctx = apply_wqattrs_prepare(wq, attrs);
4040 5 : if (!ctx)
4041 : return -ENOMEM;
4042 :
4043 : /* the ctx has been prepared successfully, let's commit it */
4044 5 : apply_wqattrs_commit(ctx);
4045 5 : apply_wqattrs_cleanup(ctx);
4046 :
4047 5 : return 0;
4048 : }
4049 :
4050 : /**
4051 : * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
4052 : * @wq: the target workqueue
4053 : * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
4054 : *
4055 : * Apply @attrs to an unbound workqueue @wq. Unless disabled, on NUMA
4056 : * machines, this function maps a separate pwq to each NUMA node with
4057 : * possibles CPUs in @attrs->cpumask so that work items are affine to the
4058 : * NUMA node it was issued on. Older pwqs are released as in-flight work
4059 : * items finish. Note that a work item which repeatedly requeues itself
4060 : * back-to-back will stay on its current pwq.
4061 : *
4062 : * Performs GFP_KERNEL allocations.
4063 : *
4064 : * Assumes caller has CPU hotplug read exclusion, i.e. get_online_cpus().
4065 : *
4066 : * Return: 0 on success and -errno on failure.
4067 : */
4068 5 : int apply_workqueue_attrs(struct workqueue_struct *wq,
4069 : const struct workqueue_attrs *attrs)
4070 : {
4071 5 : int ret;
4072 :
4073 5 : lockdep_assert_cpus_held();
4074 :
4075 5 : mutex_lock(&wq_pool_mutex);
4076 5 : ret = apply_workqueue_attrs_locked(wq, attrs);
4077 5 : mutex_unlock(&wq_pool_mutex);
4078 :
4079 5 : return ret;
4080 : }
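
/*
 * Illustrative sketch (not part of this file): steering an unbound
 * workqueue's workers onto housekeeping CPUs and raising their priority.
 * The workqueue and cpumask are hypothetical; apply_workqueue_attrs() is
 * not exported here, so this pattern applies to built-in code.
 */
static int my_confine_wq(struct workqueue_struct *wq,
			 const struct cpumask *housekeeping)
{
	struct workqueue_attrs *attrs;
	int ret;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	attrs->nice = -10;				/* higher priority workers */
	cpumask_copy(attrs->cpumask, housekeeping);	/* stay off isolated CPUs */

	ret = apply_workqueue_attrs(wq, attrs);		/* @wq must be WQ_UNBOUND */
	free_workqueue_attrs(attrs);
	return ret;
}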
4081 :
4082 : /**
4083 : * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
4084 : * @wq: the target workqueue
4085 : * @cpu: the CPU coming up or going down
4086 : * @online: whether @cpu is coming up or going down
4087 : *
4088 : * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
4089 : * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update NUMA affinity of
4090 : * @wq accordingly.
4091 : *
4092 : * If NUMA affinity can't be adjusted due to memory allocation failure, it
4093 : * falls back to @wq->dfl_pwq which may not be optimal but is always
4094 : * correct.
4095 : *
4096 : * Note that when the last allowed CPU of a NUMA node goes offline for a
4097 : * workqueue with a cpumask spanning multiple nodes, the workers which were
4098 : * already executing the work items for the workqueue will lose their CPU
4099 : * affinity and may execute on any CPU. This is similar to how per-cpu
4100 : * workqueues behave on CPU_DOWN. If a workqueue user wants strict
4101 : * affinity, it's the user's responsibility to flush the work item from
4102 : * CPU_DOWN_PREPARE.
4103 : */
4104 39 : static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
4105 : bool online)
4106 : {
4107 39 : int node = cpu_to_node(cpu);
4108 39 : int cpu_off = online ? -1 : cpu;
4109 39 : struct pool_workqueue *old_pwq = NULL, *pwq;
4110 39 : struct workqueue_attrs *target_attrs;
4111 39 : cpumask_t *cpumask;
4112 :
4113 117 : lockdep_assert_held(&wq_pool_mutex);
4114 :
4115 39 : if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) ||
4116 0 : wq->unbound_attrs->no_numa)
4117 : return;
4118 :
4119 : /*
4120 : * We don't wanna alloc/free wq_attrs for each wq for each CPU.
4121 : * Let's use a preallocated one. The following buf is protected by
4122 : * CPU hotplug exclusion.
4123 : */
4124 0 : target_attrs = wq_update_unbound_numa_attrs_buf;
4125 0 : cpumask = target_attrs->cpumask;
4126 :
4127 0 : copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
4128 0 : pwq = unbound_pwq_by_node(wq, node);
4129 :
4130 : /*
4131 : * Let's determine what needs to be done. If the target cpumask is
4132 : * different from the default pwq's, we need to compare it to @pwq's
4133 : * and create a new one if they don't match. If the target cpumask
4134 : * equals the default pwq's, the default pwq should be used.
4135 : */
4136 0 : if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
4137 0 : if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
4138 : return;
4139 : } else {
4140 0 : goto use_dfl_pwq;
4141 : }
4142 :
4143 : /* create a new pwq */
4144 0 : pwq = alloc_unbound_pwq(wq, target_attrs);
4145 0 : if (!pwq) {
4146 0 : pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
4147 : wq->name);
4148 0 : goto use_dfl_pwq;
4149 : }
4150 :
4151 : /* Install the new pwq. */
4152 0 : mutex_lock(&wq->mutex);
4153 0 : old_pwq = numa_pwq_tbl_install(wq, node, pwq);
4154 0 : goto out_unlock;
4155 :
4156 0 : use_dfl_pwq:
4157 0 : mutex_lock(&wq->mutex);
4158 0 : raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
4159 0 : get_pwq(wq->dfl_pwq);
4160 0 : raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
4161 0 : old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
4162 0 : out_unlock:
4163 0 : mutex_unlock(&wq->mutex);
4164 0 : put_pwq_unlocked(old_pwq);
4165 : }
4166 :
4167 20 : static int alloc_and_link_pwqs(struct workqueue_struct *wq)
4168 : {
4169 20 : bool highpri = wq->flags & WQ_HIGHPRI;
4170 20 : int cpu, ret;
4171 :
4172 20 : if (!(wq->flags & WQ_UNBOUND)) {
4173 15 : wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
4174 15 : if (!wq->cpu_pwqs)
4175 : return -ENOMEM;
4176 :
4177 75 : for_each_possible_cpu(cpu) {
4178 60 : struct pool_workqueue *pwq =
4179 60 : per_cpu_ptr(wq->cpu_pwqs, cpu);
4180 60 : struct worker_pool *cpu_pools =
4181 60 : per_cpu(cpu_worker_pools, cpu);
4182 :
4183 60 : init_pwq(pwq, wq, &cpu_pools[highpri]);
4184 :
4185 60 : mutex_lock(&wq->mutex);
4186 60 : link_pwq(pwq);
4187 60 : mutex_unlock(&wq->mutex);
4188 : }
4189 : return 0;
4190 : }
4191 :
4192 5 : get_online_cpus();
4193 5 : if (wq->flags & __WQ_ORDERED) {
4194 2 : ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
4195 : /* there should only be single pwq for ordering guarantee */
4196 4 : WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
4197 : wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
4198 : "ordering guarantee broken for workqueue %s\n", wq->name);
4199 : } else {
4200 3 : ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
4201 : }
4202 5 : put_online_cpus();
4203 :
4204 5 : return ret;
4205 : }
4206 :
4207 20 : static int wq_clamp_max_active(int max_active, unsigned int flags,
4208 : const char *name)
4209 : {
4210 20 : int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
4211 :
4212 20 : if (max_active < 1 || max_active > lim)
4213 0 : pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
4214 : max_active, name, 1, lim);
4215 :
4216 20 : return clamp_val(max_active, 1, lim);
4217 : }
4218 :
4219 : /*
4220 : * Workqueues which may be used during memory reclaim should have a rescuer
4221 : * to guarantee forward progress.
4222 : */
4223 20 : static int init_rescuer(struct workqueue_struct *wq)
4224 : {
4225 20 : struct worker *rescuer;
4226 20 : int ret;
4227 :
4228 20 : if (!(wq->flags & WQ_MEM_RECLAIM))
4229 : return 0;
4230 :
4231 7 : rescuer = alloc_worker(NUMA_NO_NODE);
4232 7 : if (!rescuer)
4233 : return -ENOMEM;
4234 :
4235 7 : rescuer->rescue_wq = wq;
4236 7 : rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name);
4237 7 : if (IS_ERR(rescuer->task)) {
4238 0 : ret = PTR_ERR(rescuer->task);
4239 0 : kfree(rescuer);
4240 0 : return ret;
4241 : }
4242 :
4243 7 : wq->rescuer = rescuer;
4244 7 : kthread_bind_mask(rescuer->task, cpu_possible_mask);
4245 7 : wake_up_process(rescuer->task);
4246 :
4247 7 : return 0;
4248 : }
4249 :
4250 : __printf(1, 4)
4251 20 : struct workqueue_struct *alloc_workqueue(const char *fmt,
4252 : unsigned int flags,
4253 : int max_active, ...)
4254 : {
4255 20 : size_t tbl_size = 0;
4256 20 : va_list args;
4257 20 : struct workqueue_struct *wq;
4258 20 : struct pool_workqueue *pwq;
4259 :
4260 : /*
4261 : * Unbound && max_active == 1 used to imply ordered, which is no
4262 : * longer the case on NUMA machines due to per-node pools. While
4263 : * alloc_ordered_workqueue() is the right way to create an ordered
4264 : * workqueue, keep the previous behavior to avoid subtle breakages
4265 : * on NUMA.
4266 : */
4267 20 : if ((flags & WQ_UNBOUND) && max_active == 1)
4268 2 : flags |= __WQ_ORDERED;
4269 :
4270 : /* see the comment above the definition of WQ_POWER_EFFICIENT */
4271 20 : if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
4272 0 : flags |= WQ_UNBOUND;
4273 :
4274 : /* allocate wq and format name */
4275 20 : if (flags & WQ_UNBOUND)
4276 5 : tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
4277 :
4278 20 : wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
4279 20 : if (!wq)
4280 : return NULL;
4281 :
4282 20 : if (flags & WQ_UNBOUND) {
4283 5 : wq->unbound_attrs = alloc_workqueue_attrs();
4284 5 : if (!wq->unbound_attrs)
4285 0 : goto err_free_wq;
4286 : }
4287 :
4288 20 : va_start(args, max_active);
4289 20 : vsnprintf(wq->name, sizeof(wq->name), fmt, args);
4290 20 : va_end(args);
4291 :
4292 20 : max_active = max_active ?: WQ_DFL_ACTIVE;
4293 20 : max_active = wq_clamp_max_active(max_active, flags, wq->name);
4294 :
4295 : /* init wq */
4296 20 : wq->flags = flags;
4297 20 : wq->saved_max_active = max_active;
4298 20 : mutex_init(&wq->mutex);
4299 20 : atomic_set(&wq->nr_pwqs_to_flush, 0);
4300 20 : INIT_LIST_HEAD(&wq->pwqs);
4301 20 : INIT_LIST_HEAD(&wq->flusher_queue);
4302 20 : INIT_LIST_HEAD(&wq->flusher_overflow);
4303 20 : INIT_LIST_HEAD(&wq->maydays);
4304 :
4305 20 : wq_init_lockdep(wq);
4306 20 : INIT_LIST_HEAD(&wq->list);
4307 :
4308 20 : if (alloc_and_link_pwqs(wq) < 0)
4309 0 : goto err_unreg_lockdep;
4310 :
4311 20 : if (wq_online && init_rescuer(wq) < 0)
4312 0 : goto err_destroy;
4313 :
4314 20 : if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
4315 0 : goto err_destroy;
4316 :
4317 : /*
4318 : * wq_pool_mutex protects global freeze state and workqueues list.
4319 : * Grab it, adjust max_active and add the new @wq to workqueues
4320 : * list.
4321 : */
4322 20 : mutex_lock(&wq_pool_mutex);
4323 :
4324 20 : mutex_lock(&wq->mutex);
4325 85 : for_each_pwq(pwq, wq)
4326 65 : pwq_adjust_max_active(pwq);
4327 20 : mutex_unlock(&wq->mutex);
4328 :
4329 20 : list_add_tail_rcu(&wq->list, &workqueues);
4330 :
4331 20 : mutex_unlock(&wq_pool_mutex);
4332 :
4333 20 : return wq;
4334 :
4335 0 : err_unreg_lockdep:
4336 0 : wq_unregister_lockdep(wq);
4337 0 : wq_free_lockdep(wq);
4338 0 : err_free_wq:
4339 0 : free_workqueue_attrs(wq->unbound_attrs);
4340 0 : kfree(wq);
4341 0 : return NULL;
4342 0 : err_destroy:
4343 0 : destroy_workqueue(wq);
4344 0 : return NULL;
4345 : }
4346 : EXPORT_SYMBOL_GPL(alloc_workqueue);
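
/*
 * Illustrative sketch (not part of this file): a driver-owned workqueue.
 * WQ_MEM_RECLAIM attaches a rescuer so queued work can make forward
 * progress under memory pressure; WQ_UNBOUND lets items run on any CPU.
 * max_active == 0 selects the default (WQ_DFL_ACTIVE).  All names are
 * hypothetical.
 */
static struct workqueue_struct *my_wq;

static int __init my_driver_init(void)
{
	my_wq = alloc_workqueue("my_driver", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!my_wq)
		return -ENOMEM;
	return 0;
}

static void __exit my_driver_exit(void)
{
	destroy_workqueue(my_wq);	/* drains all pending work first */
}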
4347 :
4348 1 : static bool pwq_busy(struct pool_workqueue *pwq)
4349 : {
4350 1 : int i;
4351 :
4352 16 : for (i = 0; i < WORK_NR_COLORS; i++)
4353 15 : if (pwq->nr_in_flight[i])
4354 : return true;
4355 :
4356 1 : if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1))
4357 : return true;
4358 1 : if (pwq->nr_active || !list_empty(&pwq->delayed_works))
4359 0 : return true;
4360 :
4361 : return false;
4362 : }
4363 :
4364 : /**
4365 : * destroy_workqueue - safely terminate a workqueue
4366 : * @wq: target workqueue
4367 : *
4368 : * Safely destroy a workqueue. All work currently pending will be done first.
4369 : */
4370 1 : void destroy_workqueue(struct workqueue_struct *wq)
4371 : {
4372 1 : struct pool_workqueue *pwq;
4373 1 : int node;
4374 :
4375 : /*
4376 : * Remove it from sysfs first so that sanity check failure doesn't
4377 : * lead to sysfs name conflicts.
4378 : */
4379 2 : workqueue_sysfs_unregister(wq);
4380 :
4381 : /* drain it before proceeding with destruction */
4382 1 : drain_workqueue(wq);
4383 :
4384 : /* kill rescuer, if sanity checks fail, leave it w/o rescuer */
4385 1 : if (wq->rescuer) {
4386 0 : struct worker *rescuer = wq->rescuer;
4387 :
4388 : /* this prevents new queueing */
4389 0 : raw_spin_lock_irq(&wq_mayday_lock);
4390 0 : wq->rescuer = NULL;
4391 0 : raw_spin_unlock_irq(&wq_mayday_lock);
4392 :
4393 : /* rescuer will empty maydays list before exiting */
4394 0 : kthread_stop(rescuer->task);
4395 0 : kfree(rescuer);
4396 : }
4397 :
4398 : /*
4399 : * Sanity checks - grab all the locks so that we wait for all
4400 : * in-flight operations which may do put_pwq().
4401 : */
4402 1 : mutex_lock(&wq_pool_mutex);
4403 1 : mutex_lock(&wq->mutex);
4404 2 : for_each_pwq(pwq, wq) {
4405 1 : raw_spin_lock_irq(&pwq->pool->lock);
4406 1 : if (WARN_ON(pwq_busy(pwq))) {
4407 0 : pr_warn("%s: %s has the following busy pwq\n",
4408 : __func__, wq->name);
4409 0 : show_pwq(pwq);
4410 0 : raw_spin_unlock_irq(&pwq->pool->lock);
4411 0 : mutex_unlock(&wq->mutex);
4412 0 : mutex_unlock(&wq_pool_mutex);
4413 0 : show_workqueue_state();
4414 0 : return;
4415 : }
4416 1 : raw_spin_unlock_irq(&pwq->pool->lock);
4417 : }
4418 1 : mutex_unlock(&wq->mutex);
4419 :
4420 : /*
4421 : * wq list is used to freeze wq, remove from list after
4422 : * flushing is complete in case freeze races us.
4423 : */
4424 1 : list_del_rcu(&wq->list);
4425 1 : mutex_unlock(&wq_pool_mutex);
4426 :
4427 1 : if (!(wq->flags & WQ_UNBOUND)) {
4428 0 : wq_unregister_lockdep(wq);
4429 : /*
4430 : * The base ref is never dropped on per-cpu pwqs. Directly
4431 : * schedule RCU free.
4432 : */
4433 0 : call_rcu(&wq->rcu, rcu_free_wq);
4434 : } else {
4435 : /*
4436 : * We're the sole accessor of @wq at this point. Directly
4437 : * access numa_pwq_tbl[] and dfl_pwq to put the base refs.
4438 : * @wq will be freed when the last pwq is released.
4439 : */
4440 2 : for_each_node(node) {
4441 1 : pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
4442 1 : RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
4443 1 : put_pwq_unlocked(pwq);
4444 : }
4445 :
4446 : /*
4447 : * Put dfl_pwq. @wq may be freed any time after dfl_pwq is
4448 : * put. Don't access it afterwards.
4449 : */
4450 1 : pwq = wq->dfl_pwq;
4451 1 : wq->dfl_pwq = NULL;
4452 1 : put_pwq_unlocked(pwq);
4453 : }
4454 : }
4455 : EXPORT_SYMBOL_GPL(destroy_workqueue);
4456 :
4457 : /**
4458 : * workqueue_set_max_active - adjust max_active of a workqueue
4459 : * @wq: target workqueue
4460 : * @max_active: new max_active value.
4461 : *
4462 : * Set max_active of @wq to @max_active.
4463 : *
4464 : * CONTEXT:
4465 : * Don't call from IRQ context.
4466 : */
4467 0 : void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
4468 : {
4469 0 : struct pool_workqueue *pwq;
4470 :
4471 : /* disallow meddling with max_active for ordered workqueues */
4472 0 : if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4473 : return;
4474 :
4475 0 : max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
4476 :
4477 0 : mutex_lock(&wq->mutex);
4478 :
4479 0 : wq->flags &= ~__WQ_ORDERED;
4480 0 : wq->saved_max_active = max_active;
4481 :
4482 0 : for_each_pwq(pwq, wq)
4483 0 : pwq_adjust_max_active(pwq);
4484 :
4485 0 : mutex_unlock(&wq->mutex);
4486 : }
4487 : EXPORT_SYMBOL_GPL(workqueue_set_max_active);
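
/*
 * Illustrative sketch (not part of this file): adjusting concurrency at
 * runtime, e.g. from a debugfs knob.  my_io_wq and the limit are
 * hypothetical; explicitly ordered workqueues must not be resized.
 */
static void my_set_io_concurrency(struct workqueue_struct *my_io_wq, int limit)
{
	/* the value is clamped to the valid range internally */
	workqueue_set_max_active(my_io_wq, limit);
}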
4488 :
4489 : /**
4490 : * current_work - retrieve %current task's work struct
4491 : *
4492 : * Determine if %current task is a workqueue worker and what it's working on.
4493 : * Useful to find out the context that the %current task is running in.
4494 : *
4495 : * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
4496 : */
4497 0 : struct work_struct *current_work(void)
4498 : {
4499 0 : struct worker *worker = current_wq_worker();
4500 :
4501 0 : return worker ? worker->current_work : NULL;
4502 : }
4503 : EXPORT_SYMBOL(current_work);
4504 :
4505 : /**
4506 : * current_is_workqueue_rescuer - is %current workqueue rescuer?
4507 : *
4508 : * Determine whether %current is a workqueue rescuer. Can be used from
4509 : * work functions to determine whether it's being run off the rescuer task.
4510 : *
4511 : * Return: %true if %current is a workqueue rescuer. %false otherwise.
4512 : */
4513 5 : bool current_is_workqueue_rescuer(void)
4514 : {
4515 5 : struct worker *worker = current_wq_worker();
4516 :
4517 5 : return worker && worker->rescue_wq;
4518 : }
4519 :
4520 : /**
4521 : * workqueue_congested - test whether a workqueue is congested
4522 : * @cpu: CPU in question
4523 : * @wq: target workqueue
4524 : *
4525 : * Test whether @wq's cpu workqueue for @cpu is congested. There is
4526 : * no synchronization around this function and the test result is
4527 : * unreliable and only useful as advisory hints or for debugging.
4528 : *
4529 : * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
4530 : * Note that both per-cpu and unbound workqueues may be associated with
4531 : * multiple pool_workqueues which have separate congested states. A
4532 : * workqueue being congested on one CPU doesn't mean the workqueue is also
4533 : * congested on other CPUs / NUMA nodes.
4534 : *
4535 : * Return:
4536 : * %true if congested, %false otherwise.
4537 : */
4538 0 : bool workqueue_congested(int cpu, struct workqueue_struct *wq)
4539 : {
4540 0 : struct pool_workqueue *pwq;
4541 0 : bool ret;
4542 :
4543 0 : rcu_read_lock();
4544 0 : preempt_disable();
4545 :
4546 0 : if (cpu == WORK_CPU_UNBOUND)
4547 0 : cpu = smp_processor_id();
4548 :
4549 0 : if (!(wq->flags & WQ_UNBOUND))
4550 0 : pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
4551 : else
4552 0 : pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
4553 :
4554 0 : ret = !list_empty(&pwq->delayed_works);
4555 0 : preempt_enable();
4556 0 : rcu_read_unlock();
4557 :
4558 0 : return ret;
4559 : }
4560 : EXPORT_SYMBOL_GPL(workqueue_congested);
4561 :
4562 : /**
4563 : * work_busy - test whether a work is currently pending or running
4564 : * @work: the work to be tested
4565 : *
4566 : * Test whether @work is currently pending or running. There is no
4567 : * synchronization around this function and the test result is
4568 : * unreliable and only useful as advisory hints or for debugging.
4569 : *
4570 : * Return:
4571 : * OR'd bitmask of WORK_BUSY_* bits.
4572 : */
4573 0 : unsigned int work_busy(struct work_struct *work)
4574 : {
4575 0 : struct worker_pool *pool;
4576 0 : unsigned long flags;
4577 0 : unsigned int ret = 0;
4578 :
4579 0 : if (work_pending(work))
4580 0 : ret |= WORK_BUSY_PENDING;
4581 :
4582 0 : rcu_read_lock();
4583 0 : pool = get_work_pool(work);
4584 0 : if (pool) {
4585 0 : raw_spin_lock_irqsave(&pool->lock, flags);
4586 0 : if (find_worker_executing_work(pool, work))
4587 0 : ret |= WORK_BUSY_RUNNING;
4588 0 : raw_spin_unlock_irqrestore(&pool->lock, flags);
4589 : }
4590 0 : rcu_read_unlock();
4591 :
4592 0 : return ret;
4593 : }
4594 : EXPORT_SYMBOL_GPL(work_busy);
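
/*
 * Illustrative sketch (not part of this file): work_busy() is advisory, so
 * it belongs in debug or statistics code, never in synchronization logic.
 * my_work is hypothetical.
 */
static void my_report_work_state(struct work_struct *my_work)
{
	unsigned int busy = work_busy(my_work);

	pr_debug("work %ps:%s%s\n", my_work->func,
		 busy & WORK_BUSY_PENDING ? " pending" : "",
		 busy & WORK_BUSY_RUNNING ? " running" : "");
}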
4595 :
4596 : /**
4597 : * set_worker_desc - set description for the current work item
4598 : * @fmt: printf-style format string
4599 : * @...: arguments for the format string
4600 : *
4601 : * This function can be called by a running work function to describe what
4602 : * the work item is about. If the worker task gets dumped, this
4603 : * information will be printed out together to help debugging. The
4604 : * description can be at most WORKER_DESC_LEN including the trailing '\0'.
4605 : */
4606 5 : void set_worker_desc(const char *fmt, ...)
4607 : {
4608 5 : struct worker *worker = current_wq_worker();
4609 5 : va_list args;
4610 :
4611 5 : if (worker) {
4612 5 : va_start(args, fmt);
4613 5 : vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
4614 5 : va_end(args);
4615 : }
4616 5 : }
4617 : EXPORT_SYMBOL_GPL(set_worker_desc);
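
/*
 * Illustrative sketch (not part of this file): tagging the executing worker
 * so sysrq-t or a hung-task dump shows which object the work item was
 * processing.  struct my_req and its fields are hypothetical.
 */
struct my_req {
	struct work_struct work;
	int id;
};

static void my_req_workfn(struct work_struct *work)
{
	struct my_req *req = container_of(work, struct my_req, work);

	set_worker_desc("my_req %d", req->id);	/* truncated to WORKER_DESC_LEN */
	/* ... long-running processing of req ... */
}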
4618 :
4619 : /**
4620 : * print_worker_info - print out worker information and description
4621 : * @log_lvl: the log level to use when printing
4622 : * @task: target task
4623 : *
4624 : * If @task is a worker and currently executing a work item, print out the
4625 : * name of the workqueue being serviced and worker description set with
4626 : * set_worker_desc() by the currently executing work item.
4627 : *
4628 : * This function can be safely called on any task as long as the
4629 : * task_struct itself is accessible. While safe, this function isn't
4630 : * synchronized and may print out mixups or garbage of limited length.
4631 : */
4632 1 : void print_worker_info(const char *log_lvl, struct task_struct *task)
4633 : {
4634 1 : work_func_t *fn = NULL;
4635 1 : char name[WQ_NAME_LEN] = { };
4636 1 : char desc[WORKER_DESC_LEN] = { };
4637 1 : struct pool_workqueue *pwq = NULL;
4638 1 : struct workqueue_struct *wq = NULL;
4639 1 : struct worker *worker;
4640 :
4641 1 : if (!(task->flags & PF_WQ_WORKER))
4642 1 : return;
4643 :
4644 : /*
4645 : * This function is called without any synchronization and @task
4646 : * could be in any state. Be careful with dereferences.
4647 : */
4648 0 : worker = kthread_probe_data(task);
4649 :
4650 : /*
4651 : * Carefully copy the associated workqueue's workfn, name and desc.
4652 : * Keep the original last '\0' in case the original is garbage.
4653 : */
4654 0 : copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn));
4655 0 : copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq));
4656 0 : copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq));
4657 0 : copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1);
4658 0 : copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1);
4659 :
4660 0 : if (fn || name[0] || desc[0]) {
4661 0 : printk("%sWorkqueue: %s %ps", log_lvl, name, fn);
4662 0 : if (strcmp(name, desc))
4663 0 : pr_cont(" (%s)", desc);
4664 0 : pr_cont("\n");
4665 : }
4666 : }
4667 :
4668 0 : static void pr_cont_pool_info(struct worker_pool *pool)
4669 : {
4670 0 : pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
4671 0 : if (pool->node != NUMA_NO_NODE)
4672 0 : pr_cont(" node=%d", pool->node);
4673 0 : pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
4674 0 : }
4675 :
4676 0 : static void pr_cont_work(bool comma, struct work_struct *work)
4677 : {
4678 0 : if (work->func == wq_barrier_func) {
4679 0 : struct wq_barrier *barr;
4680 :
4681 0 : barr = container_of(work, struct wq_barrier, work);
4682 :
4683 0 : pr_cont("%s BAR(%d)", comma ? "," : "",
4684 : task_pid_nr(barr->task));
4685 : } else {
4686 0 : pr_cont("%s %ps", comma ? "," : "", work->func);
4687 : }
4688 0 : }
4689 :
4690 0 : static void show_pwq(struct pool_workqueue *pwq)
4691 : {
4692 0 : struct worker_pool *pool = pwq->pool;
4693 0 : struct work_struct *work;
4694 0 : struct worker *worker;
4695 0 : bool has_in_flight = false, has_pending = false;
4696 0 : int bkt;
4697 :
4698 0 : pr_info(" pwq %d:", pool->id);
4699 0 : pr_cont_pool_info(pool);
4700 :
4701 0 : pr_cont(" active=%d/%d refcnt=%d%s\n",
4702 : pwq->nr_active, pwq->max_active, pwq->refcnt,
4703 : !list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
4704 :
4705 0 : hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4706 0 : if (worker->current_pwq == pwq) {
4707 : has_in_flight = true;
4708 : break;
4709 : }
4710 : }
4711 0 : if (has_in_flight) {
4712 0 : bool comma = false;
4713 :
4714 0 : pr_info(" in-flight:");
4715 0 : hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4716 0 : if (worker->current_pwq != pwq)
4717 0 : continue;
4718 :
4719 0 : pr_cont("%s %d%s:%ps", comma ? "," : "",
4720 : task_pid_nr(worker->task),
4721 : worker->rescue_wq ? "(RESCUER)" : "",
4722 : worker->current_func);
4723 0 : list_for_each_entry(work, &worker->scheduled, entry)
4724 0 : pr_cont_work(false, work);
4725 : comma = true;
4726 : }
4727 0 : pr_cont("\n");
4728 : }
4729 :
4730 0 : list_for_each_entry(work, &pool->worklist, entry) {
4731 0 : if (get_work_pwq(work) == pwq) {
4732 : has_pending = true;
4733 : break;
4734 : }
4735 : }
4736 0 : if (has_pending) {
4737 0 : bool comma = false;
4738 :
4739 0 : pr_info(" pending:");
4740 0 : list_for_each_entry(work, &pool->worklist, entry) {
4741 0 : if (get_work_pwq(work) != pwq)
4742 0 : continue;
4743 :
4744 0 : pr_cont_work(comma, work);
4745 0 : comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4746 : }
4747 0 : pr_cont("\n");
4748 : }
4749 :
4750 0 : if (!list_empty(&pwq->delayed_works)) {
4751 0 : bool comma = false;
4752 :
4753 0 : pr_info(" delayed:");
4754 0 : list_for_each_entry(work, &pwq->delayed_works, entry) {
4755 0 : pr_cont_work(comma, work);
4756 0 : comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4757 : }
4758 0 : pr_cont("\n");
4759 : }
4760 0 : }
4761 :
4762 : /**
4763 : * show_workqueue_state - dump workqueue state
4764 : *
4765 : * Called from a sysrq handler or try_to_freeze_tasks() and prints out
4766 : * all busy workqueues and pools.
4767 : */
4768 0 : void show_workqueue_state(void)
4769 : {
4770 0 : struct workqueue_struct *wq;
4771 0 : struct worker_pool *pool;
4772 0 : unsigned long flags;
4773 0 : int pi;
4774 :
4775 0 : rcu_read_lock();
4776 :
4777 0 : pr_info("Showing busy workqueues and worker pools:\n");
4778 :
4779 0 : list_for_each_entry_rcu(wq, &workqueues, list) {
4780 0 : struct pool_workqueue *pwq;
4781 0 : bool idle = true;
4782 :
4783 0 : for_each_pwq(pwq, wq) {
4784 0 : if (pwq->nr_active || !list_empty(&pwq->delayed_works)) {
4785 0 : idle = false;
4786 0 : break;
4787 : }
4788 : }
4789 0 : if (idle)
4790 0 : continue;
4791 :
4792 0 : pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
4793 :
4794 0 : for_each_pwq(pwq, wq) {
4795 0 : raw_spin_lock_irqsave(&pwq->pool->lock, flags);
4796 0 : if (pwq->nr_active || !list_empty(&pwq->delayed_works))
4797 0 : show_pwq(pwq);
4798 0 : raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
4799 : /*
4800 : * We could be printing a lot from atomic context, e.g.
4801 : * sysrq-t -> show_workqueue_state(). Avoid triggering
4802 : * hard lockup.
4803 : */
4804 0 : touch_nmi_watchdog();
4805 : }
4806 : }
4807 :
4808 0 : for_each_pool(pool, pi) {
4809 0 : struct worker *worker;
4810 0 : bool first = true;
4811 :
4812 0 : raw_spin_lock_irqsave(&pool->lock, flags);
4813 0 : if (pool->nr_workers == pool->nr_idle)
4814 0 : goto next_pool;
4815 :
4816 0 : pr_info("pool %d:", pool->id);
4817 0 : pr_cont_pool_info(pool);
4818 0 : pr_cont(" hung=%us workers=%d",
4819 : jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
4820 : pool->nr_workers);
4821 0 : if (pool->manager)
4822 0 : pr_cont(" manager: %d",
4823 : task_pid_nr(pool->manager->task));
4824 0 : list_for_each_entry(worker, &pool->idle_list, entry) {
4825 0 : pr_cont(" %s%d", first ? "idle: " : "",
4826 : task_pid_nr(worker->task));
4827 0 : first = false;
4828 : }
4829 0 : pr_cont("\n");
4830 0 : next_pool:
4831 0 : raw_spin_unlock_irqrestore(&pool->lock, flags);
4832 : /*
4833 : * We could be printing a lot from atomic context, e.g.
4834 : * sysrq-t -> show_workqueue_state(). Avoid triggering
4835 : * hard lockup.
4836 : */
4837 0 : touch_nmi_watchdog();
4838 : }
4839 :
4840 0 : rcu_read_unlock();
4841 0 : }
4842 :
4843 : /* used to show worker information through /proc/PID/{comm,stat,status} */
4844 24 : void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
4845 : {
4846 24 : int off;
4847 :
4848 : /* always show the actual comm */
4849 24 : off = strscpy(buf, task->comm, size);
4850 24 : if (off < 0)
4851 : return;
4852 :
4853 : /* stabilize PF_WQ_WORKER and worker pool association */
4854 24 : mutex_lock(&wq_pool_attach_mutex);
4855 :
4856 24 : if (task->flags & PF_WQ_WORKER) {
4857 24 : struct worker *worker = kthread_data(task);
4858 24 : struct worker_pool *pool = worker->pool;
4859 :
4860 24 : if (pool) {
4861 17 : raw_spin_lock_irq(&pool->lock);
4862 : /*
4863 : * ->desc tracks information (wq name or
4864 : * set_worker_desc()) for the latest execution. If
4865 : * current, prepend '+', otherwise '-'.
4866 : */
4867 17 : if (worker->desc[0] != '\0') {
4868 12 : if (worker->current_work)
4869 0 : scnprintf(buf + off, size - off, "+%s",
4870 0 : worker->desc);
4871 : else
4872 12 : scnprintf(buf + off, size - off, "-%s",
4873 12 : worker->desc);
4874 : }
4875 17 : raw_spin_unlock_irq(&pool->lock);
4876 : }
4877 : }
4878 :
4879 24 : mutex_unlock(&wq_pool_attach_mutex);
4880 : }
4881 :
4882 : #ifdef CONFIG_SMP
4883 :
4884 : /*
4885 : * CPU hotplug.
4886 : *
4887 : * There are two challenges in supporting CPU hotplug. Firstly, there
4888 : * are a lot of assumptions on strong associations among work, pwq and
4889 : * pool which make migrating pending and scheduled works very
4890 : * difficult to implement without impacting hot paths. Secondly,
4891 : * worker pools serve a mix of short, long and very long running works, making
4892 : * blocked draining impractical.
4893 : *
4894 : * This is solved by allowing the pools to be disassociated from the CPU,
4895 : * running as unbound ones, and allowing them to be reattached later if the
4896 : * CPU comes back online.
4897 : */
4898 :
4899 0 : static void unbind_workers(int cpu)
4900 : {
4901 0 : struct worker_pool *pool;
4902 0 : struct worker *worker;
4903 :
4904 0 : for_each_cpu_worker_pool(pool, cpu) {
4905 0 : mutex_lock(&wq_pool_attach_mutex);
4906 0 : raw_spin_lock_irq(&pool->lock);
4907 :
4908 : /*
4909 : * We've blocked all attach/detach operations. Make all workers
4910 : * unbound and set DISASSOCIATED. Before this, all workers
4911 : * except for the ones which are still executing works from
4912 : * before the last CPU down must be on the cpu. After
4913 : * this, they may become diasporas.
4914 : * this, they may be running on any CPU.
4915 0 : for_each_pool_worker(worker, pool)
4916 0 : worker->flags |= WORKER_UNBOUND;
4917 :
4918 0 : pool->flags |= POOL_DISASSOCIATED;
4919 :
4920 0 : raw_spin_unlock_irq(&pool->lock);
4921 :
4922 0 : for_each_pool_worker(worker, pool) {
4923 0 : kthread_set_per_cpu(worker->task, -1);
4924 0 : WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
4925 : }
4926 :
4927 0 : mutex_unlock(&wq_pool_attach_mutex);
4928 :
4929 : /*
4930 : * Call schedule() so that we cross rq->lock and thus can
4931 : * guarantee sched callbacks see the %WORKER_UNBOUND flag.
4932 : * This is necessary as scheduler callbacks may be invoked
4933 : * from other cpus.
4934 : */
4935 0 : schedule();
4936 :
4937 : /*
4938 : * Sched callbacks are disabled now. Zap nr_running.
4939 : * After this, nr_running stays zero and need_more_worker()
4940 : * and keep_working() are always true as long as the
4941 : * worklist is not empty. This pool now behaves as an
4942 : * unbound (in terms of concurrency management) pool which
4943 : * is served by workers tied to the pool.
4944 : */
4945 0 : atomic_set(&pool->nr_running, 0);
4946 :
4947 : /*
4948 : * With concurrency management just turned off, a busy
4949 : * worker blocking could lead to lengthy stalls. Kick off
4950 : * unbound chain execution of currently pending work items.
4951 : */
4952 0 : raw_spin_lock_irq(&pool->lock);
4953 0 : wake_up_worker(pool);
4954 0 : raw_spin_unlock_irq(&pool->lock);
4955 : }
4956 0 : }
4957 :
4958 : /**
4959 : * rebind_workers - rebind all workers of a pool to the associated CPU
4960 : * @pool: pool of interest
4961 : *
4962 : * @pool->cpu is coming online. Rebind all workers to the CPU.
4963 : */
4964 6 : static void rebind_workers(struct worker_pool *pool)
4965 : {
4966 6 : struct worker *worker;
4967 :
4968 18 : lockdep_assert_held(&wq_pool_attach_mutex);
4969 :
4970 : /*
4971 : * Restore CPU affinity of all workers. As all idle workers should
4972 : * be on the run-queue of the associated CPU before any local
4973 : * wake-ups for concurrency management happen, restore CPU affinity
4974 : * of all workers first and then clear UNBOUND. As we're called
4975 : * from CPU_ONLINE, the following shouldn't fail.
4976 : */
4977 24 : for_each_pool_worker(worker, pool) {
4978 6 : kthread_set_per_cpu(worker->task, pool->cpu);
4979 6 : WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
4980 : pool->attrs->cpumask) < 0);
4981 : }
4982 :
4983 6 : raw_spin_lock_irq(&pool->lock);
4984 :
4985 6 : pool->flags &= ~POOL_DISASSOCIATED;
4986 :
4987 24 : for_each_pool_worker(worker, pool) {
4988 6 : unsigned int worker_flags = worker->flags;
4989 :
4990 : /*
4991 : * A bound idle worker should actually be on the runqueue
4992 : * of the associated CPU for local wake-ups targeting it to
4993 : * work. Kick all idle workers so that they migrate to the
4994 : * associated CPU. Doing this in the same loop as
4995 : * replacing UNBOUND with REBOUND is safe as no worker will
4996 : * be bound before @pool->lock is released.
4997 : */
4998 6 : if (worker_flags & WORKER_IDLE)
4999 6 : wake_up_process(worker->task);
5000 :
5001 : /*
5002 : * We want to clear UNBOUND but can't directly call
5003 : * worker_clr_flags() or adjust nr_running. Atomically
5004 : * replace UNBOUND with another NOT_RUNNING flag REBOUND.
5005 : * @worker will clear REBOUND using worker_clr_flags() when
5006 : * it initiates the next execution cycle thus restoring
5007 : * concurrency management. Note that when or whether
5008 : * @worker clears REBOUND doesn't affect correctness.
5009 : *
5010 : * WRITE_ONCE() is necessary because @worker->flags may be
5011 : * tested without holding any lock in
5012 : * wq_worker_running(). Without it, NOT_RUNNING test may
5013 : * fail incorrectly leading to premature concurrency
5014 : * management operations.
5015 : */
5016 6 : WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
5017 6 : worker_flags |= WORKER_REBOUND;
5018 6 : worker_flags &= ~WORKER_UNBOUND;
5019 6 : WRITE_ONCE(worker->flags, worker_flags);
5020 : }
5021 :
5022 6 : raw_spin_unlock_irq(&pool->lock);
5023 6 : }
5024 :
5025 : /**
5026 : * restore_unbound_workers_cpumask - restore cpumask of unbound workers
5027 : * @pool: unbound pool of interest
5028 : * @cpu: the CPU which is coming up
5029 : *
5030 : * An unbound pool may end up with a cpumask which doesn't have any online
5031 : * CPUs. When a worker of such a pool gets scheduled, the scheduler resets
5032 : * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any
5033 : * online CPU before, cpus_allowed of all its workers should be restored.
5034 : */
5035 3 : static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
5036 : {
5037 3 : static cpumask_t cpumask;
5038 3 : struct worker *worker;
5039 :
5040 9 : lockdep_assert_held(&wq_pool_attach_mutex);
5041 :
5042 : /* is @cpu allowed for @pool? */
5043 3 : if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
5044 : return;
5045 :
5046 3 : cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
5047 :
5048 : /* as we're called from CPU_ONLINE, the following shouldn't fail */
5049 12 : for_each_pool_worker(worker, pool)
5050 3 : WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
5051 : }
5052 :
5053 3 : int workqueue_prepare_cpu(unsigned int cpu)
5054 : {
5055 3 : struct worker_pool *pool;
5056 :
5057 9 : for_each_cpu_worker_pool(pool, cpu) {
5058 6 : if (pool->nr_workers)
5059 0 : continue;
5060 6 : if (!create_worker(pool))
5061 : return -ENOMEM;
5062 : }
5063 : return 0;
5064 : }
5065 :
5066 3 : int workqueue_online_cpu(unsigned int cpu)
5067 : {
5068 3 : struct worker_pool *pool;
5069 3 : struct workqueue_struct *wq;
5070 3 : int pi;
5071 :
5072 3 : mutex_lock(&wq_pool_mutex);
5073 :
5074 57 : for_each_pool(pool, pi) {
5075 27 : mutex_lock(&wq_pool_attach_mutex);
5076 :
5077 27 : if (pool->cpu == cpu)
5078 6 : rebind_workers(pool);
5079 21 : else if (pool->cpu < 0)
5080 3 : restore_unbound_workers_cpumask(pool, cpu);
5081 :
5082 27 : mutex_unlock(&wq_pool_attach_mutex);
5083 : }
5084 :
5085 : /* update NUMA affinity of unbound workqueues */
5086 33 : list_for_each_entry(wq, &workqueues, list)
5087 30 : wq_update_unbound_numa(wq, cpu, true);
5088 :
5089 3 : mutex_unlock(&wq_pool_mutex);
5090 3 : return 0;
5091 : }
5092 :
5093 0 : int workqueue_offline_cpu(unsigned int cpu)
5094 : {
5095 0 : struct workqueue_struct *wq;
5096 :
5097 : /* unbinding per-cpu workers should happen on the local CPU */
5098 0 : if (WARN_ON(cpu != smp_processor_id()))
5099 : return -1;
5100 :
5101 0 : unbind_workers(cpu);
5102 :
5103 : /* update NUMA affinity of unbound workqueues */
5104 0 : mutex_lock(&wq_pool_mutex);
5105 0 : list_for_each_entry(wq, &workqueues, list)
5106 0 : wq_update_unbound_numa(wq, cpu, false);
5107 0 : mutex_unlock(&wq_pool_mutex);
5108 :
5109 0 : return 0;
5110 : }
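/*
 * For reference (hedged, lives outside this file): the three callbacks
 * above are hooked into the CPU hotplug state machine roughly like the
 * entries below in kernel/cpu.c's state table; the state names are from
 * memory and may differ slightly between kernel versions.
 *
 *	[CPUHP_WORKQUEUE_PREP] = {
 *		.name			= "workqueue:prepare",
 *		.startup.single		= workqueue_prepare_cpu,
 *		.teardown.single	= NULL,
 *	},
 *	[CPUHP_AP_WORKQUEUE_ONLINE] = {
 *		.name			= "workqueue:online",
 *		.startup.single		= workqueue_online_cpu,
 *		.teardown.single	= workqueue_offline_cpu,
 *	},
 */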
5111 :
5112 : struct work_for_cpu {
5113 : struct work_struct work;
5114 : long (*fn)(void *);
5115 : void *arg;
5116 : long ret;
5117 : };
5118 :
5119 0 : static void work_for_cpu_fn(struct work_struct *work)
5120 : {
5121 0 : struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
5122 :
5123 0 : wfc->ret = wfc->fn(wfc->arg);
5124 0 : }
5125 :
5126 : /**
5127 : * work_on_cpu - run a function in thread context on a particular cpu
5128 : * @cpu: the cpu to run on
5129 : * @fn: the function to run
5130 : * @arg: the function argument
5131 : *
5132 : * It is up to the caller to ensure that the cpu doesn't go offline.
5133 : * The caller must not hold any locks which would prevent @fn from completing.
5134 : *
5135 : * Return: The value @fn returns.
5136 : */
5137 0 : long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
5138 : {
5139 0 : struct work_for_cpu wfc = { .fn = fn, .arg = arg };
5140 :
5141 0 : INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
5142 0 : schedule_work_on(cpu, &wfc.work);
5143 0 : flush_work(&wfc.work);
5144 0 : destroy_work_on_stack(&wfc.work);
5145 0 : return wfc.ret;
5146 : }
5147 : EXPORT_SYMBOL_GPL(work_on_cpu);
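/*
 * Minimal usage sketch (hypothetical caller, not part of workqueue.c):
 * run a function synchronously in kworker process context on a chosen CPU.
 */
static long example_read_cpu_id(void *arg)
{
	/* runs on the CPU that was passed to work_on_cpu() */
	return raw_smp_processor_id();
}

static long example_query_cpu(int cpu)
{
	/* the caller is responsible for keeping @cpu online across the call */
	return work_on_cpu(cpu, example_read_cpu_id, NULL);
}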
5148 :
5149 : /**
5150 : * work_on_cpu_safe - run a function in thread context on a particular cpu
5151 : * @cpu: the cpu to run on
5152 : * @fn: the function to run
5153 : * @arg: the function argument
5154 : *
5155 : * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
5156 : * any locks which would prevent @fn from completing.
5157 : *
5158 : * Return: The value @fn returns.
5159 : */
5160 0 : long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
5161 : {
5162 0 : long ret = -ENODEV;
5163 :
5164 0 : get_online_cpus();
5165 0 : if (cpu_online(cpu))
5166 0 : ret = work_on_cpu(cpu, fn, arg);
5167 0 : put_online_cpus();
5168 0 : return ret;
5169 : }
5170 : EXPORT_SYMBOL_GPL(work_on_cpu_safe);
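/*
 * Usage note (sketch): when the caller does not already exclude CPU
 * hotplug itself, the _safe variant is the simpler choice, e.g.
 *
 *	ret = work_on_cpu_safe(cpu, example_read_cpu_id, NULL);
 *
 * which returns -ENODEV if @cpu is no longer online.
 */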
5171 : #endif /* CONFIG_SMP */
5172 :
5173 : #ifdef CONFIG_FREEZER
5174 :
5175 : /**
5176 : * freeze_workqueues_begin - begin freezing workqueues
5177 : *
5178 : * Start freezing workqueues. After this function returns, all freezable
5179 : * workqueues will queue new works to their delayed_works list instead of
5180 : * pool->worklist.
5181 : *
5182 : * CONTEXT:
5183 : * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
5184 : */
5185 : void freeze_workqueues_begin(void)
5186 : {
5187 : struct workqueue_struct *wq;
5188 : struct pool_workqueue *pwq;
5189 :
5190 : mutex_lock(&wq_pool_mutex);
5191 :
5192 : WARN_ON_ONCE(workqueue_freezing);
5193 : workqueue_freezing = true;
5194 :
5195 : list_for_each_entry(wq, &workqueues, list) {
5196 : mutex_lock(&wq->mutex);
5197 : for_each_pwq(pwq, wq)
5198 : pwq_adjust_max_active(pwq);
5199 : mutex_unlock(&wq->mutex);
5200 : }
5201 :
5202 : mutex_unlock(&wq_pool_mutex);
5203 : }
5204 :
5205 : /**
5206 : * freeze_workqueues_busy - are freezable workqueues still busy?
5207 : *
5208 : * Check whether freezing is complete. This function must be called
5209 : * between freeze_workqueues_begin() and thaw_workqueues().
5210 : *
5211 : * CONTEXT:
5212 : * Grabs and releases wq_pool_mutex.
5213 : *
5214 : * Return:
5215 : * %true if some freezable workqueues are still busy. %false if freezing
5216 : * is complete.
5217 : */
5218 : bool freeze_workqueues_busy(void)
5219 : {
5220 : bool busy = false;
5221 : struct workqueue_struct *wq;
5222 : struct pool_workqueue *pwq;
5223 :
5224 : mutex_lock(&wq_pool_mutex);
5225 :
5226 : WARN_ON_ONCE(!workqueue_freezing);
5227 :
5228 : list_for_each_entry(wq, &workqueues, list) {
5229 : if (!(wq->flags & WQ_FREEZABLE))
5230 : continue;
5231 : /*
5232 : * nr_active is monotonically decreasing. It's safe
5233 : * to peek without lock.
5234 : */
5235 : rcu_read_lock();
5236 : for_each_pwq(pwq, wq) {
5237 : WARN_ON_ONCE(pwq->nr_active < 0);
5238 : if (pwq->nr_active) {
5239 : busy = true;
5240 : rcu_read_unlock();
5241 : goto out_unlock;
5242 : }
5243 : }
5244 : rcu_read_unlock();
5245 : }
5246 : out_unlock:
5247 : mutex_unlock(&wq_pool_mutex);
5248 : return busy;
5249 : }
5250 :
5251 : /**
5252 : * thaw_workqueues - thaw workqueues
5253 : *
5254 : * Thaw workqueues. Normal queueing is restored and all collected
5255 : * frozen works are transferred to their respective pool worklists.
5256 : *
5257 : * CONTEXT:
5258 : * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
5259 : */
5260 : void thaw_workqueues(void)
5261 : {
5262 : struct workqueue_struct *wq;
5263 : struct pool_workqueue *pwq;
5264 :
5265 : mutex_lock(&wq_pool_mutex);
5266 :
5267 : if (!workqueue_freezing)
5268 : goto out_unlock;
5269 :
5270 : workqueue_freezing = false;
5271 :
5272 : /* restore max_active and repopulate worklist */
5273 : list_for_each_entry(wq, &workqueues, list) {
5274 : mutex_lock(&wq->mutex);
5275 : for_each_pwq(pwq, wq)
5276 : pwq_adjust_max_active(pwq);
5277 : mutex_unlock(&wq->mutex);
5278 : }
5279 :
5280 : out_unlock:
5281 : mutex_unlock(&wq_pool_mutex);
5282 : }
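/*
 * Hedged sketch of how the three entry points above are meant to be used
 * by the suspend path (the real caller is in kernel/power/process.c and
 * differs in detail, e.g. it also freezes tasks and enforces a timeout):
 *
 *	freeze_workqueues_begin();
 *	while (freeze_workqueues_busy())
 *		msleep(10);
 *	...system suspended...
 *	thaw_workqueues();
 */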
5283 : #endif /* CONFIG_FREEZER */
5284 :
5285 0 : static int workqueue_apply_unbound_cpumask(void)
5286 : {
5287 0 : LIST_HEAD(ctxs);
5288 0 : int ret = 0;
5289 0 : struct workqueue_struct *wq;
5290 0 : struct apply_wqattrs_ctx *ctx, *n;
5291 :
5292 0 : lockdep_assert_held(&wq_pool_mutex);
5293 :
5294 0 : list_for_each_entry(wq, &workqueues, list) {
5295 0 : if (!(wq->flags & WQ_UNBOUND))
5296 0 : continue;
5297 : /* creating multiple pwqs breaks ordering guarantee */
5298 0 : if (wq->flags & __WQ_ORDERED)
5299 0 : continue;
5300 :
5301 0 : ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs);
5302 0 : if (!ctx) {
5303 : ret = -ENOMEM;
5304 : break;
5305 : }
5306 :
5307 0 : list_add_tail(&ctx->list, &ctxs);
5308 : }
5309 :
5310 0 : list_for_each_entry_safe(ctx, n, &ctxs, list) {
5311 0 : if (!ret)
5312 0 : apply_wqattrs_commit(ctx);
5313 0 : apply_wqattrs_cleanup(ctx);
5314 : }
5315 :
5316 0 : return ret;
5317 : }
5318 :
5319 : /**
5320 : * workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
5321 : * @cpumask: the cpumask to set
5322 : *
5323 : * The low-level workqueues cpumask is a global cpumask that limits
5324 : * the affinity of all unbound workqueues. This function checks @cpumask,
5325 : * applies it to all unbound workqueues and updates all their pwqs.
5326 : *
5327 : * Return: 0 - Success
5328 : * -EINVAL - Invalid @cpumask
5329 : * -ENOMEM - Failed to allocate memory for attrs or pwqs.
5330 : */
5331 0 : int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
5332 : {
5333 0 : int ret = -EINVAL;
5334 0 : cpumask_var_t saved_cpumask;
5335 :
5336 0 : if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
5337 : return -ENOMEM;
5338 :
5339 : /*
5340 : * Not excluding isolated cpus on purpose.
5341 : * If the user wishes to include them, we allow that.
5342 : */
5343 0 : cpumask_and(cpumask, cpumask, cpu_possible_mask);
5344 0 : if (!cpumask_empty(cpumask)) {
5345 0 : apply_wqattrs_lock();
5346 :
5347 : /* save the old wq_unbound_cpumask. */
5348 0 : cpumask_copy(saved_cpumask, wq_unbound_cpumask);
5349 :
5350 : /* update wq_unbound_cpumask at first and apply it to wqs. */
5351 0 : cpumask_copy(wq_unbound_cpumask, cpumask);
5352 0 : ret = workqueue_apply_unbound_cpumask();
5353 :
5354 : /* restore the wq_unbound_cpumask when failed. */
5355 0 : if (ret < 0)
5356 0 : cpumask_copy(wq_unbound_cpumask, saved_cpumask);
5357 :
5358 0 : apply_wqattrs_unlock();
5359 : }
5360 :
5361 0 : free_cpumask_var(saved_cpumask);
5362 0 : return ret;
5363 : }
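/*
 * Userspace typically reaches workqueue_set_unbound_cpumask() through the
 * "cpumask" attribute registered in wq_sysfs_init() below; assuming the
 * usual virtual-device path, e.g.:
 *
 *	# echo 0-3 > /sys/devices/virtual/workqueue/cpumask
 */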
5364 :
5365 : #ifdef CONFIG_SYSFS
5366 : /*
5367 : * Workqueues with the WQ_SYSFS flag set are visible to userland via
5368 : * /sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the
5369 : * following attributes.
5370 : *
5371 : * per_cpu RO bool : whether the workqueue is per-cpu or unbound
5372 : * max_active RW int : maximum number of in-flight work items
5373 : *
5374 : * Unbound workqueues have the following extra attributes.
5375 : *
5376 : * pool_ids RO int : the associated pool IDs for each node
5377 : * nice RW int : nice value of the workers
5378 : * cpumask RW mask : bitmask of allowed CPUs for the workers
5379 : * numa RW bool : whether to enable NUMA affinity
5380 : */
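/*
 * Illustrative sketch (hypothetical driver code, not part of workqueue.c):
 * passing WQ_SYSFS at allocation time is what makes a workqueue appear
 * under /sys/bus/workqueue/devices/ with the attributes listed above.
 */
static struct workqueue_struct *example_sysfs_wq;

static int __init example_sysfs_wq_init(void)
{
	example_sysfs_wq = alloc_workqueue("example_sysfs_wq",
					   WQ_UNBOUND | WQ_SYSFS, 0);
	return example_sysfs_wq ? 0 : -ENOMEM;
}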
5381 : struct wq_device {
5382 : struct workqueue_struct *wq;
5383 : struct device dev;
5384 : };
5385 :
5386 0 : static struct workqueue_struct *dev_to_wq(struct device *dev)
5387 : {
5388 0 : struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
5389 :
5390 0 : return wq_dev->wq;
5391 : }
5392 :
5393 0 : static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
5394 : char *buf)
5395 : {
5396 0 : struct workqueue_struct *wq = dev_to_wq(dev);
5397 :
5398 0 : return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
5399 : }
5400 : static DEVICE_ATTR_RO(per_cpu);
5401 :
5402 0 : static ssize_t max_active_show(struct device *dev,
5403 : struct device_attribute *attr, char *buf)
5404 : {
5405 0 : struct workqueue_struct *wq = dev_to_wq(dev);
5406 :
5407 0 : return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
5408 : }
5409 :
5410 0 : static ssize_t max_active_store(struct device *dev,
5411 : struct device_attribute *attr, const char *buf,
5412 : size_t count)
5413 : {
5414 0 : struct workqueue_struct *wq = dev_to_wq(dev);
5415 0 : int val;
5416 :
5417 0 : if (sscanf(buf, "%d", &val) != 1 || val <= 0)
5418 : return -EINVAL;
5419 :
5420 0 : workqueue_set_max_active(wq, val);
5421 0 : return count;
5422 : }
5423 : static DEVICE_ATTR_RW(max_active);
5424 :
5425 : static struct attribute *wq_sysfs_attrs[] = {
5426 : &dev_attr_per_cpu.attr,
5427 : &dev_attr_max_active.attr,
5428 : NULL,
5429 : };
5430 : ATTRIBUTE_GROUPS(wq_sysfs);
5431 :
5432 0 : static ssize_t wq_pool_ids_show(struct device *dev,
5433 : struct device_attribute *attr, char *buf)
5434 : {
5435 0 : struct workqueue_struct *wq = dev_to_wq(dev);
5436 0 : const char *delim = "";
5437 0 : int node, written = 0;
5438 :
5439 0 : get_online_cpus();
5440 0 : rcu_read_lock();
5441 0 : for_each_node(node) {
5442 0 : written += scnprintf(buf + written, PAGE_SIZE - written,
5443 : "%s%d:%d", delim, node,
5444 0 : unbound_pwq_by_node(wq, node)->pool->id);
5445 0 : delim = " ";
5446 : }
5447 0 : written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
5448 0 : rcu_read_unlock();
5449 0 : put_online_cpus();
5450 :
5451 0 : return written;
5452 : }
5453 :
5454 0 : static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
5455 : char *buf)
5456 : {
5457 0 : struct workqueue_struct *wq = dev_to_wq(dev);
5458 0 : int written;
5459 :
5460 0 : mutex_lock(&wq->mutex);
5461 0 : written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
5462 0 : mutex_unlock(&wq->mutex);
5463 :
5464 0 : return written;
5465 : }
5466 :
5467 : /* prepare workqueue_attrs for sysfs store operations */
5468 0 : static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
5469 : {
5470 0 : struct workqueue_attrs *attrs;
5471 :
5472 0 : lockdep_assert_held(&wq_pool_mutex);
5473 :
5474 0 : attrs = alloc_workqueue_attrs();
5475 0 : if (!attrs)
5476 : return NULL;
5477 :
5478 0 : copy_workqueue_attrs(attrs, wq->unbound_attrs);
5479 0 : return attrs;
5480 : }
5481 :
5482 0 : static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
5483 : const char *buf, size_t count)
5484 : {
5485 0 : struct workqueue_struct *wq = dev_to_wq(dev);
5486 0 : struct workqueue_attrs *attrs;
5487 0 : int ret = -ENOMEM;
5488 :
5489 0 : apply_wqattrs_lock();
5490 :
5491 0 : attrs = wq_sysfs_prep_attrs(wq);
5492 0 : if (!attrs)
5493 0 : goto out_unlock;
5494 :
5495 0 : if (sscanf(buf, "%d", &attrs->nice) == 1 &&
5496 0 : attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
5497 0 : ret = apply_workqueue_attrs_locked(wq, attrs);
5498 : else
5499 : ret = -EINVAL;
5500 :
5501 0 : out_unlock:
5502 0 : apply_wqattrs_unlock();
5503 0 : free_workqueue_attrs(attrs);
5504 0 : return ret ?: count;
5505 : }
5506 :
5507 0 : static ssize_t wq_cpumask_show(struct device *dev,
5508 : struct device_attribute *attr, char *buf)
5509 : {
5510 0 : struct workqueue_struct *wq = dev_to_wq(dev);
5511 0 : int written;
5512 :
5513 0 : mutex_lock(&wq->mutex);
5514 0 : written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
5515 0 : cpumask_pr_args(wq->unbound_attrs->cpumask));
5516 0 : mutex_unlock(&wq->mutex);
5517 0 : return written;
5518 : }
5519 :
5520 0 : static ssize_t wq_cpumask_store(struct device *dev,
5521 : struct device_attribute *attr,
5522 : const char *buf, size_t count)
5523 : {
5524 0 : struct workqueue_struct *wq = dev_to_wq(dev);
5525 0 : struct workqueue_attrs *attrs;
5526 0 : int ret = -ENOMEM;
5527 :
5528 0 : apply_wqattrs_lock();
5529 :
5530 0 : attrs = wq_sysfs_prep_attrs(wq);
5531 0 : if (!attrs)
5532 0 : goto out_unlock;
5533 :
5534 0 : ret = cpumask_parse(buf, attrs->cpumask);
5535 0 : if (!ret)
5536 0 : ret = apply_workqueue_attrs_locked(wq, attrs);
5537 :
5538 0 : out_unlock:
5539 0 : apply_wqattrs_unlock();
5540 0 : free_workqueue_attrs(attrs);
5541 0 : return ret ?: count;
5542 : }
5543 :
5544 0 : static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
5545 : char *buf)
5546 : {
5547 0 : struct workqueue_struct *wq = dev_to_wq(dev);
5548 0 : int written;
5549 :
5550 0 : mutex_lock(&wq->mutex);
5551 0 : written = scnprintf(buf, PAGE_SIZE, "%d\n",
5552 0 : !wq->unbound_attrs->no_numa);
5553 0 : mutex_unlock(&wq->mutex);
5554 :
5555 0 : return written;
5556 : }
5557 :
5558 0 : static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
5559 : const char *buf, size_t count)
5560 : {
5561 0 : struct workqueue_struct *wq = dev_to_wq(dev);
5562 0 : struct workqueue_attrs *attrs;
5563 0 : int v, ret = -ENOMEM;
5564 :
5565 0 : apply_wqattrs_lock();
5566 :
5567 0 : attrs = wq_sysfs_prep_attrs(wq);
5568 0 : if (!attrs)
5569 0 : goto out_unlock;
5570 :
5571 0 : ret = -EINVAL;
5572 0 : if (sscanf(buf, "%d", &v) == 1) {
5573 0 : attrs->no_numa = !v;
5574 0 : ret = apply_workqueue_attrs_locked(wq, attrs);
5575 : }
5576 :
5577 0 : out_unlock:
5578 0 : apply_wqattrs_unlock();
5579 0 : free_workqueue_attrs(attrs);
5580 0 : return ret ?: count;
5581 : }
5582 :
5583 : static struct device_attribute wq_sysfs_unbound_attrs[] = {
5584 : __ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
5585 : __ATTR(nice, 0644, wq_nice_show, wq_nice_store),
5586 : __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
5587 : __ATTR(numa, 0644, wq_numa_show, wq_numa_store),
5588 : __ATTR_NULL,
5589 : };
5590 :
5591 : static struct bus_type wq_subsys = {
5592 : .name = "workqueue",
5593 : .dev_groups = wq_sysfs_groups,
5594 : };
5595 :
5596 0 : static ssize_t wq_unbound_cpumask_show(struct device *dev,
5597 : struct device_attribute *attr, char *buf)
5598 : {
5599 0 : int written;
5600 :
5601 0 : mutex_lock(&wq_pool_mutex);
5602 0 : written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
5603 : cpumask_pr_args(wq_unbound_cpumask));
5604 0 : mutex_unlock(&wq_pool_mutex);
5605 :
5606 0 : return written;
5607 : }
5608 :
5609 0 : static ssize_t wq_unbound_cpumask_store(struct device *dev,
5610 : struct device_attribute *attr, const char *buf, size_t count)
5611 : {
5612 0 : cpumask_var_t cpumask;
5613 0 : int ret;
5614 :
5615 0 : if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
5616 : return -ENOMEM;
5617 :
5618 0 : ret = cpumask_parse(buf, cpumask);
5619 0 : if (!ret)
5620 0 : ret = workqueue_set_unbound_cpumask(cpumask);
5621 :
5622 0 : free_cpumask_var(cpumask);
5623 0 : return ret ? ret : count;
5624 : }
5625 :
5626 : static struct device_attribute wq_sysfs_cpumask_attr =
5627 : __ATTR(cpumask, 0644, wq_unbound_cpumask_show,
5628 : wq_unbound_cpumask_store);
5629 :
5630 1 : static int __init wq_sysfs_init(void)
5631 : {
5632 1 : int err;
5633 :
5634 1 : err = subsys_virtual_register(&wq_subsys, NULL);
5635 1 : if (err)
5636 : return err;
5637 :
5638 1 : return device_create_file(wq_subsys.dev_root, &wq_sysfs_cpumask_attr);
5639 : }
5640 : core_initcall(wq_sysfs_init);
5641 :
5642 0 : static void wq_device_release(struct device *dev)
5643 : {
5644 0 : struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
5645 :
5646 0 : kfree(wq_dev);
5647 0 : }
5648 :
5649 : /**
5650 : * workqueue_sysfs_register - make a workqueue visible in sysfs
5651 : * @wq: the workqueue to register
5652 : *
5653 : * Expose @wq in sysfs under /sys/bus/workqueue/devices.
5654 : * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set
5655 : * which is the preferred method.
5656 : *
5657 : * A workqueue user should use this function directly iff it wants to apply
5658 : * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
5659 : * apply_workqueue_attrs() may race against userland updating the
5660 : * attributes.
5661 : *
5662 : * Return: 0 on success, -errno on failure.
5663 : */
5664 1 : int workqueue_sysfs_register(struct workqueue_struct *wq)
5665 : {
5666 1 : struct wq_device *wq_dev;
5667 1 : int ret;
5668 :
5669 : /*
5670 : * Adjusting max_active or creating new pwqs by applying
5671 : * attributes breaks ordering guarantee. Disallow exposing ordered
5672 : * workqueues.
5673 : */
5674 1 : if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
5675 : return -EINVAL;
5676 :
5677 1 : wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
5678 1 : if (!wq_dev)
5679 : return -ENOMEM;
5680 :
5681 1 : wq_dev->wq = wq;
5682 1 : wq_dev->dev.bus = &wq_subsys;
5683 1 : wq_dev->dev.release = wq_device_release;
5684 1 : dev_set_name(&wq_dev->dev, "%s", wq->name);
5685 :
5686 : /*
5687 : * unbound_attrs are created separately. Suppress uevent until
5688 : * everything is ready.
5689 : */
5690 1 : dev_set_uevent_suppress(&wq_dev->dev, true);
5691 :
5692 1 : ret = device_register(&wq_dev->dev);
5693 1 : if (ret) {
5694 0 : put_device(&wq_dev->dev);
5695 0 : wq->wq_dev = NULL;
5696 0 : return ret;
5697 : }
5698 :
5699 1 : if (wq->flags & WQ_UNBOUND) {
5700 : struct device_attribute *attr;
5701 :
5702 5 : for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
5703 4 : ret = device_create_file(&wq_dev->dev, attr);
5704 4 : if (ret) {
5705 0 : device_unregister(&wq_dev->dev);
5706 0 : wq->wq_dev = NULL;
5707 0 : return ret;
5708 : }
5709 : }
5710 : }
5711 :
5712 1 : dev_set_uevent_suppress(&wq_dev->dev, false);
5713 1 : kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
5714 1 : return 0;
5715 : }
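/*
 * Sketch of the "use directly" case described in the comment above
 * (hypothetical caller): apply attributes before exposing the workqueue
 * to userland so sysfs writers cannot race with the initial setup.
 *
 *	wq = alloc_workqueue("example", WQ_UNBOUND, 0);	  // note: no WQ_SYSFS
 *	apply_workqueue_attrs(wq, attrs);
 *	workqueue_sysfs_register(wq);
 */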
5716 :
5717 : /**
5718 : * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
5719 : * @wq: the workqueue to unregister
5720 : *
5721 : * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
5722 : */
5723 1 : static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
5724 : {
5725 1 : struct wq_device *wq_dev = wq->wq_dev;
5726 :
5727 1 : if (!wq->wq_dev)
5728 : return;
5729 :
5730 0 : wq->wq_dev = NULL;
5731 0 : device_unregister(&wq_dev->dev);
5732 : }
5733 : #else /* CONFIG_SYSFS */
5734 : static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { }
5735 : #endif /* CONFIG_SYSFS */
5736 :
5737 : /*
5738 : * Workqueue watchdog.
5739 : *
5740 : * Stalls may be caused by various bugs - a missing WQ_MEM_RECLAIM, an illegal
5741 : * flush dependency, a concurrency managed work item which stays RUNNING
5742 : * indefinitely. Workqueue stalls can be very difficult to debug as the
5743 : * usual warning mechanisms don't trigger and internal workqueue state is
5744 : * largely opaque.
5745 : *
5746 : * Workqueue watchdog monitors all worker pools periodically and dumps
5747 : * state if some pools fail to make forward progress for a while, where
5748 : * forward progress is defined as the first item on ->worklist changing.
5749 : *
5750 : * This mechanism is controlled through the kernel parameter
5751 : * "workqueue.watchdog_thresh" which can be updated at runtime through the
5752 : * corresponding sysfs parameter file.
5753 : */
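/*
 * Example (paths/values illustrative): the threshold is in seconds and can
 * be set at boot or at runtime, e.g.
 *
 *	workqueue.watchdog_thresh=60
 *	# echo 60 > /sys/module/workqueue/parameters/watchdog_thresh
 */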
5754 : #ifdef CONFIG_WQ_WATCHDOG
5755 :
5756 : static unsigned long wq_watchdog_thresh = 30;
5757 : static struct timer_list wq_watchdog_timer;
5758 :
5759 : static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
5760 : static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
5761 :
5762 : static void wq_watchdog_reset_touched(void)
5763 : {
5764 : int cpu;
5765 :
5766 : wq_watchdog_touched = jiffies;
5767 : for_each_possible_cpu(cpu)
5768 : per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
5769 : }
5770 :
5771 : static void wq_watchdog_timer_fn(struct timer_list *unused)
5772 : {
5773 : unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
5774 : bool lockup_detected = false;
5775 : struct worker_pool *pool;
5776 : int pi;
5777 :
5778 : if (!thresh)
5779 : return;
5780 :
5781 : rcu_read_lock();
5782 :
5783 : for_each_pool(pool, pi) {
5784 : unsigned long pool_ts, touched, ts;
5785 :
5786 : if (list_empty(&pool->worklist))
5787 : continue;
5788 :
5789 : /* get the latest of pool and touched timestamps */
5790 : pool_ts = READ_ONCE(pool->watchdog_ts);
5791 : touched = READ_ONCE(wq_watchdog_touched);
5792 :
5793 : if (time_after(pool_ts, touched))
5794 : ts = pool_ts;
5795 : else
5796 : ts = touched;
5797 :
5798 : if (pool->cpu >= 0) {
5799 : unsigned long cpu_touched =
5800 : READ_ONCE(per_cpu(wq_watchdog_touched_cpu,
5801 : pool->cpu));
5802 : if (time_after(cpu_touched, ts))
5803 : ts = cpu_touched;
5804 : }
5805 :
5806 : /* did we stall? */
5807 : if (time_after(jiffies, ts + thresh)) {
5808 : lockup_detected = true;
5809 : pr_emerg("BUG: workqueue lockup - pool");
5810 : pr_cont_pool_info(pool);
5811 : pr_cont(" stuck for %us!\n",
5812 : jiffies_to_msecs(jiffies - pool_ts) / 1000);
5813 : }
5814 : }
5815 :
5816 : rcu_read_unlock();
5817 :
5818 : if (lockup_detected)
5819 : show_workqueue_state();
5820 :
5821 : wq_watchdog_reset_touched();
5822 : mod_timer(&wq_watchdog_timer, jiffies + thresh);
5823 : }
5824 :
5825 : notrace void wq_watchdog_touch(int cpu)
5826 : {
5827 : if (cpu >= 0)
5828 : per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
5829 : else
5830 : wq_watchdog_touched = jiffies;
5831 : }
5832 :
5833 : static void wq_watchdog_set_thresh(unsigned long thresh)
5834 : {
5835 : wq_watchdog_thresh = 0;
5836 : del_timer_sync(&wq_watchdog_timer);
5837 :
5838 : if (thresh) {
5839 : wq_watchdog_thresh = thresh;
5840 : wq_watchdog_reset_touched();
5841 : mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
5842 : }
5843 : }
5844 :
5845 : static int wq_watchdog_param_set_thresh(const char *val,
5846 : const struct kernel_param *kp)
5847 : {
5848 : unsigned long thresh;
5849 : int ret;
5850 :
5851 : ret = kstrtoul(val, 0, &thresh);
5852 : if (ret)
5853 : return ret;
5854 :
5855 : if (system_wq)
5856 : wq_watchdog_set_thresh(thresh);
5857 : else
5858 : wq_watchdog_thresh = thresh;
5859 :
5860 : return 0;
5861 : }
5862 :
5863 : static const struct kernel_param_ops wq_watchdog_thresh_ops = {
5864 : .set = wq_watchdog_param_set_thresh,
5865 : .get = param_get_ulong,
5866 : };
5867 :
5868 : module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
5869 : 0644);
5870 :
5871 : static void wq_watchdog_init(void)
5872 : {
5873 : timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE);
5874 : wq_watchdog_set_thresh(wq_watchdog_thresh);
5875 : }
5876 :
5877 : #else /* CONFIG_WQ_WATCHDOG */
5878 :
5879 1 : static inline void wq_watchdog_init(void) { }
5880 :
5881 : #endif /* CONFIG_WQ_WATCHDOG */
5882 :
5883 1 : static void __init wq_numa_init(void)
5884 : {
5885 1 : cpumask_var_t *tbl;
5886 1 : int node, cpu;
5887 :
5888 1 : if (num_possible_nodes() <= 1)
5889 : return;
5890 :
5891 0 : if (wq_disable_numa) {
5892 0 : pr_info("workqueue: NUMA affinity support disabled\n");
5893 0 : return;
5894 : }
5895 :
5896 0 : wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs();
5897 0 : BUG_ON(!wq_update_unbound_numa_attrs_buf);
5898 :
5899 : /*
5900 : * We want masks of possible CPUs of each node which isn't readily
5901 : * available. Build one from cpu_to_node() which should have been
5902 : * fully initialized by now.
5903 : */
5904 0 : tbl = kcalloc(nr_node_ids, sizeof(tbl[0]), GFP_KERNEL);
5905 0 : BUG_ON(!tbl);
5906 :
5907 0 : for_each_node(node)
5908 0 : BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
5909 : node_online(node) ? node : NUMA_NO_NODE));
5910 :
5911 0 : for_each_possible_cpu(cpu) {
5912 0 : node = cpu_to_node(cpu);
5913 0 : if (WARN_ON(node == NUMA_NO_NODE)) {
5914 0 : pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
5915 : /* happens iff arch is bonkers, let's just proceed */
5916 0 : return;
5917 : }
5918 0 : cpumask_set_cpu(cpu, tbl[node]);
5919 : }
5920 :
5921 0 : wq_numa_possible_cpumask = tbl;
5922 0 : wq_numa_enabled = true;
5923 : }
5924 :
5925 : /**
5926 : * workqueue_init_early - early init for workqueue subsystem
5927 : *
5928 : * This is the first half of two-staged workqueue subsystem initialization
5929 : * and invoked as soon as the bare basics - memory allocation, cpumasks and
5930 : * idr are up. It sets up all the data structures and system workqueues
5931 : * and allows early boot code to create workqueues and queue/cancel work
5932 : * items. Actual work item execution starts only after kthreads can be
5933 : * created and scheduled right before early initcalls.
5934 : */
5935 1 : void __init workqueue_init_early(void)
5936 : {
5937 1 : int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
5938 1 : int hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
5939 1 : int i, cpu;
5940 :
5941 1 : BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
5942 :
5943 1 : BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
5944 1 : cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(hk_flags));
5945 :
5946 1 : pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
5947 :
5948 : /* initialize CPU pools */
5949 5 : for_each_possible_cpu(cpu) {
5950 4 : struct worker_pool *pool;
5951 :
5952 4 : i = 0;
5953 12 : for_each_cpu_worker_pool(pool, cpu) {
5954 8 : BUG_ON(init_worker_pool(pool));
5955 8 : pool->cpu = cpu;
5956 8 : cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
5957 8 : pool->attrs->nice = std_nice[i++];
5958 8 : pool->node = cpu_to_node(cpu);
5959 :
5960 : /* alloc pool ID */
5961 8 : mutex_lock(&wq_pool_mutex);
5962 8 : BUG_ON(worker_pool_assign_id(pool));
5963 8 : mutex_unlock(&wq_pool_mutex);
5964 : }
5965 : }
5966 :
5967 : /* create default unbound and ordered wq attrs */
5968 3 : for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
5969 2 : struct workqueue_attrs *attrs;
5970 :
5971 2 : BUG_ON(!(attrs = alloc_workqueue_attrs()));
5972 2 : attrs->nice = std_nice[i];
5973 2 : unbound_std_wq_attrs[i] = attrs;
5974 :
5975 : /*
5976 : * An ordered wq should have only one pwq as ordering is
5977 : * guaranteed by max_active which is enforced by pwqs.
5978 : * Turn off NUMA so that dfl_pwq is used for all nodes.
5979 : */
5980 2 : BUG_ON(!(attrs = alloc_workqueue_attrs()));
5981 2 : attrs->nice = std_nice[i];
5982 2 : attrs->no_numa = true;
5983 2 : ordered_wq_attrs[i] = attrs;
5984 : }
5985 :
5986 1 : system_wq = alloc_workqueue("events", 0, 0);
5987 1 : system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
5988 1 : system_long_wq = alloc_workqueue("events_long", 0, 0);
5989 2 : system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
5990 1 : WQ_UNBOUND_MAX_ACTIVE);
5991 1 : system_freezable_wq = alloc_workqueue("events_freezable",
5992 : WQ_FREEZABLE, 0);
5993 1 : system_power_efficient_wq = alloc_workqueue("events_power_efficient",
5994 : WQ_POWER_EFFICIENT, 0);
5995 1 : system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
5996 : WQ_FREEZABLE | WQ_POWER_EFFICIENT,
5997 : 0);
5998 1 : BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
5999 : !system_unbound_wq || !system_freezable_wq ||
6000 : !system_power_efficient_wq ||
6001 : !system_freezable_power_efficient_wq);
6002 1 : }
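/*
 * Rough boot-sequence sketch (call sites assumed from generic init code
 * and may differ between kernel versions):
 *
 *	start_kernel()
 *		workqueue_init_early();		// wqs usable, nothing runs yet
 *		...
 *	kernel_init_freeable()
 *		workqueue_init();		// kworkers exist, work items run
 */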
6003 :
6004 : /**
6005 : * workqueue_init - bring workqueue subsystem fully online
6006 : *
6007 : * This is the latter half of two-staged workqueue subsystem initialization
6008 : * and invoked as soon as kthreads can be created and scheduled.
6009 : * Workqueues have been created and work items queued on them, but there
6010 : * are no kworkers executing the work items yet. Populate the worker pools
6011 : * with the initial workers and enable future kworker creations.
6012 : */
6013 1 : void __init workqueue_init(void)
6014 : {
6015 1 : struct workqueue_struct *wq;
6016 1 : struct worker_pool *pool;
6017 1 : int cpu, bkt;
6018 :
6019 : /*
6020 : * It'd be simpler to initialize NUMA in workqueue_init_early() but
6021 : * CPU to node mapping may not be available that early on some
6022 : * archs such as power and arm64. As per-cpu pools created
6023 : * previously could be missing node hint and unbound pools NUMA
6024 : * previously could be missing the node hint and unbound pools could be
6025 : * missing NUMA affinity, fix them up.
6026 : * Also, while iterating workqueues, create rescuers if requested.
6027 : */
6028 1 : wq_numa_init();
6029 :
6030 1 : mutex_lock(&wq_pool_mutex);
6031 :
6032 6 : for_each_possible_cpu(cpu) {
6033 12 : for_each_cpu_worker_pool(pool, cpu) {
6034 8 : pool->node = cpu_to_node(cpu);
6035 : }
6036 : }
6037 :
6038 10 : list_for_each_entry(wq, &workqueues, list) {
6039 9 : wq_update_unbound_numa(wq, smp_processor_id(), true);
6040 9 : WARN(init_rescuer(wq),
6041 : "workqueue: failed to create early rescuer for %s",
6042 : wq->name);
6043 : }
6044 :
6045 1 : mutex_unlock(&wq_pool_mutex);
6046 :
6047 : /* create the initial workers */
6048 3 : for_each_online_cpu(cpu) {
6049 3 : for_each_cpu_worker_pool(pool, cpu) {
6050 2 : pool->flags &= ~POOL_DISASSOCIATED;
6051 2 : BUG_ON(!create_worker(pool));
6052 : }
6053 : }
6054 :
6055 66 : hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
6056 1 : BUG_ON(!create_worker(pool));
6057 :
6058 1 : wq_online = true;
6059 1 : wq_watchdog_init();
6060 1 : }