Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-or-later
2 : /*
3 : * kernel/stop_machine.c
4 : *
5 : * Copyright (C) 2008, 2005 IBM Corporation.
6 : * Copyright (C) 2008, 2005 Rusty Russell rusty@rustcorp.com.au
7 : * Copyright (C) 2010 SUSE Linux Products GmbH
8 : * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
9 : */
10 : #include <linux/compiler.h>
11 : #include <linux/completion.h>
12 : #include <linux/cpu.h>
13 : #include <linux/init.h>
14 : #include <linux/kthread.h>
15 : #include <linux/export.h>
16 : #include <linux/percpu.h>
17 : #include <linux/sched.h>
18 : #include <linux/stop_machine.h>
19 : #include <linux/interrupt.h>
20 : #include <linux/kallsyms.h>
21 : #include <linux/smpboot.h>
22 : #include <linux/atomic.h>
23 : #include <linux/nmi.h>
24 : #include <linux/sched/wake_q.h>
25 :
26 : /*
27 : * Structure to determine completion condition and record errors. May
28 : * be shared by works on different cpus.
29 : */
30 : struct cpu_stop_done {
31 : atomic_t nr_todo; /* nr left to execute */
32 : int ret; /* collected return value */
33 : struct completion completion; /* fired if nr_todo reaches 0 */
34 : };
35 :
36 : /* the actual stopper, one per possible cpu, enabled on online cpus */
37 : struct cpu_stopper {
38 : struct task_struct *thread;
39 :
40 : raw_spinlock_t lock;
41 : bool enabled; /* is this stopper enabled? */
42 : struct list_head works; /* list of pending works */
43 :
44 : struct cpu_stop_work stop_work; /* for stop_cpus */
45 : unsigned long caller;
46 : cpu_stop_fn_t fn;
47 : };
48 :
49 : static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
50 : static bool stop_machine_initialized = false;
51 :
52 1 : void print_stop_info(const char *log_lvl, struct task_struct *task)
53 : {
54 : /*
55 : * If @task is a stopper task, it cannot migrate and task_cpu() is
56 : * stable.
57 : */
58 1 : struct cpu_stopper *stopper = per_cpu_ptr(&cpu_stopper, task_cpu(task));
59 :
60 1 : if (task != stopper->thread)
61 : return;
62 :
63 0 : printk("%sStopper: %pS <- %pS\n", log_lvl, stopper->fn, (void *)stopper->caller);
64 : }
65 :
66 : /* static data for stop_cpus */
67 : static DEFINE_MUTEX(stop_cpus_mutex);
68 : static bool stop_cpus_in_progress;
69 :
70 33 : static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
71 : {
72 33 : memset(done, 0, sizeof(*done));
73 33 : atomic_set(&done->nr_todo, nr_todo);
74 33 : init_completion(&done->completion);
75 33 : }
76 :
77 : /* signal completion unless @done is NULL */
78 34 : static void cpu_stop_signal_done(struct cpu_stop_done *done)
79 : {
80 70 : if (atomic_dec_and_test(&done->nr_todo))
81 33 : complete(&done->completion);
82 36 : }
83 :
84 37 : static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
85 : struct cpu_stop_work *work,
86 : struct wake_q_head *wakeq)
87 : {
88 37 : list_add_tail(&work->list, &stopper->works);
89 37 : wake_q_add(wakeq, stopper->thread);
90 37 : }
91 :
92 : /* queue @work to @stopper. if offline, @work is completed immediately */
93 37 : static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
94 : {
95 37 : struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
96 37 : DEFINE_WAKE_Q(wakeq);
97 37 : unsigned long flags;
98 37 : bool enabled;
99 :
100 37 : preempt_disable();
101 37 : raw_spin_lock_irqsave(&stopper->lock, flags);
102 37 : enabled = stopper->enabled;
103 37 : if (enabled)
104 37 : __cpu_stop_queue_work(stopper, work, &wakeq);
105 0 : else if (work->done)
106 0 : cpu_stop_signal_done(work->done);
107 37 : raw_spin_unlock_irqrestore(&stopper->lock, flags);
108 :
109 37 : wake_up_q(&wakeq);
110 37 : preempt_enable();
111 :
112 37 : return enabled;
113 : }
114 :
115 : /**
116 : * stop_one_cpu - stop a cpu
117 : * @cpu: cpu to stop
118 : * @fn: function to execute
119 : * @arg: argument to @fn
120 : *
121 : * Execute @fn(@arg) on @cpu. @fn is run in a process context with
122 : * the highest priority preempting any task on the cpu and
123 : * monopolizing it. This function returns after the execution is
124 : * complete.
125 : *
126 : * This function doesn't guarantee @cpu stays online till @fn
127 : * completes. If @cpu goes down in the middle, execution may happen
128 : * partially or fully on different cpus. @fn should either be ready
129 : * for that or the caller should ensure that @cpu stays online until
130 : * this function completes.
131 : *
132 : * CONTEXT:
133 : * Might sleep.
134 : *
135 : * RETURNS:
136 : * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
137 : * otherwise, the return value of @fn.
138 : */
139 32 : int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
140 : {
141 32 : struct cpu_stop_done done;
142 32 : struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done, .caller = _RET_IP_ };
143 :
144 32 : cpu_stop_init_done(&done, 1);
145 32 : if (!cpu_stop_queue_work(cpu, &work))
146 : return -ENOENT;
147 : /*
148 : * In case @cpu == smp_processor_id() we can avoid a sleep+wakeup
149 : * cycle by doing a preemption:
150 : */
151 32 : cond_resched();
152 32 : wait_for_completion(&done.completion);
153 32 : return done.ret;
154 : }
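/*
 * Editor's illustration (not part of this file): a minimal sketch of a
 * stop_one_cpu() caller. The callback runs in stopper context with
 * preemption disabled, so it must not sleep. All names below are
 * hypothetical.
 */
#include <linux/printk.h>
#include <linux/smp.h>
#include <linux/stop_machine.h>

/* hypothetical callback, executed on the target CPU by its stopper thread */
static int example_read_cpu(void *arg)
{
	int *out = arg;

	*out = smp_processor_id();	/* no sleeping allowed here */
	return 0;
}

static int example_stop_one_cpu(unsigned int cpu)
{
	int id = -1;
	int ret;

	/* -ENOENT if @cpu was offline, otherwise the callback's return value */
	ret = stop_one_cpu(cpu, example_read_cpu, &id);
	if (!ret)
		pr_info("callback ran on CPU %d\n", id);
	return ret;
}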
155 :
156 : /* This controls the threads on each CPU. */
157 : enum multi_stop_state {
158 : /* Dummy starting state for thread. */
159 : MULTI_STOP_NONE,
160 : /* Awaiting everyone to be scheduled. */
161 : MULTI_STOP_PREPARE,
162 : /* Disable interrupts. */
163 : MULTI_STOP_DISABLE_IRQ,
164 : /* Run the function */
165 : MULTI_STOP_RUN,
166 : /* Exit */
167 : MULTI_STOP_EXIT,
168 : };
169 :
170 : struct multi_stop_data {
171 : cpu_stop_fn_t fn;
172 : void *data;
173 : /* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
174 : unsigned int num_threads;
175 : const struct cpumask *active_cpus;
176 :
177 : enum multi_stop_state state;
178 : atomic_t thread_ack;
179 : };
180 :
181 5 : static void set_state(struct multi_stop_data *msdata,
182 : enum multi_stop_state newstate)
183 : {
184 : /* Reset ack counter. */
185 10 : atomic_set(&msdata->thread_ack, msdata->num_threads);
186 5 : smp_wmb();
187 5 : WRITE_ONCE(msdata->state, newstate);
188 4 : }
189 :
190 : /* Last one to ack a state moves to the next state. */
191 13 : static void ack_state(struct multi_stop_data *msdata)
192 : {
193 27 : if (atomic_dec_and_test(&msdata->thread_ack))
194 4 : set_state(msdata, msdata->state + 1);
195 14 : }
196 :
197 127 : notrace void __weak stop_machine_yield(const struct cpumask *cpumask)
198 : {
199 127 : cpu_relax();
200 133 : }
201 :
202 : /* This is the cpu_stop function which stops the CPU. */
203 4 : static int multi_cpu_stop(void *data)
204 : {
205 4 : struct multi_stop_data *msdata = data;
206 4 : enum multi_stop_state newstate, curstate = MULTI_STOP_NONE;
207 4 : int cpu = smp_processor_id(), err = 0;
208 4 : const struct cpumask *cpumask;
209 4 : unsigned long flags;
210 4 : bool is_active;
211 :
212 : /*
213 : * When called from stop_machine_from_inactive_cpu(), irq might
214 : * already be disabled. Save the state and restore it on exit.
215 : */
216 4 : local_save_flags(flags);
217 :
218 3 : if (!msdata->active_cpus) {
219 3 : cpumask = cpu_online_mask;
220 3 : is_active = cpu == cpumask_first(cpumask);
221 : } else {
222 0 : cpumask = msdata->active_cpus;
223 0 : is_active = cpumask_test_cpu(cpu, cpumask);
224 : }
225 :
226 : /* Simple state machine */
227 132 : do {
228 : /* Chill out and ensure we re-read multi_stop_state. */
229 132 : stop_machine_yield(cpumask);
230 128 : newstate = READ_ONCE(msdata->state);
231 128 : if (newstate != curstate) {
232 13 : curstate = newstate;
233 13 : switch (curstate) {
234 : case MULTI_STOP_DISABLE_IRQ:
235 3 : local_irq_disable();
236 : hard_irq_disable();
237 : break;
238 3 : case MULTI_STOP_RUN:
239 3 : if (is_active)
240 1 : err = msdata->fn(msdata->data);
241 : break;
242 : default:
243 : break;
244 : }
245 12 : ack_state(msdata);
246 : } else if (curstate > MULTI_STOP_PREPARE) {
247 : /*
248 : * At this stage all other CPUs we depend on must spin
249 : * in the same loop. Any reason for hard-lockup should
250 : * be detected and reported on their side.
251 : */
252 : touch_nmi_watchdog();
253 : }
254 130 : rcu_momentary_dyntick_idle();
255 131 : } while (curstate != MULTI_STOP_EXIT);
256 :
257 3 : local_irq_restore(flags);
258 2 : return err;
259 : }
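/*
 * Editor's sketch of the state machine above for a two-thread stop with a
 * single active CPU (purely illustrative):
 *
 *   MULTI_STOP_PREPARE      both stopper threads arrive and ack
 *   MULTI_STOP_DISABLE_IRQ  both disable (and hard-disable) local IRQs
 *   MULTI_STOP_RUN          only the active CPU calls msdata->fn(msdata->data)
 *   MULTI_STOP_EXIT         both restore their saved flags and return
 *
 * The last thread to ack each state advances it via ack_state()/set_state(),
 * so no participant can run ahead of the others.
 */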
260 :
261 0 : static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
262 : int cpu2, struct cpu_stop_work *work2)
263 : {
264 0 : struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
265 0 : struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
266 0 : DEFINE_WAKE_Q(wakeq);
267 0 : int err;
268 :
269 0 : retry:
270 : /*
271 : * The waking up of stopper threads has to happen in the same
272 : * scheduling context as the queueing. Otherwise, there is a
273 : * possibility of one of the above stoppers being woken up by another
274 : * CPU, and preempting us. This would leave the other stopper
275 : * unwoken forever.
276 : */
277 0 : preempt_disable();
278 0 : raw_spin_lock_irq(&stopper1->lock);
279 0 : raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
280 :
281 0 : if (!stopper1->enabled || !stopper2->enabled) {
282 0 : err = -ENOENT;
283 0 : goto unlock;
284 : }
285 :
286 : /*
287 : * Ensure that if we race with __stop_cpus() the stoppers won't get
288 : * queued up in reverse order leading to system deadlock.
289 : *
290 : * We can't miss stop_cpus_in_progress if queue_stop_cpus_work() has
291 : * queued a work on cpu1 but not on cpu2, because we hold both locks.
292 : *
293 : * It can be falsely true but it is safe to spin until it is cleared,
294 : * queue_stop_cpus_work() does everything under preempt_disable().
295 : */
296 0 : if (unlikely(stop_cpus_in_progress)) {
297 0 : err = -EDEADLK;
298 0 : goto unlock;
299 : }
300 :
301 0 : err = 0;
302 0 : __cpu_stop_queue_work(stopper1, work1, &wakeq);
303 0 : __cpu_stop_queue_work(stopper2, work2, &wakeq);
304 :
305 0 : unlock:
306 0 : raw_spin_unlock(&stopper2->lock);
307 0 : raw_spin_unlock_irq(&stopper1->lock);
308 :
309 0 : if (unlikely(err == -EDEADLK)) {
310 0 : preempt_enable();
311 :
312 0 : while (stop_cpus_in_progress)
313 0 : cpu_relax();
314 :
315 0 : goto retry;
316 : }
317 :
318 0 : wake_up_q(&wakeq);
319 0 : preempt_enable();
320 :
321 0 : return err;
322 : }
323 : /**
324 : * stop_two_cpus - stops two cpus
325 : * @cpu1: the cpu to stop
326 : * @cpu2: the other cpu to stop
327 : * @fn: function to execute
328 : * @arg: argument to @fn
329 : *
330 : * Stops both @cpu1 and @cpu2 and runs @fn on @cpu1 (the active CPU)
331 : * while both are stopped.
332 : *
333 : * Returns when both CPUs have completed.
333 : */
334 0 : int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
335 : {
336 0 : struct cpu_stop_done done;
337 0 : struct cpu_stop_work work1, work2;
338 0 : struct multi_stop_data msdata;
339 :
340 0 : msdata = (struct multi_stop_data){
341 : .fn = fn,
342 : .data = arg,
343 : .num_threads = 2,
344 0 : .active_cpus = cpumask_of(cpu1),
345 : };
346 :
347 0 : work1 = work2 = (struct cpu_stop_work){
348 : .fn = multi_cpu_stop,
349 : .arg = &msdata,
350 : .done = &done,
351 0 : .caller = _RET_IP_,
352 : };
353 :
354 0 : cpu_stop_init_done(&done, 2);
355 0 : set_state(&msdata, MULTI_STOP_PREPARE);
356 :
357 0 : if (cpu1 > cpu2)
358 0 : swap(cpu1, cpu2);
359 0 : if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2))
360 : return -ENOENT;
361 :
362 0 : wait_for_completion(&done.completion);
363 0 : return done.ret;
364 : }
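/*
 * Editor's illustration (not part of this file): a minimal stop_two_cpus()
 * caller. Per the msdata setup above, @fn runs on @cpu1 while both CPUs are
 * held in multi_cpu_stop() with IRQs disabled. Names are hypothetical.
 */
#include <linux/stop_machine.h>

static int example_pair_cb(void *arg)
{
	/* both CPUs are captive in the stopper state machine here */
	return 0;
}

static int example_stop_pair(unsigned int cpu1, unsigned int cpu2)
{
	/* -ENOENT if either stopper is disabled (its CPU is offline) */
	return stop_two_cpus(cpu1, cpu2, example_pair_cb, NULL);
}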
365 :
366 : /**
367 : * stop_one_cpu_nowait - stop a cpu but don't wait for completion
368 : * @cpu: cpu to stop
369 : * @fn: function to execute
370 : * @arg: argument to @fn
371 : * @work_buf: pointer to cpu_stop_work structure
372 : *
373 : * Similar to stop_one_cpu() but doesn't wait for completion. The
374 : * caller is responsible for ensuring @work_buf is currently unused
375 : * and will remain untouched until stopper starts executing @fn.
376 : *
377 : * CONTEXT:
378 : * Don't care.
379 : *
380 : * RETURNS:
381 : * true if cpu_stop_work was queued successfully and @fn will be called,
382 : * false otherwise.
383 : */
384 1 : bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
385 : struct cpu_stop_work *work_buf)
386 : {
387 1 : *work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, .caller = _RET_IP_, };
388 1 : return cpu_stop_queue_work(cpu, work_buf);
389 : }
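/*
 * Editor's illustration (not part of this file): fire-and-forget queueing.
 * @work_buf must remain untouched until the stopper has consumed it, so a
 * real caller keeps it in static, per-CPU or otherwise long-lived storage
 * rather than on the stack. Names are hypothetical.
 */
#include <linux/printk.h>
#include <linux/stop_machine.h>

static struct cpu_stop_work example_nowait_work;

static int example_nowait_cb(void *arg)
{
	return 0;
}

static void example_stop_nowait(unsigned int cpu)
{
	if (!stop_one_cpu_nowait(cpu, example_nowait_cb, NULL, &example_nowait_work))
		pr_warn("stopper on CPU %u is not enabled\n", cpu);
}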
390 :
391 1 : static bool queue_stop_cpus_work(const struct cpumask *cpumask,
392 : cpu_stop_fn_t fn, void *arg,
393 : struct cpu_stop_done *done)
394 : {
395 1 : struct cpu_stop_work *work;
396 1 : unsigned int cpu;
397 1 : bool queued = false;
398 :
399 : /*
400 : * Disable preemption while queueing to avoid getting
401 : * preempted by a stopper which might wait for other stoppers
402 : * to enter @fn which can lead to deadlock.
403 : */
404 1 : preempt_disable();
405 1 : stop_cpus_in_progress = true;
406 1 : barrier();
407 6 : for_each_cpu(cpu, cpumask) {
408 4 : work = &per_cpu(cpu_stopper.stop_work, cpu);
409 4 : work->fn = fn;
410 4 : work->arg = arg;
411 4 : work->done = done;
412 4 : if (cpu_stop_queue_work(cpu, work))
413 4 : queued = true;
414 : }
415 1 : barrier();
416 1 : stop_cpus_in_progress = false;
417 1 : preempt_enable();
418 :
419 1 : return queued;
420 : }
421 :
422 1 : static int __stop_cpus(const struct cpumask *cpumask,
423 : cpu_stop_fn_t fn, void *arg)
424 : {
425 1 : struct cpu_stop_done done;
426 :
427 1 : cpu_stop_init_done(&done, cpumask_weight(cpumask));
428 1 : if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
429 : return -ENOENT;
430 1 : wait_for_completion(&done.completion);
431 1 : return done.ret;
432 : }
433 :
434 : /**
435 : * stop_cpus - stop multiple cpus
436 : * @cpumask: cpus to stop
437 : * @fn: function to execute
438 : * @arg: argument to @fn
439 : *
440 : * Execute @fn(@arg) on online cpus in @cpumask. On each target cpu,
441 : * @fn is run in a process context with the highest priority
442 : * preempting any task on the cpu and monopolizing it. This function
443 : * returns after all executions are complete.
444 : *
445 : * This function doesn't guarantee the cpus in @cpumask stay online
446 : * till @fn completes. If some cpus go down in the middle, execution
447 : * on the cpu may happen partially or fully on different cpus. @fn
448 : * should either be ready for that or the caller should ensure that
449 : * the cpus stay online until this function completes.
450 : *
451 : * All stop_cpus() calls are serialized making it safe for @fn to wait
452 : * for all cpus to start executing it.
453 : *
454 : * CONTEXT:
455 : * Might sleep.
456 : *
457 : * RETURNS:
458 : * -ENOENT if @fn(@arg) was not executed at all because all cpus in
459 : * @cpumask were offline; otherwise, 0 if all executions of @fn
460 : * returned 0, any non zero return value if any returned non zero.
461 : */
462 1 : static int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
463 : {
464 1 : int ret;
465 :
466 : /* static works are used, process one request at a time */
467 1 : mutex_lock(&stop_cpus_mutex);
468 1 : ret = __stop_cpus(cpumask, fn, arg);
469 1 : mutex_unlock(&stop_cpus_mutex);
470 1 : return ret;
471 : }
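/*
 * Editor's note: stop_cpus() is static; within this file it is reached via
 * stop_machine_cpuslocked() below, which passes multi_cpu_stop() as @fn so
 * that every targeted CPU takes part in the state machine.
 */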
472 :
473 77 : static int cpu_stop_should_run(unsigned int cpu)
474 : {
475 77 : struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
476 77 : unsigned long flags;
477 77 : int run;
478 :
479 77 : raw_spin_lock_irqsave(&stopper->lock, flags);
480 78 : run = !list_empty(&stopper->works);
481 78 : raw_spin_unlock_irqrestore(&stopper->lock, flags);
482 78 : return run;
483 : }
484 :
485 37 : static void cpu_stopper_thread(unsigned int cpu)
486 : {
487 37 : struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
488 74 : struct cpu_stop_work *work;
489 :
490 74 : repeat:
491 74 : work = NULL;
492 74 : raw_spin_lock_irq(&stopper->lock);
493 74 : if (!list_empty(&stopper->works)) {
494 37 : work = list_first_entry(&stopper->works,
495 : struct cpu_stop_work, list);
496 37 : list_del_init(&work->list);
497 : }
498 74 : raw_spin_unlock_irq(&stopper->lock);
499 :
500 74 : if (work) {
501 37 : cpu_stop_fn_t fn = work->fn;
502 37 : void *arg = work->arg;
503 37 : struct cpu_stop_done *done = work->done;
504 37 : int ret;
505 :
506 : /* cpu stop callbacks must not sleep, make in_atomic() == T */
507 37 : stopper->caller = work->caller;
508 37 : stopper->fn = fn;
509 37 : preempt_count_inc();
510 37 : ret = fn(arg);
511 35 : if (done) {
512 34 : if (ret)
513 0 : done->ret = ret;
514 34 : cpu_stop_signal_done(done);
515 : }
516 37 : preempt_count_dec();
517 37 : stopper->fn = NULL;
518 37 : stopper->caller = 0;
519 37 : WARN_ONCE(preempt_count(),
520 : "cpu_stop: %ps(%p) leaked preempt count\n", fn, arg);
521 37 : goto repeat;
522 : }
523 37 : }
524 :
525 0 : void stop_machine_park(int cpu)
526 : {
527 0 : struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
528 : /*
529 : * Lockless. cpu_stopper_thread() will take stopper->lock and flush
530 : * the pending works before it parks, until then it is fine to queue
531 : * the new works.
532 : */
533 0 : stopper->enabled = false;
534 0 : kthread_park(stopper->thread);
535 0 : }
536 :
537 : extern void sched_set_stop_task(int cpu, struct task_struct *stop);
538 :
539 4 : static void cpu_stop_create(unsigned int cpu)
540 : {
541 4 : sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
542 4 : }
543 :
544 0 : static void cpu_stop_park(unsigned int cpu)
545 : {
546 0 : struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
547 :
548 0 : WARN_ON(!list_empty(&stopper->works));
549 0 : }
550 :
551 4 : void stop_machine_unpark(int cpu)
552 : {
553 4 : struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
554 :
555 4 : stopper->enabled = true;
556 4 : kthread_unpark(stopper->thread);
557 4 : }
558 :
559 : static struct smp_hotplug_thread cpu_stop_threads = {
560 : .store = &cpu_stopper.thread,
561 : .thread_should_run = cpu_stop_should_run,
562 : .thread_fn = cpu_stopper_thread,
563 : .thread_comm = "migration/%u",
564 : .create = cpu_stop_create,
565 : .park = cpu_stop_park,
566 : .selfparking = true,
567 : };
568 :
569 1 : static int __init cpu_stop_init(void)
570 : {
571 1 : unsigned int cpu;
572 :
573 6 : for_each_possible_cpu(cpu) {
574 4 : struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
575 :
576 4 : raw_spin_lock_init(&stopper->lock);
577 5 : INIT_LIST_HEAD(&stopper->works);
578 : }
579 :
580 1 : BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
581 1 : stop_machine_unpark(raw_smp_processor_id());
582 1 : stop_machine_initialized = true;
583 1 : return 0;
584 : }
585 : early_initcall(cpu_stop_init);
586 :
587 1 : int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
588 : const struct cpumask *cpus)
589 : {
590 1 : struct multi_stop_data msdata = {
591 : .fn = fn,
592 : .data = data,
593 : .num_threads = num_online_cpus(),
594 : .active_cpus = cpus,
595 : };
596 :
597 1 : lockdep_assert_cpus_held();
598 :
599 1 : if (!stop_machine_initialized) {
600 : /*
601 : * Handle the case where stop_machine() is called
602 : * early in boot, before the stopper threads have
603 : * been initialized.
604 : */
605 0 : unsigned long flags;
606 0 : int ret;
607 :
608 0 : WARN_ON_ONCE(msdata.num_threads != 1);
609 :
610 0 : local_irq_save(flags);
611 0 : hard_irq_disable();
612 0 : ret = (*fn)(data);
613 0 : local_irq_restore(flags);
614 :
615 0 : return ret;
616 : }
617 :
618 : /* Set the initial state and stop all online cpus. */
619 1 : set_state(&msdata, MULTI_STOP_PREPARE);
620 1 : return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
621 : }
622 :
623 1 : int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
624 : {
625 1 : int ret;
626 :
627 : /* No CPUs can come up or down during this. */
628 1 : cpus_read_lock();
629 1 : ret = stop_machine_cpuslocked(fn, data, cpus);
630 1 : cpus_read_unlock();
631 1 : return ret;
632 : }
633 : EXPORT_SYMBOL_GPL(stop_machine);
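/*
 * Editor's illustration (not part of this file): the classic stop_machine()
 * pattern for updating state that no other CPU may observe half-written.
 * With @cpus == NULL the callback runs on the first online CPU while every
 * other online CPU spins in multi_cpu_stop() with IRQs disabled. Names are
 * hypothetical.
 */
#include <linux/stop_machine.h>

static int example_patch_cb(void *arg)
{
	/* every other online CPU is quiescent with IRQs off at this point */
	return 0;
}

static int example_patch(void)
{
	return stop_machine(example_patch_cb, NULL, NULL);
}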
634 :
635 : /**
636 : * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
637 : * @fn: the function to run
638 : * @data: the data ptr for the @fn()
639 : * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
640 : *
641 : * This is identical to stop_machine() but can be called from a CPU which
642 : * is not active. The local CPU is in the process of hotplug (so no other
643 : * CPU hotplug can start), is not marked active, and doesn't have enough
644 : * context to sleep.
645 : *
646 : * This function provides stop_machine() functionality for such state by
647 : * using busy-wait for synchronization and executing @fn directly for local
648 : * CPU.
649 : *
650 : * CONTEXT:
651 : * Local CPU is inactive. Temporarily stops all active CPUs.
652 : *
653 : * RETURNS:
654 : * 0 if all executions of @fn returned 0, any non zero return value if any
655 : * returned non zero.
656 : */
657 0 : int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
658 : const struct cpumask *cpus)
659 : {
660 0 : struct multi_stop_data msdata = { .fn = fn, .data = data,
661 : .active_cpus = cpus };
662 0 : struct cpu_stop_done done;
663 0 : int ret;
664 :
665 : /* Local CPU must be inactive and CPU hotplug in progress. */
666 0 : BUG_ON(cpu_active(raw_smp_processor_id()));
667 0 : msdata.num_threads = num_active_cpus() + 1; /* +1 for local */
668 :
669 : /* No proper task established and can't sleep - busy wait for lock. */
670 0 : while (!mutex_trylock(&stop_cpus_mutex))
671 0 : cpu_relax();
672 :
673 : /* Schedule work on other CPUs and execute directly for local CPU */
674 0 : set_state(&msdata, MULTI_STOP_PREPARE);
675 0 : cpu_stop_init_done(&done, num_active_cpus());
676 0 : queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
677 : &done);
678 0 : ret = multi_cpu_stop(&msdata);
679 :
680 : /* Busy wait for completion. */
681 0 : while (!completion_done(&done.completion))
682 0 : cpu_relax();
683 :
684 0 : mutex_unlock(&stop_cpus_mutex);
685 0 : return ret ?: done.ret;
686 : }
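/*
 * Editor's illustration (not part of this file): hypothetical use from a CPU
 * that is being brought up and is not yet active, e.g. to rendezvous with all
 * active CPUs while per-CPU hardware state is synchronized. The caller must
 * not sleep; this variant busy-waits instead. Names are hypothetical.
 */
#include <linux/stop_machine.h>

static int example_sync_cb(void *arg)
{
	/* runs in multi_cpu_stop() context on the participating CPUs */
	return 0;
}

static int example_inactive_rendezvous(void)
{
	/* @cpus == NULL: multi_cpu_stop() treats the first online CPU as active */
	return stop_machine_from_inactive_cpu(example_sync_cb, NULL, NULL);
}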