Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-only
2 : /*
3 : * Generic entry points for the idle threads and
4 : * implementation of the idle task scheduling class.
5 : *
6 : * (NOTE: these are not related to SCHED_IDLE batch scheduled
7 : * tasks, which are handled in sched/fair.c)
8 : */
9 : #include "sched.h"
10 :
11 : #include <trace/events/power.h>
12 :
13 : /* Linker adds these: start and end of __cpuidle functions */
14 : extern char __cpuidle_text_start[], __cpuidle_text_end[];
15 :
16 : /**
17 : * sched_idle_set_state - Record idle state for the current CPU.
18 : * @idle_state: State to record.
19 : */
20 0 : void sched_idle_set_state(struct cpuidle_state *idle_state)
21 : {
22 0 : idle_set_state(this_rq(), idle_state);
23 0 : }
24 :
25 : static int __read_mostly cpu_idle_force_poll;
26 :
27 0 : void cpu_idle_poll_ctrl(bool enable)
28 : {
29 0 : if (enable) {
30 0 : cpu_idle_force_poll++;
31 : } else {
32 0 : cpu_idle_force_poll--;
33 0 : WARN_ON_ONCE(cpu_idle_force_poll < 0);
34 : }
35 0 : }
36 :
37 : #ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
38 : static int __init cpu_idle_poll_setup(char *__unused)
39 : {
40 : cpu_idle_force_poll = 1;
41 :
42 : return 1;
43 : }
44 : __setup("nohlt", cpu_idle_poll_setup);
45 :
46 : static int __init cpu_idle_nopoll_setup(char *__unused)
47 : {
48 : cpu_idle_force_poll = 0;
49 :
50 : return 1;
51 : }
52 : __setup("hlt", cpu_idle_nopoll_setup);
53 : #endif
54 :
55 0 : static noinline int __cpuidle cpu_idle_poll(void)
56 : {
57 0 : trace_cpu_idle(0, smp_processor_id());
58 0 : stop_critical_timings();
59 0 : rcu_idle_enter();
60 0 : local_irq_enable();
61 :
62 0 : while (!tif_need_resched() &&
63 0 : (cpu_idle_force_poll || tick_check_broadcast_expired()))
64 0 : cpu_relax();
65 :
66 0 : rcu_idle_exit();
67 0 : start_critical_timings();
68 0 : trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
69 :
70 0 : return 1;
71 : }
72 :
73 : /* Weak implementations for optional arch specific functions */
74 4 : void __weak arch_cpu_idle_prepare(void) { }
75 0 : void __weak arch_cpu_idle_enter(void) { }
76 15339 : void __weak arch_cpu_idle_exit(void) { }
77 0 : void __weak arch_cpu_idle_dead(void) { }
78 0 : void __weak arch_cpu_idle(void)
79 : {
80 0 : cpu_idle_force_poll = 1;
81 0 : raw_local_irq_enable();
82 0 : }
83 :
84 : /**
85 : * default_idle_call - Default CPU idle routine.
86 : *
87 : * To use when the cpuidle framework cannot be used.
88 : */
89 15329 : void __cpuidle default_idle_call(void)
90 : {
91 15329 : if (current_clr_polling_and_test()) {
92 17 : local_irq_enable();
93 : } else {
94 :
95 15364 : trace_cpu_idle(1, smp_processor_id());
96 15367 : stop_critical_timings();
97 :
98 : /*
99 : * arch_cpu_idle() is supposed to enable IRQs; however,
100 : * we can't do that because of RCU and tracing.
101 : *
102 : * Trace IRQs enable here, then switch off RCU, and have
103 : * arch_cpu_idle() use raw_local_irq_enable(). Note that
104 : * rcu_idle_enter() relies on lockdep IRQ state, so switch that
105 : * last -- this is very similar to the entry code.
106 : */
107 15367 : trace_hardirqs_on_prepare();
108 15387 : lockdep_hardirqs_on_prepare(_THIS_IP_);
109 15384 : rcu_idle_enter();
110 15389 : lockdep_hardirqs_on(_THIS_IP_);
111 :
112 15381 : arch_cpu_idle();
113 :
114 : /*
115 : * OK, so IRQs are enabled here, but RCU needs them disabled to
116 : * turn itself back on. The funny thing is that disabling IRQs
117 : * will cause tracing, which needs RCU. Jump through hoops to
118 : * make it 'work'.
119 : */
120 15282 : raw_local_irq_disable();
121 15376 : lockdep_hardirqs_off(_THIS_IP_);
122 15344 : rcu_idle_exit();
123 15362 : lockdep_hardirqs_on(_THIS_IP_);
124 15369 : raw_local_irq_enable();
125 :
126 15374 : start_critical_timings();
127 15374 : trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
128 : }
129 15375 : }
130 :
131 : static int call_cpuidle_s2idle(struct cpuidle_driver *drv,
132 : struct cpuidle_device *dev)
133 : {
134 : if (current_clr_polling_and_test())
135 : return -EBUSY;
136 :
137 : return cpuidle_enter_s2idle(drv, dev);
138 : }
139 :
140 : static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
141 : int next_state)
142 : {
143 : /*
144 : * The idle task must be scheduled, so it is pointless to go idle; just
145 : * record zero idle residency and return.
146 : */
147 : if (current_clr_polling_and_test()) {
148 : dev->last_residency_ns = 0;
149 : local_irq_enable();
150 : return -EBUSY;
151 : }
152 :
153 : /*
154 : * Enter the idle state previously returned by the governor decision.
155 : * This function will block until an interrupt occurs and will take
156 : * care of re-enabling the local interrupts.
157 : */
158 : return cpuidle_enter(drv, dev, next_state);
159 : }
160 :
161 : /**
162 : * cpuidle_idle_call - the main idle function
163 : *
164 : * NOTE: no locks or semaphores should be used here
165 : *
166 : * On archs that support TIF_POLLING_NRFLAG, this function is called with polling
167 : * set, and it returns with polling set. If it ever stops polling, it
168 : * must clear the polling bit.
169 : */
170 15369 : static void cpuidle_idle_call(void)
171 : {
172 15369 : struct cpuidle_device *dev = cpuidle_get_device();
173 15369 : struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
174 15369 : int next_state, entered_state;
175 :
176 : /*
177 : * Check if the idle task must be rescheduled. If it is the
178 : * case, exit the function after re-enabling the local irq.
179 : */
180 15369 : if (need_resched()) {
181 6 : local_irq_enable();
182 6 : return;
183 : }
184 :
185 : /*
186 : * The RCU framework needs to be told that we are entering an idle
187 : * section: no more RCU read-side critical sections, and one more
188 : * step toward the grace period.
189 : */
190 :
191 15378 : if (cpuidle_not_available(drv, dev)) {
192 15378 : tick_nohz_idle_stop_tick();
193 :
194 15403 : default_idle_call();
195 15366 : goto exit_idle;
196 : }
197 :
198 : /*
199 : * Suspend-to-idle ("s2idle") is a system state in which all user space
200 : * has been frozen, all I/O devices have been suspended and the only
201 : * activity happens here and in interrupts (if any). In that case bypass
202 : * the cpuidle governor and go straight for the deepest idle state
203 : * available. Possibly also suspend the local tick and the entire
204 : * timekeeping to prevent timer interrupts from kicking us out of idle
205 : * until a proper wakeup interrupt happens.
206 : */
207 :
208 : if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) {
209 : u64 max_latency_ns;
210 :
211 : if (idle_should_enter_s2idle()) {
212 :
213 : entered_state = call_cpuidle_s2idle(drv, dev);
214 : if (entered_state > 0)
215 : goto exit_idle;
216 :
217 : max_latency_ns = U64_MAX;
218 : } else {
219 : max_latency_ns = dev->forced_idle_latency_limit_ns;
220 : }
221 :
222 : tick_nohz_idle_stop_tick();
223 :
224 : next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
225 : call_cpuidle(drv, dev, next_state);
226 : } else {
227 : bool stop_tick = true;
228 :
229 : /*
230 : * Ask the cpuidle framework to choose a convenient idle state.
231 : */
232 : next_state = cpuidle_select(drv, dev, &stop_tick);
233 :
234 : if (stop_tick || tick_nohz_tick_stopped())
235 : tick_nohz_idle_stop_tick();
236 : else
237 : tick_nohz_idle_retain_tick();
238 :
239 : entered_state = call_cpuidle(drv, dev, next_state);
240 : /*
241 : * Give the governor an opportunity to reflect on the outcome.
242 : */
243 : cpuidle_reflect(dev, entered_state);
244 : }
245 :
246 15366 : exit_idle:
247 15366 : __current_set_polling();
248 :
249 : /*
250 : * It is up to the idle functions to reenable local interrupts
251 : */
252 15405 : if (WARN_ON_ONCE(irqs_disabled()))
253 0 : local_irq_enable();
254 : }
255 :
256 : /*
257 : * Generic idle loop implementation
258 : *
259 : * Called with polling cleared.
260 : */
261 6878 : static void do_idle(void)
262 : {
263 6878 : int cpu = smp_processor_id();
264 : /*
265 : * If the arch has a polling bit, we maintain an invariant:
266 : *
267 : * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
268 : * rq->idle). This means that, if rq->idle has the polling bit set,
269 : * then setting need_resched is guaranteed to cause the CPU to
270 : * reschedule.
271 : */
272 :
273 6878 : __current_set_polling();
274 6879 : tick_nohz_idle_enter();
275 :
276 22225 : while (!need_resched()) {
277 15362 : rmb();
278 :
279 15355 : local_irq_disable();
280 :
281 15366 : if (cpu_is_offline(cpu)) {
282 0 : tick_nohz_idle_stop_tick();
283 0 : cpuhp_report_idle_dead();
284 0 : arch_cpu_idle_dead();
285 : }
286 :
287 15404 : arch_cpu_idle_enter();
288 15364 : rcu_nocb_flush_deferred_wakeup();
289 :
290 : /*
291 : * In poll mode we re-enable interrupts and spin. Also, if we
292 : * detected in the wakeup-from-idle path that the tick
293 : * broadcast device expired for us, we don't want to go deep
294 : * idle as we know that the IPI is going to arrive right away.
295 : */
296 15364 : if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
297 0 : tick_nohz_idle_restart_tick();
298 0 : cpu_idle_poll();
299 : } else {
300 15367 : cpuidle_idle_call();
301 : }
302 15364 : arch_cpu_idle_exit();
303 : }
304 :
305 : /*
306 : * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
307 : * be set; propagate it into PREEMPT_NEED_RESCHED.
308 : *
309 : * This is required because for polling idle loops we will not have had
310 : * an IPI to fold the state for us.
311 : */
312 6872 : preempt_set_need_resched();
313 6872 : tick_nohz_idle_exit();
314 6874 : __current_clr_polling();
315 :
316 : /*
317 : * We promise to call sched_ttwu_pending() and reschedule if
318 : * need_resched() is set while polling is set. That means that clearing
319 : * polling needs to be visible before doing these things.
320 : */
321 6876 : smp_mb__after_atomic();
322 :
323 : /*
324 : * RCU relies on this call to be done outside of an RCU read-side
325 : * critical section.
326 : */
327 6876 : flush_smp_call_function_from_idle();
328 6876 : schedule_idle();
329 :
330 6874 : if (unlikely(klp_patch_pending(current)))
331 6874 : klp_update_patch_state(current);
332 6874 : }
333 :
334 0 : bool cpu_in_idle(unsigned long pc)
335 : {
336 0 : return pc >= (unsigned long)__cpuidle_text_start &&
337 0 : pc < (unsigned long)__cpuidle_text_end;
338 : }
339 :
340 : struct idle_timer {
341 : struct hrtimer timer;
342 : int done;
343 : };
344 :
345 0 : static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
346 : {
347 0 : struct idle_timer *it = container_of(timer, struct idle_timer, timer);
348 :
349 0 : WRITE_ONCE(it->done, 1);
350 0 : set_tsk_need_resched(current);
351 :
352 0 : return HRTIMER_NORESTART;
353 : }
354 :
355 0 : void play_idle_precise(u64 duration_ns, u64 latency_ns)
356 : {
357 0 : struct idle_timer it;
358 :
359 : /*
360 : * Only FIFO tasks can disable the tick since they don't need the forced
361 : * preemption.
362 : */
363 0 : WARN_ON_ONCE(current->policy != SCHED_FIFO);
364 0 : WARN_ON_ONCE(current->nr_cpus_allowed != 1);
365 0 : WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
366 0 : WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
367 0 : WARN_ON_ONCE(!duration_ns);
368 0 : WARN_ON_ONCE(current->mm);
369 :
370 0 : rcu_sleep_check();
371 0 : preempt_disable();
372 0 : current->flags |= PF_IDLE;
373 0 : cpuidle_use_deepest_state(latency_ns);
374 :
375 0 : it.done = 0;
376 0 : hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
377 0 : it.timer.function = idle_inject_timer_fn;
378 0 : hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
379 : HRTIMER_MODE_REL_PINNED);
380 :
381 0 : while (!READ_ONCE(it.done))
382 0 : do_idle();
383 :
384 0 : cpuidle_use_deepest_state(0);
385 0 : current->flags &= ~PF_IDLE;
386 :
387 0 : preempt_fold_need_resched();
388 0 : preempt_enable();
389 0 : }
390 : EXPORT_SYMBOL_GPL(play_idle_precise);
391 :
392 4 : void cpu_startup_entry(enum cpuhp_state state)
393 : {
394 4 : arch_cpu_idle_prepare();
395 4 : cpuhp_online_idle(state);
396 6878 : while (1)
397 6878 : do_idle();
398 : }
399 :
400 : /*
401 : * idle-task scheduling class.
402 : */
403 :
404 : #ifdef CONFIG_SMP
405 : static int
406 0 : select_task_rq_idle(struct task_struct *p, int cpu, int flags)
407 : {
408 0 : return task_cpu(p); /* IDLE tasks are never migrated */
409 : }
410 :
411 : static int
412 0 : balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
413 : {
414 0 : return WARN_ON_ONCE(1);
415 : }
416 : #endif
417 :
418 : /*
419 : * Idle tasks are unconditionally rescheduled:
420 : */
421 0 : static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
422 : {
423 0 : resched_curr(rq);
424 0 : }
425 :
426 6965 : static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
427 : {
428 6965 : }
429 :
430 6966 : static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
431 : {
432 0 : update_idle_core(rq);
433 6966 : schedstat_inc(rq->sched_goidle);
434 0 : }
435 :
436 6966 : struct task_struct *pick_next_task_idle(struct rq *rq)
437 : {
438 6966 : struct task_struct *next = rq->idle;
439 :
440 6966 : set_next_task_idle(rq, next, true);
441 :
442 6966 : return next;
443 : }
444 :
445 : /*
446 : * It is not legal to sleep in the idle task - print a warning
447 : * message if some code attempts to do it:
448 : */
449 : static void
450 0 : dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
451 : {
452 0 : raw_spin_unlock_irq(&rq->lock);
453 0 : printk(KERN_ERR "bad: scheduling from the idle thread!\n");
454 0 : dump_stack();
455 0 : raw_spin_lock_irq(&rq->lock);
456 0 : }
457 :
458 : /*
459 : * scheduler tick hitting a task of our scheduling class.
460 : *
461 : * NOTE: This function can be called remotely by the tick offload that
462 : * goes along full dynticks. Therefore no local assumption can be made
463 : * and everything must be accessed through the @rq and @curr passed in
464 : * parameters.
465 : */
466 10190 : static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
467 : {
468 10190 : }
469 :
470 0 : static void switched_to_idle(struct rq *rq, struct task_struct *p)
471 : {
472 0 : BUG();
473 : }
474 :
475 : static void
476 0 : prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
477 : {
478 0 : BUG();
479 : }
480 :
481 0 : static void update_curr_idle(struct rq *rq)
482 : {
483 0 : }
484 :
485 : /*
486 : * Simple, special scheduling class for the per-CPU idle tasks:
487 : */
488 : DEFINE_SCHED_CLASS(idle) = {
489 :
490 : /* no enqueue/yield_task for idle tasks */
491 :
492 : /* dequeue is not valid, we print a debug message there: */
493 : .dequeue_task = dequeue_task_idle,
494 :
495 : .check_preempt_curr = check_preempt_curr_idle,
496 :
497 : .pick_next_task = pick_next_task_idle,
498 : .put_prev_task = put_prev_task_idle,
499 : .set_next_task = set_next_task_idle,
500 :
501 : #ifdef CONFIG_SMP
502 : .balance = balance_idle,
503 : .select_task_rq = select_task_rq_idle,
504 : .set_cpus_allowed = set_cpus_allowed_common,
505 : #endif
506 :
507 : .task_tick = task_tick_idle,
508 :
509 : .prio_changed = prio_changed_idle,
510 : .switched_to = switched_to_idle,
511 : .update_curr = update_curr_idle,
512 : };