Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * stop-task scheduling class.
4 : *
5 : * The stop task is the highest priority task in the system, it preempts
6 : * everything and will be preempted by nothing.
7 : *
8 : * See kernel/stop_machine.c
9 : */
10 : #include "sched.h"
11 :
12 : #ifdef CONFIG_SMP
/*
 * The stop task is pinned: always run it on the CPU it already lives on.
 */
static int
select_task_rq_stop(struct task_struct *p, int cpu, int flags)
{
        return task_cpu(p); /* stop tasks never migrate */
}
18 :
/*
 * Load-balance callback: the stop class never pulls work; just report
 * whether this rq's stop task is currently runnable.
 */
static int
balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
        return sched_stop_runnable(rq);
}
24 : #endif /* CONFIG_SMP */
25 :
/*
 * Nothing outranks the stop class, so a wakeup can never preempt a
 * running stop task — deliberately a no-op.
 */
static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
{
        /* we're never preempted */
}
31 :
/*
 * Stamp the start-of-execution time so put_prev_task_stop() can later
 * account the runtime this stop task consumed.
 */
static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first)
{
        stop->se.exec_start = rq_clock_task(rq);
}
36 :
37 78 : static struct task_struct *pick_next_task_stop(struct rq *rq)
38 : {
39 156 : if (!sched_stop_runnable(rq))
40 : return NULL;
41 :
42 39 : set_next_task_stop(rq, rq->stop, true);
43 40 : return rq->stop;
44 : }
45 :
/*
 * The stop task is not kept on any queue structure; becoming runnable
 * only needs to be reflected in the rq's nr_running count.
 */
static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
        add_nr_running(rq, 1);
}
51 :
/*
 * Mirror of enqueue_task_stop(): only the nr_running count needs
 * updating when the stop task blocks.
 */
static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
        sub_nr_running(rq, 1);
}
57 :
static void yield_task_stop(struct rq *rq)
{
        BUG(); /* the stop task should never yield, it's pointless */
}
62 :
63 40 : static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
64 : {
65 40 : struct task_struct *curr = rq->curr;
66 40 : u64 delta_exec;
67 :
68 40 : delta_exec = rq_clock_task(rq) - curr->se.exec_start;
69 40 : if (unlikely((s64)delta_exec < 0))
70 0 : delta_exec = 0;
71 :
72 40 : schedstat_set(curr->se.statistics.exec_max,
73 : max(curr->se.statistics.exec_max, delta_exec));
74 :
75 40 : curr->se.sum_exec_runtime += delta_exec;
76 40 : account_group_exec_runtime(curr, delta_exec);
77 :
78 39 : curr->se.exec_start = rq_clock_task(rq);
79 39 : cgroup_account_cputime(curr, delta_exec);
80 39 : }
81 :
82 : /*
83 : * scheduler tick hitting a task of our scheduling class.
84 : *
85 : * NOTE: This function can be called remotely by the tick offload that
86 : * goes along full dynticks. Therefore no local assumption can be made
87 : * and everything must be accessed through the @rq and @curr passed in
88 : * parameters.
89 : */
static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{
        /* The stop task has no timeslice to expire — nothing to do. */
}
93 :
/*
 * Tasks are never moved into the stop class at runtime; reaching this
 * callback indicates a scheduler core bug.
 */
static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
        BUG(); /* it's impossible to change to this class */
}
98 :
/*
 * The stop task has no adjustable priority; a priority change on it is
 * a scheduler core bug.
 */
static void
prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
{
        BUG(); /* how!?, what priority? */
}
104 :
static void update_curr_stop(struct rq *rq)
{
        /* Runtime is accounted in put_prev_task_stop() instead. */
}
108 :
109 : /*
110 : * Simple, special scheduling class for the per-CPU stop tasks:
111 : */
DEFINE_SCHED_CLASS(stop) = {

        .enqueue_task           = enqueue_task_stop,
        .dequeue_task           = dequeue_task_stop,
        .yield_task             = yield_task_stop,           /* BUG()s: must not be called */

        .check_preempt_curr     = check_preempt_curr_stop,   /* no-op: never preempted */

        .pick_next_task         = pick_next_task_stop,
        .put_prev_task          = put_prev_task_stop,
        .set_next_task          = set_next_task_stop,

#ifdef CONFIG_SMP
        .balance                = balance_stop,
        .select_task_rq         = select_task_rq_stop,       /* always the current CPU */
        .set_cpus_allowed       = set_cpus_allowed_common,
#endif

        .task_tick              = task_tick_stop,            /* no-op: no timeslice */

        .prio_changed           = prio_changed_stop,         /* BUG()s: must not be called */
        .switched_to            = switched_to_stop,          /* BUG()s: must not be called */
        .update_curr            = update_curr_stop,          /* no-op: see put_prev_task_stop() */
};
|