// SPDX-License-Identifier: GPL-2.0

#include "blk-rq-qos.h"

/*
 * Increment 'v' if 'v' is below 'below'. Returns true if we succeeded,
 * false if 'v' + 1 would be bigger than 'below'.
 */
static bool atomic_inc_below(atomic_t *v, unsigned int below)
{
	unsigned int cur = atomic_read(v);

	for (;;) {
		unsigned int old;

		if (cur >= below)
			return false;
		old = atomic_cmpxchg(v, cur, cur + 1);
		if (old == cur)
			break;
		cur = old;
	}

	return true;
}
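
/*
 * Illustrative example (not part of the original source): with a limit
 * of 2, two increments succeed and the third fails:
 *
 *	atomic_t v = ATOMIC_INIT(0);
 *
 *	atomic_inc_below(&v, 2);	-> true,  v == 1
 *	atomic_inc_below(&v, 2);	-> true,  v == 2
 *	atomic_inc_below(&v, 2);	-> false, v == 2
 */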

bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit)
{
	return atomic_inc_below(&rq_wait->inflight, limit);
}

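/*
 * Each __rq_qos_*() helper below walks the queue's chain of rq_qos
 * policies (e.g. wbt, iolatency, iocost) and invokes the corresponding
 * optional hook on every policy that implements it. The do/while form
 * assumes a non-NULL head, which the inline wrappers in blk-rq-qos.h
 * ensure by checking q->rq_qos first.
 */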
void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio)
{
	do {
		if (rqos->ops->cleanup)
			rqos->ops->cleanup(rqos, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_done(struct rq_qos *rqos, struct request *rq)
{
	do {
		if (rqos->ops->done)
			rqos->ops->done(rqos, rq);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_issue(struct rq_qos *rqos, struct request *rq)
{
	do {
		if (rqos->ops->issue)
			rqos->ops->issue(rqos, rq);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq)
{
	do {
		if (rqos->ops->requeue)
			rqos->ops->requeue(rqos, rq);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio)
{
	do {
		if (rqos->ops->throttle)
			rqos->ops->throttle(rqos, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
	do {
		if (rqos->ops->track)
			rqos->ops->track(rqos, rq, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
	do {
		if (rqos->ops->merge)
			rqos->ops->merge(rqos, rq, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio)
{
	do {
		if (rqos->ops->done_bio)
			rqos->ops->done_bio(rqos, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_queue_depth_changed(struct rq_qos *rqos)
{
	do {
		if (rqos->ops->queue_depth_changed)
			rqos->ops->queue_depth_changed(rqos);
		rqos = rqos->next;
	} while (rqos);
}

/*
 * Return true if we can't increase the depth further by scaling.
 */
bool rq_depth_calc_max_depth(struct rq_depth *rqd)
{
	unsigned int depth;
	bool ret = false;

	/*
	 * For QD=1 devices, this is a special case. It's important for those
	 * to have one request ready when one completes, so force a depth of
	 * 2 for those devices. On the backend, it'll be a depth of 1 anyway,
	 * since the device can't have more than that in flight. If we're
	 * scaling down, then keep a setting of 1/1/1.
	 */
	if (rqd->queue_depth == 1) {
		if (rqd->scale_step > 0)
			rqd->max_depth = 1;
		else {
			rqd->max_depth = 2;
			ret = true;
		}
	} else {
		/*
		 * scale_step == 0 is our default state. If we have suffered
		 * latency spikes, step will be > 0, and we shrink the
		 * allowed write depths. If step is < 0, we're only doing
		 * writes, and we allow a temporarily higher depth to
		 * increase performance.
		 */
		depth = min_t(unsigned int, rqd->default_depth,
			      rqd->queue_depth);
		if (rqd->scale_step > 0)
			depth = 1 + ((depth - 1) >> min(31, rqd->scale_step));
		else if (rqd->scale_step < 0) {
			unsigned int maxd = 3 * rqd->queue_depth / 4;

			depth = 1 + ((depth - 1) << -rqd->scale_step);
			if (depth > maxd) {
				depth = maxd;
				ret = true;
			}
		}

		rqd->max_depth = depth;
	}

	return ret;
}
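
/*
 * Worked example (illustrative): with default_depth == 64 and
 * queue_depth == 128, depth starts at min(64, 128) == 64.
 *
 *	scale_step ==  1:  depth = 1 + (63 >> 1) == 32
 *	scale_step ==  2:  depth = 1 + (63 >> 2) == 16
 *	scale_step == -1:  depth = 1 + (63 << 1) == 127, capped at
 *			   3 * 128 / 4 == 96, so max_depth == 96 and
 *			   the function returns true.
 */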

/* Returns true on success and false if scaling up wasn't possible */
bool rq_depth_scale_up(struct rq_depth *rqd)
{
	/*
	 * Hit max in previous round, stop here
	 */
	if (rqd->scaled_max)
		return false;

	rqd->scale_step--;

	rqd->scaled_max = rq_depth_calc_max_depth(rqd);
	return true;
}
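
/*
 * rq_depth_scale_up() above and rq_depth_scale_down() below form a
 * simple feedback loop on ->scale_step: latency violations step it up,
 * shrinking the allowed depth, while good behaviour steps it down,
 * eventually below zero, which temporarily grows the depth.
 */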

/*
 * Scale rwb down. If 'hard_throttle' is set, do it quicker, since we
 * had a latency violation. Returns true on success and false if
 * scaling down wasn't possible.
 */
bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
{
	/*
	 * Stop scaling down when we've hit the limit. This also prevents
	 * ->scale_step from going to crazy values, if the device can't
	 * keep up.
	 */
	if (rqd->max_depth == 1)
		return false;

	if (rqd->scale_step < 0 && hard_throttle)
		rqd->scale_step = 0;
	else
		rqd->scale_step++;

	rqd->scaled_max = false;
	rq_depth_calc_max_depth(rqd);
	return true;
}

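/*
 * Per-waiter state for rq_qos_wait(). It lives on the waiter's stack
 * and is linked into rqw->wait until the waiter acquires a token or is
 * handed one by rq_qos_wake_function().
 */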
struct rq_qos_wait_data {
	struct wait_queue_entry wq;
	struct task_struct *task;
	struct rq_wait *rqw;
	acquire_inflight_cb_t *cb;
	void *private_data;
	bool got_token;
};

static int rq_qos_wake_function(struct wait_queue_entry *curr,
				unsigned int mode, int wake_flags, void *key)
{
	struct rq_qos_wait_data *data = container_of(curr,
						     struct rq_qos_wait_data,
						     wq);

	/*
	 * If we fail to get a budget, return -1 to interrupt the wake up loop
	 * in __wake_up_common.
	 */
	if (!data->cb(data->rqw, data->private_data))
		return -1;

	data->got_token = true;
	smp_wmb();
	list_del_init(&curr->entry);
	wake_up_process(data->task);
	return 1;
}
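
/*
 * Ordering note: ->got_token is published before the entry is unlinked
 * and the task woken; the smp_wmb() above pairs with the smp_rmb() in
 * rq_qos_wait() below.
 */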

/**
 * rq_qos_wait - throttle on a rqw if we need to
 * @rqw: rqw to throttle on
 * @private_data: caller provided specific data
 * @acquire_inflight_cb: inc the rqw->inflight counter if we can
 * @cleanup_cb: the callback to cleanup in case we race with a waker
 *
 * This provides a uniform place for the rq_qos users to do their throttling.
 * Since you can end up with a lot of things sleeping at once, this manages the
 * waking up based on the resources available. The acquire_inflight_cb should
 * increment rqw->inflight if we have the ability to do so; otherwise it should
 * return false, and then we will sleep until room becomes available.
 *
 * cleanup_cb is for the case where we race with a waker and need to adjust
 * the inflight count accordingly.
 */
void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb)
{
	struct rq_qos_wait_data data = {
		.wq = {
			.func = rq_qos_wake_function,
			.entry = LIST_HEAD_INIT(data.wq.entry),
		},
		.task = current,
		.rqw = rqw,
		.cb = acquire_inflight_cb,
		.private_data = private_data,
	};
	bool has_sleeper;

	has_sleeper = wq_has_sleeper(&rqw->wait);
	if (!has_sleeper && acquire_inflight_cb(rqw, private_data))
		return;

	prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
	has_sleeper = !wq_has_single_sleeper(&rqw->wait);
	do {
		/* The memory barrier in set_current_state saves us here. */
		if (data.got_token)
			break;
		if (!has_sleeper && acquire_inflight_cb(rqw, private_data)) {
			finish_wait(&rqw->wait, &data.wq);

			/*
			 * We raced with rq_qos_wake_function() getting a
			 * token, which means we now have two. Put our local
			 * token and wake anyone else potentially waiting for
			 * one.
			 */
			smp_rmb();
			if (data.got_token)
				cleanup_cb(rqw, private_data);
			break;
		}
		io_schedule();
		has_sleeper = true;
		set_current_state(TASK_UNINTERRUPTIBLE);
	} while (1);
	finish_wait(&rqw->wait, &data.wq);
}
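
/*
 * Minimal caller sketch (illustrative only; my_limit, my_acquire and
 * my_cleanup are hypothetical). A policy throttling against a fixed
 * inflight limit could wire the two callbacks up roughly like this:
 *
 *	static bool my_acquire(struct rq_wait *rqw, void *private_data)
 *	{
 *		return rq_wait_inc_below(rqw, my_limit);
 *	}
 *
 *	static void my_cleanup(struct rq_wait *rqw, void *private_data)
 *	{
 *		// put the extra token we raced into
 *		atomic_dec(&rqw->inflight);
 *		wake_up(&rqw->wait);
 *	}
 *
 *	rq_qos_wait(rqw, NULL, my_acquire, my_cleanup);
 */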

void rq_qos_exit(struct request_queue *q)
{
	blk_mq_debugfs_unregister_queue_rqos(q);

	while (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;
		q->rq_qos = rqos->next;
		rqos->ops->exit(rqos);
	}
}