// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * inet fragments management
 *
 * Authors:     Pavel Emelyanov <xemul@openvz.org>
 *              Started as consolidation of ipv4/ip_fragment.c,
 *              ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/rhashtable.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/ipv6.h>

/* Use skb->cb to track consecutive/adjacent fragments coming at
 * the end of the queue. Nodes in the rb-tree queue will
 * contain "runs" of one or more adjacent fragments.
 *
 * Invariants:
 * - next_frag is NULL at the tail of a "run";
 * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
 */
struct ipfrag_skb_cb {
        union {
                struct inet_skb_parm h4;
                struct inet6_skb_parm h6;
        };
        struct sk_buff *next_frag;
        int frag_run_len;
};

#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
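
/*
 * Illustrative sketch (editorial addition, not from the original file):
 * with three 1000-byte fragments at offsets 0, 1000 and 2000, the tree
 * holds a single node whose run head carries the aggregate length:
 *
 *      rb_fragments
 *           |
 *        [skb A]  offset 0, frag_run_len = 3000   <- run head, in the tree
 *           | next_frag
 *        [skb B]  offset 1000                     <- linked off the tree
 *           | next_frag
 *        [skb C]  offset 2000, next_frag = NULL   <- run tail
 *
 * A fragment adjacent to q->fragments_tail extends this run; any other
 * placement creates a new rb-tree node (a new run).
 */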

static void fragcb_clear(struct sk_buff *skb)
{
        RB_CLEAR_NODE(&skb->rbnode);
        FRAG_CB(skb)->next_frag = NULL;
        FRAG_CB(skb)->frag_run_len = skb->len;
}

/* Append skb to the last "run". */
static void fragrun_append_to_last(struct inet_frag_queue *q,
                                   struct sk_buff *skb)
{
        fragcb_clear(skb);

        FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
        FRAG_CB(q->fragments_tail)->next_frag = skb;
        q->fragments_tail = skb;
}

/* Create a new "run" with the skb. */
static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
{
        BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
        fragcb_clear(skb);

        if (q->last_run_head)
                rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
                             &q->last_run_head->rbnode.rb_right);
        else
                rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
        rb_insert_color(&skb->rbnode, &q->rb_fragments);

        q->fragments_tail = skb;
        q->last_run_head = skb;
}

/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements.
 * Value: 0xff if the frame should be dropped;
 *        0 or INET_ECN_CE, to be ORed into the final iph->tos field.
 */
const u8 ip_frag_ecn_table[16] = {
        /* at least one fragment had CE, and others ECT_0 or ECT_1 */
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = INET_ECN_CE,
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = INET_ECN_CE,
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = INET_ECN_CE,

        /* invalid combinations : drop frame */
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);
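
/*
 * Usage sketch (hypothetical caller, modelled on the ipv4 reassembler;
 * "qp->ecn" is that protocol's per-queue accumulator, not a field of
 * struct inet_frag_queue): each arriving fragment ORs its IPFRAG_ECN_*
 * bit into the accumulator, and reassembly indexes this table with it:
 *
 *      u8 ecn = ip_frag_ecn_table[qp->ecn & 0xf];
 *
 *      if (unlikely(ecn == 0xff))
 *              return -EINVAL;         // inconsistent marks: drop frame
 *      iph->tos |= ecn;                // else fold CE, if any, into tos
 */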

int inet_frags_init(struct inet_frags *f)
{
        f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
                                            NULL);
        if (!f->frags_cachep)
                return -ENOMEM;

        refcount_set(&f->refcnt, 1);
        init_completion(&f->completion);
        return 0;
}
EXPORT_SYMBOL(inet_frags_init);

void inet_frags_fini(struct inet_frags *f)
{
        if (refcount_dec_and_test(&f->refcnt))
                complete(&f->completion);

        wait_for_completion(&f->completion);

        kmem_cache_destroy(f->frags_cachep);
        f->frags_cachep = NULL;
}
EXPORT_SYMBOL(inet_frags_fini);
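
/* Lifetime note (editorial): f->refcnt starts at 1 in inet_frags_init()
 * and each fqdir_init() takes one more reference.  inet_frags_fini()
 * drops the initial reference, then blocks on f->completion until
 * fqdir_free_fn() below has released the last per-netns reference, so
 * the kmem cache is never destroyed while a queue could still reach it.
 */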

/* called from rhashtable_free_and_destroy() at netns_frags dismantle */
static void inet_frags_free_cb(void *ptr, void *arg)
{
        struct inet_frag_queue *fq = ptr;
        int count;

        count = del_timer_sync(&fq->timer) ? 1 : 0;

        spin_lock_bh(&fq->lock);
        if (!(fq->flags & INET_FRAG_COMPLETE)) {
                fq->flags |= INET_FRAG_COMPLETE;
                count++;
        } else if (fq->flags & INET_FRAG_HASH_DEAD) {
                count++;
        }
        spin_unlock_bh(&fq->lock);

        if (refcount_sub_and_test(count, &fq->refcnt))
                inet_frag_destroy(fq);
}
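
/* Editorial note on the arithmetic above: "count" collects one reference
 * for a timer that was still pending and one for the hash table entry,
 * either released here by completing the queue or already flagged as
 * INET_FRAG_HASH_DEAD by inet_frag_kill().  If those were the last
 * outstanding references, the queue is destroyed immediately.
 */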

static LLIST_HEAD(fqdir_free_list);

static void fqdir_free_fn(struct work_struct *work)
{
        struct llist_node *kill_list;
        struct fqdir *fqdir, *tmp;
        struct inet_frags *f;

        /* Atomically snapshot the list of fqdirs to free */
        kill_list = llist_del_all(&fqdir_free_list);

        /* We need to make sure all ongoing call_rcu(..., inet_frag_destroy_rcu)
         * have completed, since they need to dereference fqdir.
         * Would it not be nice to have kfree_rcu_barrier()? :)
         */
        rcu_barrier();

        llist_for_each_entry_safe(fqdir, tmp, kill_list, free_list) {
                f = fqdir->f;
                if (refcount_dec_and_test(&f->refcnt))
                        complete(&f->completion);

                kfree(fqdir);
        }
}

static DECLARE_WORK(fqdir_free_work, fqdir_free_fn);

static void fqdir_work_fn(struct work_struct *work)
{
        struct fqdir *fqdir = container_of(work, struct fqdir, destroy_work);

        rhashtable_free_and_destroy(&fqdir->rhashtable, inet_frags_free_cb, NULL);

        if (llist_add(&fqdir->free_list, &fqdir_free_list))
                queue_work(system_wq, &fqdir_free_work);
}

int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net)
{
        struct fqdir *fqdir = kzalloc(sizeof(*fqdir), GFP_KERNEL);
        int res;

        if (!fqdir)
                return -ENOMEM;
        fqdir->f = f;
        fqdir->net = net;
        res = rhashtable_init(&fqdir->rhashtable, &fqdir->f->rhash_params);
        if (res < 0) {
                kfree(fqdir);
                return res;
        }
        refcount_inc(&f->refcnt);
        *fqdirp = fqdir;
        return 0;
}
EXPORT_SYMBOL(fqdir_init);
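
/*
 * Usage sketch (hypothetical, modelled on the ipv4 caller): a protocol
 * creates one fqdir per network namespace from its pernet init hook and
 * tears it down with fqdir_exit() on namespace destruction:
 *
 *      static int __net_init example_frags_init_net(struct net *net)
 *      {
 *              int res = fqdir_init(&net->ipv4.fqdir, &ip4_frags, net);
 *
 *              if (res < 0)
 *                      return res;
 *              net->ipv4.fqdir->high_thresh = 4 * 1024 * 1024;
 *              net->ipv4.fqdir->low_thresh = 3 * 1024 * 1024;
 *              net->ipv4.fqdir->timeout = IP_FRAG_TIME;
 *              return 0;
 *      }
 */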

static struct workqueue_struct *inet_frag_wq;

static int __init inet_frag_wq_init(void)
{
        inet_frag_wq = create_workqueue("inet_frag_wq");
        if (!inet_frag_wq)
                panic("Could not create inet frag workq");
        return 0;
}

pure_initcall(inet_frag_wq_init);

void fqdir_exit(struct fqdir *fqdir)
{
        INIT_WORK(&fqdir->destroy_work, fqdir_work_fn);
        queue_work(inet_frag_wq, &fqdir->destroy_work);
}
EXPORT_SYMBOL(fqdir_exit);

void inet_frag_kill(struct inet_frag_queue *fq)
{
        if (del_timer(&fq->timer))
                refcount_dec(&fq->refcnt);

        if (!(fq->flags & INET_FRAG_COMPLETE)) {
                struct fqdir *fqdir = fq->fqdir;

                fq->flags |= INET_FRAG_COMPLETE;
                rcu_read_lock();
                /* The RCU read lock provides a memory barrier
                 * guaranteeing that if fqdir->dead is false then
                 * the hash table destruction will not start until
                 * after we unlock. Paired with inet_frags_exit_net().
                 */
                if (!fqdir->dead) {
                        rhashtable_remove_fast(&fqdir->rhashtable, &fq->node,
                                               fqdir->f->rhash_params);
                        refcount_dec(&fq->refcnt);
                } else {
                        fq->flags |= INET_FRAG_HASH_DEAD;
                }
                rcu_read_unlock();
        }
}
EXPORT_SYMBOL(inet_frag_kill);
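
/* Editorial note: inet_frag_kill() drops at most two references, one
 * held by the pending timer and one held by the hash table entry.  The
 * reference owned by the caller (typically obtained via inet_frag_find())
 * stays live and must be released separately, which is why
 * inet_frag_alloc() starts every queue at a refcount of 3.
 */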

static void inet_frag_destroy_rcu(struct rcu_head *head)
{
        struct inet_frag_queue *q = container_of(head, struct inet_frag_queue,
                                                 rcu);
        struct inet_frags *f = q->fqdir->f;

        if (f->destructor)
                f->destructor(q);
        kmem_cache_free(f->frags_cachep, q);
}

unsigned int inet_frag_rbtree_purge(struct rb_root *root)
{
        struct rb_node *p = rb_first(root);
        unsigned int sum = 0;

        while (p) {
                struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);

                p = rb_next(p);
                rb_erase(&skb->rbnode, root);
                while (skb) {
                        struct sk_buff *next = FRAG_CB(skb)->next_frag;

                        sum += skb->truesize;
                        kfree_skb(skb);
                        skb = next;
                }
        }
        return sum;
}
EXPORT_SYMBOL(inet_frag_rbtree_purge);

void inet_frag_destroy(struct inet_frag_queue *q)
{
        struct fqdir *fqdir;
        unsigned int sum, sum_truesize = 0;
        struct inet_frags *f;

        WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
        WARN_ON(del_timer(&q->timer) != 0);

        /* Release all fragment data. */
        fqdir = q->fqdir;
        f = fqdir->f;
        sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments);
        sum = sum_truesize + f->qsize;

        call_rcu(&q->rcu, inet_frag_destroy_rcu);

        sub_frag_mem_limit(fqdir, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);

static struct inet_frag_queue *inet_frag_alloc(struct fqdir *fqdir,
                                               struct inet_frags *f,
                                               void *arg)
{
        struct inet_frag_queue *q;

        q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
        if (!q)
                return NULL;

        q->fqdir = fqdir;
        f->constructor(q, arg);
        add_frag_mem_limit(fqdir, f->qsize);

        timer_setup(&q->timer, f->frag_expire, 0);
        spin_lock_init(&q->lock);
        refcount_set(&q->refcnt, 3);

        return q;
}

static struct inet_frag_queue *inet_frag_create(struct fqdir *fqdir,
                                                void *arg,
                                                struct inet_frag_queue **prev)
{
        struct inet_frags *f = fqdir->f;
        struct inet_frag_queue *q;

        q = inet_frag_alloc(fqdir, f, arg);
        if (!q) {
                *prev = ERR_PTR(-ENOMEM);
                return NULL;
        }
        mod_timer(&q->timer, jiffies + fqdir->timeout);

        *prev = rhashtable_lookup_get_insert_key(&fqdir->rhashtable, &q->key,
                                                 &q->node, f->rhash_params);
        if (*prev) {
                q->flags |= INET_FRAG_COMPLETE;
                inet_frag_kill(q);
                inet_frag_destroy(q);
                return NULL;
        }
        return q;
}

/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key)
{
        struct inet_frag_queue *fq = NULL, *prev;

        if (!fqdir->high_thresh || frag_mem_limit(fqdir) > fqdir->high_thresh)
                return NULL;

        rcu_read_lock();

        prev = rhashtable_lookup(&fqdir->rhashtable, key, fqdir->f->rhash_params);
        if (!prev)
                fq = inet_frag_create(fqdir, key, &prev);
        if (!IS_ERR_OR_NULL(prev)) {
                fq = prev;
                if (!refcount_inc_not_zero(&fq->refcnt))
                        fq = NULL;
        }
        rcu_read_unlock();
        return fq;
}
EXPORT_SYMBOL(inet_frag_find);
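
/*
 * Usage sketch (hypothetical, modelled on the ipv4 ip_find() caller):
 * the opaque key is consumed by f->constructor during queue creation,
 * and a non-NULL result carries a reference the caller must drop with
 * inet_frag_put() when done:
 *
 *      struct inet_frag_queue *q;
 *
 *      q = inet_frag_find(net->ipv4.fqdir, &key);
 *      if (!q)
 *              return NULL;
 *      return container_of(q, struct ipq, q);
 */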

int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
                           int offset, int end)
{
        struct sk_buff *last = q->fragments_tail;

        /* RFC 5722, Section 4, amended by Errata ID 3089:
         * When reassembling an IPv6 datagram, if
         * one or more of its constituent fragments is determined to be an
         * overlapping fragment, the entire datagram (and any constituent
         * fragments) MUST be silently discarded.
         *
         * Duplicates, however, should be ignored (i.e. the skb dropped, but
         * the queue/fragments kept for later reassembly).
         */
        if (!last)
                fragrun_create(q, skb);  /* First fragment. */
        else if (last->ip_defrag_offset + last->len < end) {
                /* This is the common case: skb goes to the end. */
                /* Detect and discard overlaps. */
                if (offset < last->ip_defrag_offset + last->len)
                        return IPFRAG_OVERLAP;
                if (offset == last->ip_defrag_offset + last->len)
                        fragrun_append_to_last(q, skb);
                else
                        fragrun_create(q, skb);
        } else {
                /* Binary search. Note that skb can become the first fragment,
                 * but not the last (covered above).
                 */
                struct rb_node **rbn, *parent;

                rbn = &q->rb_fragments.rb_node;
                do {
                        struct sk_buff *curr;
                        int curr_run_end;

                        parent = *rbn;
                        curr = rb_to_skb(parent);
                        curr_run_end = curr->ip_defrag_offset +
                                       FRAG_CB(curr)->frag_run_len;
                        if (end <= curr->ip_defrag_offset)
                                rbn = &parent->rb_left;
                        else if (offset >= curr_run_end)
                                rbn = &parent->rb_right;
                        else if (offset >= curr->ip_defrag_offset &&
                                 end <= curr_run_end)
                                return IPFRAG_DUP;
                        else
                                return IPFRAG_OVERLAP;
                } while (*rbn);
                /* Here we have parent properly set, and rbn pointing to
                 * one of its NULL left/right children. Insert skb.
                 */
                fragcb_clear(skb);
                rb_link_node(&skb->rbnode, parent, rbn);
                rb_insert_color(&skb->rbnode, &q->rb_fragments);
        }

        skb->ip_defrag_offset = offset;

        return IPFRAG_OK;
}
EXPORT_SYMBOL(inet_frag_queue_insert);
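
/*
 * Usage sketch (hypothetical caller): the three return codes imply
 * three different fates for the fragment and its queue:
 *
 *      err = inet_frag_queue_insert(&qp->q, skb, offset, end);
 *      if (err == IPFRAG_DUP) {
 *              kfree_skb(skb);          // duplicate: drop skb, keep queue
 *      } else if (err == IPFRAG_OVERLAP) {
 *              inet_frag_kill(&qp->q);  // overlap: discard whole datagram
 *              kfree_skb(skb);
 *      }
 */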

void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
                              struct sk_buff *parent)
{
        struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
        struct sk_buff **nextp;
        int delta;

        if (head != skb) {
                fp = skb_clone(skb, GFP_ATOMIC);
                if (!fp)
                        return NULL;
                FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
                if (RB_EMPTY_NODE(&skb->rbnode))
                        FRAG_CB(parent)->next_frag = fp;
                else
                        rb_replace_node(&skb->rbnode, &fp->rbnode,
                                        &q->rb_fragments);
                if (q->fragments_tail == skb)
                        q->fragments_tail = fp;
                skb_morph(skb, head);
                FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
                rb_replace_node(&head->rbnode, &skb->rbnode,
                                &q->rb_fragments);
                consume_skb(head);
                head = skb;
        }
        WARN_ON(head->ip_defrag_offset != 0);

        delta = -head->truesize;

        /* Head of list must not be cloned. */
        if (skb_unclone(head, GFP_ATOMIC))
                return NULL;

        delta += head->truesize;
        if (delta)
                add_frag_mem_limit(q->fqdir, delta);

        /* If the first fragment is fragmented itself, we split
         * it into two chunks: the first with data and paged part
         * and the second, holding only fragments.
         */
        if (skb_has_frag_list(head)) {
                struct sk_buff *clone;
                int i, plen = 0;

                clone = alloc_skb(0, GFP_ATOMIC);
                if (!clone)
                        return NULL;
                skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
                skb_frag_list_init(head);
                for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
                        plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
                clone->data_len = head->data_len - plen;
                clone->len = clone->data_len;
                head->truesize += clone->truesize;
                clone->csum = 0;
                clone->ip_summed = head->ip_summed;
                add_frag_mem_limit(q->fqdir, clone->truesize);
                skb_shinfo(head)->frag_list = clone;
                nextp = &clone->next;
        } else {
                nextp = &skb_shinfo(head)->frag_list;
        }

        return nextp;
}
EXPORT_SYMBOL(inet_frag_reasm_prepare);

void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
                            void *reasm_data, bool try_coalesce)
{
        struct sk_buff **nextp = (struct sk_buff **)reasm_data;
        struct rb_node *rbn;
        struct sk_buff *fp;
        int sum_truesize;

        skb_push(head, head->data - skb_network_header(head));

        /* Traverse the tree in order, to build frag_list. */
        fp = FRAG_CB(head)->next_frag;
        rbn = rb_next(&head->rbnode);
        rb_erase(&head->rbnode, &q->rb_fragments);

        sum_truesize = head->truesize;
        while (rbn || fp) {
                /* fp points to the next sk_buff in the current run;
                 * rbn points to the next run.
                 */
                /* Go through the current run. */
                while (fp) {
                        struct sk_buff *next_frag = FRAG_CB(fp)->next_frag;
                        bool stolen;
                        int delta;

                        sum_truesize += fp->truesize;
                        if (head->ip_summed != fp->ip_summed)
                                head->ip_summed = CHECKSUM_NONE;
                        else if (head->ip_summed == CHECKSUM_COMPLETE)
                                head->csum = csum_add(head->csum, fp->csum);

                        if (try_coalesce && skb_try_coalesce(head, fp, &stolen,
                                                             &delta)) {
                                kfree_skb_partial(fp, stolen);
                        } else {
                                fp->prev = NULL;
                                memset(&fp->rbnode, 0, sizeof(fp->rbnode));
                                fp->sk = NULL;

                                head->data_len += fp->len;
                                head->len += fp->len;
                                head->truesize += fp->truesize;

                                *nextp = fp;
                                nextp = &fp->next;
                        }

                        fp = next_frag;
                }
                /* Move to the next run. */
                if (rbn) {
                        struct rb_node *rbnext = rb_next(rbn);

                        fp = rb_to_skb(rbn);
                        rb_erase(rbn, &q->rb_fragments);
                        rbn = rbnext;
                }
        }
        sub_frag_mem_limit(q->fqdir, sum_truesize);

        *nextp = NULL;
        skb_mark_not_on_list(head);
        head->prev = NULL;
        head->tstamp = q->stamp;
}
EXPORT_SYMBOL(inet_frag_reasm_finish);
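
/*
 * Usage sketch (hypothetical, modelled on the ipv4 reassembly path):
 * prepare and finish are used as a pair once all fragments have arrived.
 * "skb" is the most recently inserted fragment and "prev_tail" the queue
 * tail recorded before that insertion:
 *
 *      void *reasm_data = inet_frag_reasm_prepare(&qp->q, skb, prev_tail);
 *
 *      if (!reasm_data)
 *              goto out_oom;
 *      // ... rebuild the protocol header on skb ...
 *      inet_frag_reasm_finish(&qp->q, skb, reasm_data, true);
 */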

struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
{
        struct sk_buff *head, *skb;

        head = skb_rb_first(&q->rb_fragments);
        if (!head)
                return NULL;
        skb = FRAG_CB(head)->next_frag;
        if (skb)
                rb_replace_node(&head->rbnode, &skb->rbnode,
                                &q->rb_fragments);
        else
                rb_erase(&head->rbnode, &q->rb_fragments);
        memset(&head->rbnode, 0, sizeof(head->rbnode));
        barrier();

        if (head == q->fragments_tail)
                q->fragments_tail = NULL;

        sub_frag_mem_limit(q->fqdir, head->truesize);

        return head;
}
EXPORT_SYMBOL(inet_frag_pull_head);
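
/*
 * Usage sketch (hypothetical, modelled on the ipv4 ip_expire() timer):
 * on timeout the head fragment is detached so an ICMP "time exceeded"
 * error can be built from its contents before it is freed:
 *
 *      head = inet_frag_pull_head(&qp->q);
 *      if (!head)
 *              goto out;
 *      head->dev = dev_get_by_index_rcu(net, qp->iif);
 *      if (head->dev)
 *              icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
 *      kfree_skb(head);
 */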