Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * linux/fs/file.c
4 : *
5 : * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
6 : *
7 : * Manage the dynamic fd arrays in the process files_struct.
8 : */
9 :
10 : #include <linux/syscalls.h>
11 : #include <linux/export.h>
12 : #include <linux/fs.h>
13 : #include <linux/kernel.h>
14 : #include <linux/mm.h>
15 : #include <linux/sched/signal.h>
16 : #include <linux/slab.h>
17 : #include <linux/file.h>
18 : #include <linux/fdtable.h>
19 : #include <linux/bitops.h>
20 : #include <linux/spinlock.h>
21 : #include <linux/rcupdate.h>
22 : #include <linux/close_range.h>
23 : #include <net/sock.h>
24 :
25 : #include "internal.h"
26 :
27 : unsigned int sysctl_nr_open __read_mostly = 1024*1024;
28 : unsigned int sysctl_nr_open_min = BITS_PER_LONG;
29 : /* our min() is unusable in constant expressions ;-/ */
30 : #define __const_min(x, y) ((x) < (y) ? (x) : (y))
31 : unsigned int sysctl_nr_open_max =
32 : __const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;
33 :
34 4 : static void __free_fdtable(struct fdtable *fdt)
35 : {
36 4 : kvfree(fdt->fd);
37 4 : kvfree(fdt->open_fds);
38 4 : kfree(fdt);
39 4 : }
40 :
41 0 : static void free_fdtable_rcu(struct rcu_head *rcu)
42 : {
43 0 : __free_fdtable(container_of(rcu, struct fdtable, rcu));
44 0 : }
45 :
46 : #define BITBIT_NR(nr) BITS_TO_LONGS(BITS_TO_LONGS(nr))
47 : #define BITBIT_SIZE(nr) (BITBIT_NR(nr) * sizeof(long))
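/*
 * A worked example of the macros above, assuming a 64-bit build
 * (BITS_PER_LONG == 64). full_fds_bits is a second-level bitmap with
 * one bit per word of open_fds:
 *
 *	nr = 256 fds
 *	BITS_TO_LONGS(256) = 4			words in open_fds
 *	BITBIT_NR(256) = BITS_TO_LONGS(4) = 1	words in full_fds_bits
 *	BITBIT_SIZE(256) = 1 * sizeof(long) = 8	bytes
 *
 * Each bit of full_fds_bits marks an open_fds word that is completely
 * full, so one full_fds_bits word summarizes 64 * 64 = 4096 descriptors.
 */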
48 :
49 : /*
50 : * Copy 'count' fd bits from the old table to the new table and clear the extra
51 : * space if any. This does not copy the file pointers. Called with the files
52 : * spinlock held for write.
53 : */
54 1355 : static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
55 : unsigned int count)
56 : {
57 1355 : unsigned int cpy, set;
58 :
59 1355 : cpy = count / BITS_PER_BYTE;
60 1355 : set = (nfdt->max_fds - count) / BITS_PER_BYTE;
61 1355 : memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
62 1355 : memset((char *)nfdt->open_fds + cpy, 0, set);
63 1355 : memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
64 1355 : memset((char *)nfdt->close_on_exec + cpy, 0, set);
65 :
66 1355 : cpy = BITBIT_SIZE(count);
67 1355 : set = BITBIT_SIZE(nfdt->max_fds) - cpy;
68 1355 : memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
69 1355 : memset((char *)nfdt->full_fds_bits + cpy, 0, set);
70 1355 : }
71 :
72 : /*
73 : * Copy all file descriptors from the old table to the new, expanded table and
74 : * clear the extra space. Called with the files spinlock held for write.
75 : */
76 2 : static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
77 : {
78 2 : size_t cpy, set;
79 :
80 2 : BUG_ON(nfdt->max_fds < ofdt->max_fds);
81 :
82 2 : cpy = ofdt->max_fds * sizeof(struct file *);
83 2 : set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
84 2 : memcpy(nfdt->fd, ofdt->fd, cpy);
85 2 : memset((char *)nfdt->fd + cpy, 0, set);
86 :
87 2 : copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
88 2 : }
89 :
90 6 : static struct fdtable * alloc_fdtable(unsigned int nr)
91 : {
92 6 : struct fdtable *fdt;
93 6 : void *data;
94 :
95 : /*
96 : * Figure out how many fds we actually want to support in this fdtable.
97 : * Allocation steps are keyed to the size of the fdarray, since it
98 : * grows far faster than any of the other dynamic data. We try to fit
99 : * the fdarray into comfortable page-tuned chunks: starting at 1024B
100 : * and growing in powers of two from there on.
101 : */
102 6 : nr /= (1024 / sizeof(struct file *));
103 6 : nr = roundup_pow_of_two(nr + 1);
104 6 : nr *= (1024 / sizeof(struct file *));
105 : /*
106 : * Note that this can drive nr *below* what we had passed if sysctl_nr_open
107 : * had been set lower between the check in expand_files() and here. Deal
108 : * with that in the caller; it's cheaper that way.
109 : *
110 : * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
111 : * the bitmap handling below becomes unpleasant, to put it mildly...
112 : */
113 6 : if (unlikely(nr > sysctl_nr_open))
114 0 : nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;
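/*
 * A worked example of the sizing above, assuming a 64-bit build where
 * sizeof(struct file *) == 8, i.e. 128 pointers per 1024B:
 *
 *	nr = 200:  200/128 = 1;  roundup_pow_of_two(2) = 2;  2*128 = 256
 *	nr = 300:  300/128 = 2;  roundup_pow_of_two(3) = 4;  4*128 = 512
 *
 * giving 2KiB and 4KiB fd arrays respectively. The clamp above rounds
 * sysctl_nr_open up to the next multiple of BITS_PER_LONG so the bitmap
 * code never has to deal with partial words.
 */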
115 :
116 6 : fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
117 6 : if (!fdt)
118 0 : goto out;
119 6 : fdt->max_fds = nr;
120 6 : data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
121 6 : if (!data)
122 0 : goto out_fdt;
123 6 : fdt->fd = data;
124 :
125 6 : data = kvmalloc(max_t(size_t,
126 : 2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
127 : GFP_KERNEL_ACCOUNT);
128 6 : if (!data)
129 0 : goto out_arr;
130 6 : fdt->open_fds = data;
131 6 : data += nr / BITS_PER_BYTE;
132 6 : fdt->close_on_exec = data;
133 6 : data += nr / BITS_PER_BYTE;
134 6 : fdt->full_fds_bits = data;
135 :
136 6 : return fdt;
137 :
138 0 : out_arr:
139 0 : kvfree(fdt->fd);
140 0 : out_fdt:
141 0 : kfree(fdt);
142 : out:
143 : return NULL;
144 : }
145 :
146 : /*
147 : * Expand the file descriptor table.
148 : * This function will allocate a new fdtable and both fd array and fdset, of
149 : * the given size.
150 : * Return <0 error code on error; 1 on successful completion.
151 : * The files->file_lock should be held on entry, and will be held on exit.
152 : */
153 2 : static int expand_fdtable(struct files_struct *files, unsigned int nr)
154 : __releases(files->file_lock)
155 : __acquires(files->file_lock)
156 : {
157 2 : struct fdtable *new_fdt, *cur_fdt;
158 :
159 2 : spin_unlock(&files->file_lock);
160 2 : new_fdt = alloc_fdtable(nr);
161 :
162 : /* make sure all fd_install() callers have seen resize_in_progress
163 : * or have finished their rcu_read_lock_sched() section.
164 : */
165 2 : if (atomic_read(&files->count) > 1)
166 0 : synchronize_rcu();
167 :
168 2 : spin_lock(&files->file_lock);
169 2 : if (!new_fdt)
170 : return -ENOMEM;
171 : /*
172 : * Extremely unlikely race: sysctl_nr_open decreased between the check in
173 : * the caller and alloc_fdtable(). Cheaper to catch it here...
174 : */
175 2 : if (unlikely(new_fdt->max_fds <= nr)) {
176 0 : __free_fdtable(new_fdt);
177 0 : return -EMFILE;
178 : }
179 2 : cur_fdt = files_fdtable(files);
180 2 : BUG_ON(nr < cur_fdt->max_fds);
181 2 : copy_fdtable(new_fdt, cur_fdt);
182 2 : rcu_assign_pointer(files->fdt, new_fdt);
183 2 : if (cur_fdt != &files->fdtab)
184 0 : call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
185 : /* coupled with smp_rmb() in fd_install() */
186 2 : smp_wmb();
187 2 : return 1;
188 : }
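/*
 * A sketch of the resize handshake implied by the code above and by the
 * lockless fast path in fd_install() further down:
 *
 *	expand_files()/expand_fdtable()      fd_install() fast path
 *	-------------------------------      ----------------------
 *	resize_in_progress = true
 *	synchronize_rcu()                    rcu_read_lock_sched()
 *	copy table, rcu_assign_pointer()     if (resize_in_progress)
 *	smp_wmb()                                    take file_lock instead
 *	resize_in_progress = false           smp_rmb()
 *	                                     fdt = rcu_dereference_sched(...)
 *
 * If an installer sees resize_in_progress == false, the smp_wmb()/smp_rmb()
 * pairing guarantees it also sees the fully populated new table.
 */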
189 :
190 : /*
191 : * Expand files.
192 : * This function will expand the file structures, if the requested size exceeds
193 : * the current capacity and there is room for expansion.
194 : * Return <0 error code on error; 0 when nothing done; 1 when files were
195 : * expanded and execution may have blocked.
196 : * The files->file_lock should be held on entry, and will be held on exit.
197 : */
198 61146 : static int expand_files(struct files_struct *files, unsigned int nr)
199 : __releases(files->file_lock)
200 : __acquires(files->file_lock)
201 : {
202 61146 : struct fdtable *fdt;
203 61146 : int expanded = 0;
204 :
205 61146 : repeat:
206 61146 : fdt = files_fdtable(files);
207 :
208 : /* Do we need to expand? */
209 61151 : if (nr < fdt->max_fds)
210 61149 : return expanded;
211 :
212 : /* Can we expand? */
213 2 : if (nr >= sysctl_nr_open)
214 : return -EMFILE;
215 :
216 2 : if (unlikely(files->resize_in_progress)) {
217 0 : spin_unlock(&files->file_lock);
218 0 : expanded = 1;
219 0 : wait_event(files->resize_wait, !files->resize_in_progress);
220 0 : spin_lock(&files->file_lock);
221 0 : goto repeat;
222 : }
223 :
224 : /* All good, so we try */
225 2 : files->resize_in_progress = true;
226 2 : expanded = expand_fdtable(files, nr);
227 2 : files->resize_in_progress = false;
228 :
229 2 : wake_up_all(&files->resize_wait);
230 2 : return expanded;
231 : }
232 :
233 57294 : static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt)
234 : {
235 57294 : __set_bit(fd, fdt->close_on_exec);
236 57300 : }
237 :
238 5064 : static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
239 : {
240 5064 : if (test_bit(fd, fdt->close_on_exec))
241 1016 : __clear_bit(fd, fdt->close_on_exec);
242 5064 : }
243 :
244 61143 : static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
245 : {
246 61143 : __set_bit(fd, fdt->open_fds);
247 61143 : fd /= BITS_PER_LONG;
248 61143 : if (!~fdt->open_fds[fd])
249 0 : __set_bit(fd, fdt->full_fds_bits);
250 61143 : }
251 :
252 143766 : static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
253 : {
254 143766 : __clear_bit(fd, fdt->open_fds);
255 143760 : __clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
256 143761 : }
257 :
258 1357 : static unsigned int count_open_files(struct fdtable *fdt)
259 : {
260 1357 : unsigned int size = fdt->max_fds;
261 1357 : unsigned int i;
262 :
263 : /* Find the last open fd */
264 2426 : for (i = size / BITS_PER_LONG; i > 0; ) {
265 2422 : if (fdt->open_fds[--i])
266 : break;
267 : }
268 1357 : i = (i + 1) * BITS_PER_LONG;
269 1357 : return i;
270 : }
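/*
 * The result is rounded up to a whole bitmap word. For example, with
 * BITS_PER_LONG == 64 and the highest open fd being 70, the last
 * non-zero word of open_fds is word 1, so this returns (1 + 1) * 64 = 128.
 */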
271 :
272 1357 : static unsigned int sane_fdtable_size(struct fdtable *fdt, unsigned int max_fds)
273 : {
274 1357 : unsigned int count;
275 :
276 1357 : count = count_open_files(fdt);
277 1357 : if (max_fds < NR_OPEN_DEFAULT)
278 : max_fds = NR_OPEN_DEFAULT;
279 1357 : return min(count, max_fds);
280 : }
281 :
282 : /*
283 : * Allocate a new files structure and copy contents from the
284 : * passed in files structure.
285 : * errorp will be valid only when the returned files_struct is NULL.
286 : */
287 1353 : struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int *errorp)
288 : {
289 1353 : struct files_struct *newf;
290 1353 : struct file **old_fds, **new_fds;
291 1353 : unsigned int open_files, i;
292 1353 : struct fdtable *old_fdt, *new_fdt;
293 :
294 1353 : *errorp = -ENOMEM;
295 1353 : newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
296 1353 : if (!newf)
297 0 : goto out;
298 :
299 1353 : atomic_set(&newf->count, 1);
300 :
301 1353 : spin_lock_init(&newf->file_lock);
302 1353 : newf->resize_in_progress = false;
303 1353 : init_waitqueue_head(&newf->resize_wait);
304 1353 : newf->next_fd = 0;
305 1353 : new_fdt = &newf->fdtab;
306 1353 : new_fdt->max_fds = NR_OPEN_DEFAULT;
307 1353 : new_fdt->close_on_exec = newf->close_on_exec_init;
308 1353 : new_fdt->open_fds = newf->open_fds_init;
309 1353 : new_fdt->full_fds_bits = newf->full_fds_bits_init;
310 1353 : new_fdt->fd = &newf->fd_array[0];
311 :
312 1353 : spin_lock(&oldf->file_lock);
313 1353 : old_fdt = files_fdtable(oldf);
314 1353 : open_files = sane_fdtable_size(old_fdt, max_fds);
315 :
316 : /*
317 : * Check whether we need to allocate a larger fd array and fd set.
318 : */
319 1357 : while (unlikely(open_files > new_fdt->max_fds)) {
320 4 : spin_unlock(&oldf->file_lock);
321 :
322 4 : if (new_fdt != &newf->fdtab)
323 0 : __free_fdtable(new_fdt);
324 :
325 4 : new_fdt = alloc_fdtable(open_files - 1);
326 4 : if (!new_fdt) {
327 0 : *errorp = -ENOMEM;
328 0 : goto out_release;
329 : }
330 :
331 : /* beyond sysctl_nr_open; nothing to do */
332 4 : if (unlikely(new_fdt->max_fds < open_files)) {
333 0 : __free_fdtable(new_fdt);
334 0 : *errorp = -EMFILE;
335 0 : goto out_release;
336 : }
337 :
338 : /*
339 : * Reacquire the oldf lock and a pointer to its fd table;
340 : * it may have grown a new, bigger fd table while the lock
341 : * was dropped, so we need the latest pointer.
342 : */
343 4 : spin_lock(&oldf->file_lock);
344 4 : old_fdt = files_fdtable(oldf);
345 4 : open_files = sane_fdtable_size(old_fdt, max_fds);
346 : }
347 :
348 1353 : copy_fd_bitmaps(new_fdt, old_fdt, open_files);
349 :
350 1353 : old_fds = old_fdt->fd;
351 1353 : new_fds = new_fdt->fd;
352 :
353 88713 : for (i = open_files; i != 0; i--) {
354 87360 : struct file *f = *old_fds++;
355 87360 : if (f) {
356 10436 : get_file(f);
357 : } else {
358 : /*
359 : * The fd may be claimed in the fd bitmap but not yet
360 : * instantiated in the files array if a sibling thread
361 : * is partway through open(). So make sure that this
362 : * fd is available to the new process.
363 : */
364 76924 : __clear_open_fd(open_files - i, new_fdt);
365 : }
366 87360 : rcu_assign_pointer(*new_fds++, f);
367 : }
368 1353 : spin_unlock(&oldf->file_lock);
369 :
370 : /* clear the remainder */
371 1353 : memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));
372 :
373 1353 : rcu_assign_pointer(newf->fdt, new_fdt);
374 :
375 1353 : return newf;
376 :
377 0 : out_release:
378 0 : kmem_cache_free(files_cachep, newf);
379 : out:
380 : return NULL;
381 : }
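/*
 * A minimal sketch of how dup_fd() is used on the fork()/unshare() side
 * (e.g. copy_files() in kernel/fork.c), giving the child a private copy
 * of the parent's descriptor table:
 *
 *	int err;
 *	struct files_struct *copy;
 *
 *	copy = dup_fd(current->files, NR_OPEN_MAX, &err);
 *	if (!copy)
 *		return err;
 */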
382 :
383 1329 : static struct fdtable *close_files(struct files_struct * files)
384 : {
385 : /*
386 : * It is safe to dereference the fd table without RCU or
387 : * ->file_lock because this is the last reference to the
388 : * files structure.
389 : */
390 1329 : struct fdtable *fdt = rcu_dereference_raw(files->fdt);
391 1329 : unsigned int i, j = 0;
392 :
393 2670 : for (;;) {
394 2670 : unsigned long set;
395 2670 : i = j * BITS_PER_LONG;
396 2670 : if (i >= fdt->max_fds)
397 : break;
398 1341 : set = fdt->open_fds[j++];
399 5271 : while (set) {
400 3930 : if (set & 1) {
401 3268 : struct file * file = xchg(&fdt->fd[i], NULL);
402 3268 : if (file) {
403 3268 : filp_close(file, files);
404 3268 : cond_resched();
405 : }
406 : }
407 3930 : i++;
408 3930 : set >>= 1;
409 : }
410 : }
411 :
412 1329 : return fdt;
413 : }
414 :
415 1331 : void put_files_struct(struct files_struct *files)
416 : {
417 2662 : if (atomic_dec_and_test(&files->count)) {
418 1329 : struct fdtable *fdt = close_files(files);
419 :
420 : /* free the arrays if they are not embedded */
421 1329 : if (fdt != &files->fdtab)
422 4 : __free_fdtable(fdt);
423 1329 : kmem_cache_free(files_cachep, files);
424 : }
425 1331 : }
426 :
427 1331 : void exit_files(struct task_struct *tsk)
428 : {
429 1331 : struct files_struct * files = tsk->files;
430 :
431 1331 : if (files) {
432 1331 : task_lock(tsk);
433 1331 : tsk->files = NULL;
434 1331 : task_unlock(tsk);
435 1331 : put_files_struct(files);
436 : }
437 1331 : }
438 :
439 : struct files_struct init_files = {
440 : .count = ATOMIC_INIT(1),
441 : .fdt = &init_files.fdtab,
442 : .fdtab = {
443 : .max_fds = NR_OPEN_DEFAULT,
444 : .fd = &init_files.fd_array[0],
445 : .close_on_exec = init_files.close_on_exec_init,
446 : .open_fds = init_files.open_fds_init,
447 : .full_fds_bits = init_files.full_fds_bits_init,
448 : },
449 : .file_lock = __SPIN_LOCK_UNLOCKED(init_files.file_lock),
450 : .resize_wait = __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
451 : };
452 :
453 59318 : static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
454 : {
455 59318 : unsigned int maxfd = fdt->max_fds;
456 59318 : unsigned int maxbit = maxfd / BITS_PER_LONG;
457 59318 : unsigned int bitbit = start / BITS_PER_LONG;
458 :
459 59318 : bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
460 59317 : if (bitbit > maxfd)
461 : return maxfd;
462 59317 : if (bitbit > start)
463 : start = bitbit;
464 59317 : return find_next_zero_bit(fdt->open_fds, maxfd, start);
465 : }
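/*
 * A sketch of the two-level search above. Each set bit of full_fds_bits
 * marks a completely full word of open_fds, so the first
 * find_next_zero_bit() skips whole 64-fd words (on a 64-bit build):
 *
 *	full_fds_bits: 1 1 0 ...          first not-full word is word 2
 *	bitbit = 2 * 64 = 128             search can start at fd 128
 *	find_next_zero_bit(open_fds, maxfd, 128)   first free fd
 *
 * With the low descriptors densely populated this scans roughly one
 * summary word per 4096 fds instead of one open_fds word per 64 fds.
 */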
466 :
467 : /*
468 : * allocate a file descriptor, mark it busy.
469 : */
470 59318 : static int alloc_fd(unsigned start, unsigned end, unsigned flags)
471 : {
472 59318 : struct files_struct *files = current->files;
473 59318 : unsigned int fd;
474 59318 : int error;
475 59318 : struct fdtable *fdt;
476 :
477 59318 : spin_lock(&files->file_lock);
478 59321 : repeat:
479 59321 : fdt = files_fdtable(files);
480 59317 : fd = start;
481 59317 : if (fd < files->next_fd)
482 : fd = files->next_fd;
483 :
484 59317 : if (fd < fdt->max_fds)
485 59318 : fd = find_next_fd(fdt, fd);
486 :
487 : /*
488 : * N.B. For clone tasks sharing a files structure, this test
489 : * will limit the total number of files that can be opened.
490 : */
491 59326 : error = -EMFILE;
492 59326 : if (fd >= end)
493 0 : goto out;
494 :
495 59326 : error = expand_files(files, fd);
496 59317 : if (error < 0)
497 0 : goto out;
498 :
499 : /*
500 : * If we needed to expand the fd array we
501 : * might have blocked - try again.
502 : */
503 59317 : if (error)
504 0 : goto repeat;
505 :
506 59317 : if (start <= files->next_fd)
507 58670 : files->next_fd = fd + 1;
508 :
509 59317 : __set_open_fd(fd, fdt);
510 59309 : if (flags & O_CLOEXEC)
511 56090 : __set_close_on_exec(fd, fdt);
512 : else
513 3219 : __clear_close_on_exec(fd, fdt);
514 59319 : error = fd;
515 : #if 1
516 : /* Sanity check */
517 59319 : if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
518 0 : printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
519 0 : rcu_assign_pointer(fdt->fd[fd], NULL);
520 : }
521 : #endif
522 :
523 59319 : out:
524 59319 : spin_unlock(&files->file_lock);
525 59319 : return error;
526 : }
527 :
528 58015 : int __get_unused_fd_flags(unsigned flags, unsigned long nofile)
529 : {
530 58015 : return alloc_fd(0, nofile, flags);
531 : }
532 :
533 57924 : int get_unused_fd_flags(unsigned flags)
534 : {
535 57924 : return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE));
536 : }
537 : EXPORT_SYMBOL(get_unused_fd_flags);
538 :
539 66838 : static void __put_unused_fd(struct files_struct *files, unsigned int fd)
540 : {
541 66838 : struct fdtable *fdt = files_fdtable(files);
542 66850 : __clear_open_fd(fd, fdt);
543 66839 : if (fd < files->next_fd)
544 45339 : files->next_fd = fd;
545 66839 : }
546 :
547 12870 : void put_unused_fd(unsigned int fd)
548 : {
549 12870 : struct files_struct *files = current->files;
550 12870 : spin_lock(&files->file_lock);
551 12872 : __put_unused_fd(files, fd);
552 12872 : spin_unlock(&files->file_lock);
553 12872 : }
554 :
555 : EXPORT_SYMBOL(put_unused_fd);
556 :
557 : /*
558 : * Install a file pointer in the fd array.
559 : *
560 : * The VFS is full of places where we drop the files lock between
561 : * setting the open_fds bitmap and installing the file in the file
562 : * array. At any such point, we are vulnerable to a dup2() race
563 : * installing a file in the array before us. We need to detect this and
564 : * fput() the struct file we are about to overwrite in this case.
565 : *
566 : * It should never happen - if we allow dup2() to do it, _really_ bad things
567 : * will follow.
568 : *
569 : * This consumes the "file" refcount, so callers should treat it
570 : * as if they had called fput(file).
571 : */
572 :
573 46440 : void fd_install(unsigned int fd, struct file *file)
574 : {
575 46440 : struct files_struct *files = current->files;
576 46440 : struct fdtable *fdt;
577 :
578 46440 : rcu_read_lock_sched();
579 :
580 46445 : if (unlikely(files->resize_in_progress)) {
581 0 : rcu_read_unlock_sched();
582 0 : spin_lock(&files->file_lock);
583 0 : fdt = files_fdtable(files);
584 0 : BUG_ON(fdt->fd[fd] != NULL);
585 0 : rcu_assign_pointer(fdt->fd[fd], file);
586 0 : spin_unlock(&files->file_lock);
587 0 : return;
588 : }
589 : /* coupled with smp_wmb() in expand_fdtable() */
590 46445 : smp_rmb();
591 46446 : fdt = rcu_dereference_sched(files->fdt);
592 46443 : BUG_ON(fdt->fd[fd] != NULL);
593 46443 : rcu_assign_pointer(fdt->fd[fd], file);
594 46443 : rcu_read_unlock_sched();
595 : }
596 :
597 : EXPORT_SYMBOL(fd_install);
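/*
 * A minimal sketch (not from this file) of the canonical
 * allocate-then-install pattern built on the helpers above; the
 * my_create_file() helper is hypothetical. Errors before fd_install()
 * are unwound with put_unused_fd(); fd_install() itself consumes the
 * file reference, so no fput() follows it:
 *
 *	struct file *file;
 *	int fd = get_unused_fd_flags(O_CLOEXEC);
 *
 *	if (fd < 0)
 *		return fd;
 *	file = my_create_file();		// hypothetical helper
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);			// consumes the reference
 *	return fd;
 */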
598 :
599 51068 : static struct file *pick_file(struct files_struct *files, unsigned fd)
600 : {
601 51068 : struct file *file = NULL;
602 51068 : struct fdtable *fdt;
603 :
604 51068 : spin_lock(&files->file_lock);
605 51096 : fdt = files_fdtable(files);
606 51094 : if (fd >= fdt->max_fds)
607 80 : goto out_unlock;
608 51014 : file = fdt->fd[fd];
609 51014 : if (!file)
610 5 : goto out_unlock;
611 51009 : rcu_assign_pointer(fdt->fd[fd], NULL);
612 51009 : __put_unused_fd(files, fd);
613 :
614 51090 : out_unlock:
615 51090 : spin_unlock(&files->file_lock);
616 51091 : return file;
617 : }
618 :
619 51070 : int close_fd(unsigned fd)
620 : {
621 51070 : struct files_struct *files = current->files;
622 51070 : struct file *file;
623 :
624 51070 : file = pick_file(files, fd);
625 51094 : if (!file)
626 : return -EBADF;
627 :
628 51009 : return filp_close(file, files);
629 : }
630 : EXPORT_SYMBOL(close_fd); /* for ksys_close() */
631 :
632 0 : static inline void __range_cloexec(struct files_struct *cur_fds,
633 : unsigned int fd, unsigned int max_fd)
634 : {
635 0 : struct fdtable *fdt;
636 :
637 0 : if (fd > max_fd)
638 : return;
639 :
640 0 : spin_lock(&cur_fds->file_lock);
641 0 : fdt = files_fdtable(cur_fds);
642 0 : bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
643 0 : spin_unlock(&cur_fds->file_lock);
644 : }
645 :
646 0 : static inline void __range_close(struct files_struct *cur_fds, unsigned int fd,
647 : unsigned int max_fd)
648 : {
649 0 : while (fd <= max_fd) {
650 0 : struct file *file;
651 :
652 0 : file = pick_file(cur_fds, fd++);
653 0 : if (!file)
654 0 : continue;
655 :
656 0 : filp_close(file, cur_fds);
657 0 : cond_resched();
658 : }
659 0 : }
660 :
661 : /**
662 : * __close_range() - Close all file descriptors in a given range.
663 : *
664 : * @fd: starting file descriptor to close
665 : * @max_fd: last file descriptor to close
666 : *
667 : * This closes a range of file descriptors. All file descriptors
668 : * from @fd up to and including @max_fd are closed.
669 : */
670 0 : int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
671 : {
672 0 : unsigned int cur_max;
673 0 : struct task_struct *me = current;
674 0 : struct files_struct *cur_fds = me->files, *fds = NULL;
675 :
676 0 : if (flags & ~(CLOSE_RANGE_UNSHARE | CLOSE_RANGE_CLOEXEC))
677 : return -EINVAL;
678 :
679 0 : if (fd > max_fd)
680 : return -EINVAL;
681 :
682 0 : rcu_read_lock();
683 0 : cur_max = files_fdtable(cur_fds)->max_fds;
684 0 : rcu_read_unlock();
685 :
686 : /* cap to last valid index into fdtable */
687 0 : cur_max--;
688 :
689 0 : if (flags & CLOSE_RANGE_UNSHARE) {
690 0 : int ret;
691 0 : unsigned int max_unshare_fds = NR_OPEN_MAX;
692 :
693 : /*
694 : * If the requested range is greater than the current maximum,
695 : * we're closing everything, so only copy the file descriptors
696 : * beneath the lowest file descriptor to be closed.
697 : * If the caller requested all fds to be made close-on-exec, copy
698 : * all of the file descriptors, since they still want to use them.
699 : */
700 0 : if (!(flags & CLOSE_RANGE_CLOEXEC) && (max_fd >= cur_max))
701 0 : max_unshare_fds = fd;
702 :
703 0 : ret = unshare_fd(CLONE_FILES, max_unshare_fds, &fds);
704 0 : if (ret)
705 : return ret;
706 :
707 : /*
708 : * We used to share our file descriptor table, and have now
709 : * created a private one; make sure we're using it below.
710 : */
711 0 : if (fds)
712 0 : swap(cur_fds, fds);
713 : }
714 :
715 0 : max_fd = min(max_fd, cur_max);
716 :
717 0 : if (flags & CLOSE_RANGE_CLOEXEC)
718 0 : __range_cloexec(cur_fds, fd, max_fd);
719 : else
720 0 : __range_close(cur_fds, fd, max_fd);
721 :
722 0 : if (fds) {
723 : /*
724 : * We're done closing the files we were supposed to. Time to install
725 : * the new file descriptor table and drop the old one.
726 : */
727 0 : task_lock(me);
728 0 : me->files = cur_fds;
729 0 : task_unlock(me);
730 0 : put_files_struct(fds);
731 : }
732 :
733 : return 0;
734 : }
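/*
 * A worked example of the semantics above: __close_range() backs the
 * close_range(2) syscall, so fd = 3, max_fd = ~0U,
 * flags = CLOSE_RANGE_CLOEXEC marks every descriptor from 3 upward
 * close-on-exec instead of closing it; max_fd is first capped to the
 * table's last valid index.
 */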
735 :
736 : /*
737 : * See close_fd_get_file() below; this variant assumes current->files->file_lock
738 : * is held.
739 : */
740 0 : int __close_fd_get_file(unsigned int fd, struct file **res)
741 : {
742 0 : struct files_struct *files = current->files;
743 0 : struct file *file;
744 0 : struct fdtable *fdt;
745 :
746 0 : fdt = files_fdtable(files);
747 0 : if (fd >= fdt->max_fds)
748 0 : goto out_err;
749 0 : file = fdt->fd[fd];
750 0 : if (!file)
751 0 : goto out_err;
752 0 : rcu_assign_pointer(fdt->fd[fd], NULL);
753 0 : __put_unused_fd(files, fd);
754 0 : get_file(file);
755 0 : *res = file;
756 0 : return 0;
757 0 : out_err:
758 0 : *res = NULL;
759 0 : return -ENOENT;
760 : }
761 :
762 : /*
763 : * Variant of close_fd that gets a ref on the file for later fput.
764 : * The caller must ensure that filp_close() is called on the file,
765 : * followed by an fput().
766 : */
767 0 : int close_fd_get_file(unsigned int fd, struct file **res)
768 : {
769 0 : struct files_struct *files = current->files;
770 0 : int ret;
771 :
772 0 : spin_lock(&files->file_lock);
773 0 : ret = __close_fd_get_file(fd, res);
774 0 : spin_unlock(&files->file_lock);
775 :
776 0 : return ret;
777 : }
778 :
779 1043 : void do_close_on_exec(struct files_struct *files)
780 : {
781 1043 : unsigned i;
782 1043 : struct fdtable *fdt;
783 :
784 : /* exec unshares first */
785 1043 : spin_lock(&files->file_lock);
786 2095 : for (i = 0; ; i++) {
787 2095 : unsigned long set;
788 2095 : unsigned fd = i * BITS_PER_LONG;
789 2095 : fdt = files_fdtable(files);
790 2095 : if (fd >= fdt->max_fds)
791 : break;
792 1052 : set = fdt->close_on_exec[i];
793 1052 : if (!set)
794 37 : continue;
795 1015 : fdt->close_on_exec[i] = 0;
796 14497 : for ( ; set ; fd++, set >>= 1) {
797 13482 : struct file *file;
798 13482 : if (!(set & 1))
799 6643 : continue;
800 6839 : file = fdt->fd[fd];
801 6839 : if (!file)
802 3875 : continue;
803 2964 : rcu_assign_pointer(fdt->fd[fd], NULL);
804 2964 : __put_unused_fd(files, fd);
805 2964 : spin_unlock(&files->file_lock);
806 2964 : filp_close(file, files);
807 2964 : cond_resched();
808 16446 : spin_lock(&files->file_lock);
809 : }
810 :
811 : }
812 1043 : spin_unlock(&files->file_lock);
813 1043 : }
814 :
815 19613 : static struct file *__fget_files(struct files_struct *files, unsigned int fd,
816 : fmode_t mask, unsigned int refs)
817 : {
818 19613 : struct file *file;
819 :
820 19613 : rcu_read_lock();
821 19612 : loop:
822 19612 : file = files_lookup_fd_rcu(files, fd);
823 19613 : if (file) {
824 : /* The file object's refcount may already have hit zero, in
825 : * which case get_file_rcu_many() fails; dup2()'s atomicity
826 : * guarantee is the reason we loop to catch the new file (or NULL).
827 : */
828 19613 : if (file->f_mode & mask)
829 : file = NULL;
830 39226 : else if (!get_file_rcu_many(file, refs))
831 0 : goto loop;
832 : }
833 19613 : rcu_read_unlock();
834 :
835 19613 : return file;
836 : }
837 :
838 19508 : static inline struct file *__fget(unsigned int fd, fmode_t mask,
839 : unsigned int refs)
840 : {
841 19508 : return __fget_files(current->files, fd, mask, refs);
842 : }
843 :
844 0 : struct file *fget_many(unsigned int fd, unsigned int refs)
845 : {
846 0 : return __fget(fd, FMODE_PATH, refs);
847 : }
848 :
849 17948 : struct file *fget(unsigned int fd)
850 : {
851 17948 : return __fget(fd, FMODE_PATH, 1);
852 : }
853 : EXPORT_SYMBOL(fget);
854 :
855 67 : struct file *fget_raw(unsigned int fd)
856 : {
857 67 : return __fget(fd, 0, 1);
858 : }
859 : EXPORT_SYMBOL(fget_raw);
860 :
861 105 : struct file *fget_task(struct task_struct *task, unsigned int fd)
862 : {
863 105 : struct file *file = NULL;
864 :
865 105 : task_lock(task);
866 105 : if (task->files)
867 105 : file = __fget_files(task->files, fd, 0, 1);
868 105 : task_unlock(task);
869 :
870 105 : return file;
871 : }
872 :
873 244 : struct file *task_lookup_fd_rcu(struct task_struct *task, unsigned int fd)
874 : {
875 : /* Must be called with rcu_read_lock held */
876 244 : struct files_struct *files;
877 244 : struct file *file = NULL;
878 :
879 244 : task_lock(task);
880 244 : files = task->files;
881 244 : if (files)
882 244 : file = files_lookup_fd_rcu(files, fd);
883 244 : task_unlock(task);
884 :
885 244 : return file;
886 : }
887 :
888 3157 : struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *ret_fd)
889 : {
890 : /* Must be called with rcu_read_lock held */
891 3157 : struct files_struct *files;
892 3157 : unsigned int fd = *ret_fd;
893 3157 : struct file *file = NULL;
894 :
895 3157 : task_lock(task);
896 3157 : files = task->files;
897 3157 : if (files) {
898 10560 : for (; fd < files_fdtable(files)->max_fds; fd++) {
899 10240 : file = files_lookup_fd_rcu(files, fd);
900 10240 : if (file)
901 : break;
902 : }
903 : }
904 3157 : task_unlock(task);
905 3157 : *ret_fd = fd;
906 3157 : return file;
907 : }
908 :
909 : /*
910 : * Lightweight file lookup - no refcnt increment if fd table isn't shared.
911 : *
912 : * You can use this instead of fget if you satisfy all of the following
913 : * conditions:
914 : * 1) You must call fput_light before exiting the syscall and returning control
915 : * to userspace (i.e. you cannot remember the returned struct file * after
916 : * returning to userspace).
917 : * 2) You must not call filp_close on the returned struct file * in between
918 : * calls to fget_light and fput_light.
919 : * 3) You must not clone the current task in between the calls to fget_light
920 : * and fput_light.
921 : *
922 : * The fput_needed flag returned by fget_light should be passed to the
923 : * corresponding fput_light.
924 : */
925 146552 : static unsigned long __fget_light(unsigned int fd, fmode_t mask)
926 : {
927 146552 : struct files_struct *files = current->files;
928 146552 : struct file *file;
929 :
930 146552 : if (atomic_read(&files->count) == 1) {
931 145068 : file = files_lookup_fd_raw(files, fd);
932 145080 : if (!file || unlikely(file->f_mode & mask))
933 : return 0;
934 145028 : return (unsigned long)file;
935 : } else {
936 1493 : file = __fget(fd, mask, 1);
937 1493 : if (!file)
938 : return 0;
939 1493 : return FDPUT_FPUT | (unsigned long)file;
940 : }
941 : }
942 74605 : unsigned long __fdget(unsigned int fd)
943 : {
944 33827 : return __fget_light(fd, FMODE_PATH);
945 : }
946 : EXPORT_SYMBOL(__fdget);
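/*
 * A minimal usage sketch. Callers normally reach __fdget()/__fdget_raw()
 * through the struct fd wrappers in <linux/file.h>, which decode the
 * FDPUT_* bits packed into the low bits of the returned value:
 *
 *	struct fd f = fdget(fd);
 *	if (!f.file)
 *		return -EBADF;
 *	// ... use f.file; no reference was taken if the table is unshared
 *	fdput(f);			// fput() only if FDPUT_FPUT is set
 */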
947 :
948 71955 : unsigned long __fdget_raw(unsigned int fd)
949 : {
950 71955 : return __fget_light(fd, 0);
951 : }
952 :
953 40778 : unsigned long __fdget_pos(unsigned int fd)
954 : {
955 40778 : unsigned long v = __fdget(fd);
956 40779 : struct file *file = (struct file *)(v & ~3);
957 :
958 40779 : if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
959 20358 : if (file_count(file) > 1) {
960 2077 : v |= FDPUT_POS_UNLOCK;
961 2077 : mutex_lock(&file->f_pos_lock);
962 : }
963 : }
964 40779 : return v;
965 : }
966 :
967 2076 : void __f_unlock_pos(struct file *f)
968 : {
969 2076 : mutex_unlock(&f->f_pos_lock);
970 2076 : }
971 :
972 : /*
973 : * We only lock f_pos if we have threads or if the file might be
974 : * shared with another process. In both cases we'll have an elevated
975 : * file count (done either by fdget() or by fork()).
976 : */
977 :
978 1215 : void set_close_on_exec(unsigned int fd, int flag)
979 : {
980 1215 : struct files_struct *files = current->files;
981 1215 : struct fdtable *fdt;
982 1215 : spin_lock(&files->file_lock);
983 1215 : fdt = files_fdtable(files);
984 1215 : if (flag)
985 1204 : __set_close_on_exec(fd, fdt);
986 : else
987 11 : __clear_close_on_exec(fd, fdt);
988 1215 : spin_unlock(&files->file_lock);
989 1215 : }
990 :
991 91 : bool get_close_on_exec(unsigned int fd)
992 : {
993 91 : struct files_struct *files = current->files;
994 91 : struct fdtable *fdt;
995 91 : bool res;
996 91 : rcu_read_lock();
997 91 : fdt = files_fdtable(files);
998 91 : res = close_on_exec(fd, fdt);
999 91 : rcu_read_unlock();
1000 91 : return res;
1001 : }
1002 :
1003 1834 : static int do_dup2(struct files_struct *files,
1004 : struct file *file, unsigned fd, unsigned flags)
1005 : __releases(&files->file_lock)
1006 : {
1007 1834 : struct file *tofree;
1008 1834 : struct fdtable *fdt;
1009 :
1010 : /*
1011 : * We need to detect attempts to do dup2() over allocated but still
1012 : * not finished descriptor. NB: OpenBSD avoids that at the price of
1013 : * extra work in their equivalent of fget() - they insert struct
1014 : * file immediately after grabbing descriptor, mark it larval if
1015 : * more work (e.g. actual opening) is needed and make sure that
1016 : * fget() treats larval files as absent. Potentially interesting,
1017 : * but while extra work in fget() is trivial, locking implications
1018 : * and amount of surgery on open()-related paths in VFS are not.
1019 : * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
1020 : * deadlocks in rather amusing ways, AFAICS. All of that is out of
1021 : * scope of POSIX or SUS, since neither considers shared descriptor
1022 : * tables and this condition does not arise without those.
1023 : */
1024 1834 : fdt = files_fdtable(files);
1025 1834 : tofree = fdt->fd[fd];
1026 2432 : if (!tofree && fd_is_open(fd, fdt))
1027 0 : goto Ebusy;
1028 1834 : get_file(file);
1029 1834 : rcu_assign_pointer(fdt->fd[fd], file);
1030 1834 : __set_open_fd(fd, fdt);
1031 1834 : if (flags & O_CLOEXEC)
1032 0 : __set_close_on_exec(fd, fdt);
1033 : else
1034 1834 : __clear_close_on_exec(fd, fdt);
1035 1834 : spin_unlock(&files->file_lock);
1036 :
1037 1834 : if (tofree)
1038 1236 : filp_close(tofree, files);
1039 :
1040 1834 : return fd;
1041 :
1042 0 : Ebusy:
1043 0 : spin_unlock(&files->file_lock);
1044 0 : return -EBUSY;
1045 : }
1046 :
1047 0 : int replace_fd(unsigned fd, struct file *file, unsigned flags)
1048 : {
1049 0 : int err;
1050 0 : struct files_struct *files = current->files;
1051 :
1052 0 : if (!file)
1053 0 : return close_fd(fd);
1054 :
1055 0 : if (fd >= rlimit(RLIMIT_NOFILE))
1056 : return -EBADF;
1057 :
1058 0 : spin_lock(&files->file_lock);
1059 0 : err = expand_files(files, fd);
1060 0 : if (unlikely(err < 0))
1061 0 : goto out_unlock;
1062 0 : return do_dup2(files, file, fd, flags);
1063 :
1064 0 : out_unlock:
1065 0 : spin_unlock(&files->file_lock);
1066 0 : return err;
1067 : }
1068 :
1069 : /**
1070 : * __receive_fd() - Install received file into file descriptor table
1071 : *
1072 : * @fd: fd to install into (if negative, a new fd will be allocated)
1073 : * @file: struct file that was received from another process
1074 : * @ufd: __user pointer to write new fd number to
1075 : * @o_flags: the O_* flags to apply to the new fd entry
1076 : *
1077 : * Installs a received file into the file descriptor table, with appropriate
1078 : * checks and count updates. Optionally writes the fd number to userspace, if
1079 : * @ufd is non-NULL.
1080 : *
1081 : * This helper handles its own reference counting of the incoming
1082 : * struct file.
1083 : *
1084 : * Returns the newly installed fd or a negative error code on failure.
1085 : */
1086 54 : int __receive_fd(int fd, struct file *file, int __user *ufd, unsigned int o_flags)
1087 : {
1088 54 : int new_fd;
1089 54 : int error;
1090 :
1091 54 : error = security_file_receive(file);
1092 54 : if (error)
1093 : return error;
1094 :
1095 54 : if (fd < 0) {
1096 54 : new_fd = get_unused_fd_flags(o_flags);
1097 54 : if (new_fd < 0)
1098 : return new_fd;
1099 : } else {
1100 : new_fd = fd;
1101 : }
1102 :
1103 54 : if (ufd) {
1104 54 : error = put_user(new_fd, ufd);
1105 54 : if (error) {
1106 0 : if (fd < 0)
1107 0 : put_unused_fd(new_fd);
1108 0 : return error;
1109 : }
1110 : }
1111 :
1112 54 : if (fd < 0) {
1113 54 : fd_install(new_fd, get_file(file));
1114 : } else {
1115 0 : error = replace_fd(new_fd, file, o_flags);
1116 0 : if (error)
1117 : return error;
1118 : }
1119 :
1120 : /* Bump the sock usage counts, if any. */
1121 54 : __receive_sock(file);
1122 54 : return new_fd;
1123 : }
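/*
 * A usage note (based on kernels of this vintage, not stated in this
 * file): __receive_fd() is reached from the SCM_RIGHTS receive path and
 * seccomp's user-notification ADDFD ioctl via the receive_fd()/
 * receive_fd_user() wrappers in <linux/file.h>. Passing fd < 0 allocates
 * a fresh descriptor; fd >= 0 replaces an existing one via replace_fd().
 */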
1124 :
1125 1834 : static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
1126 : {
1127 1834 : int err = -EBADF;
1128 1834 : struct file *file;
1129 1834 : struct files_struct *files = current->files;
1130 :
1131 1834 : if ((flags & ~O_CLOEXEC) != 0)
1132 : return -EINVAL;
1133 :
1134 1834 : if (unlikely(oldfd == newfd))
1135 : return -EINVAL;
1136 :
1137 1834 : if (newfd >= rlimit(RLIMIT_NOFILE))
1138 : return -EBADF;
1139 :
1140 1834 : spin_lock(&files->file_lock);
1141 1834 : err = expand_files(files, newfd);
1142 1834 : file = files_lookup_fd_locked(files, oldfd);
1143 1834 : if (unlikely(!file))
1144 0 : goto Ebadf;
1145 1834 : if (unlikely(err < 0)) {
1146 0 : if (err == -EMFILE)
1147 0 : goto Ebadf;
1148 0 : goto out_unlock;
1149 : }
1150 1834 : return do_dup2(files, file, newfd, flags);
1151 :
1152 0 : Ebadf:
1153 : err = -EBADF;
1154 0 : out_unlock:
1155 0 : spin_unlock(&files->file_lock);
1156 0 : return err;
1157 : }
1158 :
1159 188 : SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
1160 : {
1161 94 : return ksys_dup3(oldfd, newfd, flags);
1162 : }
1163 :
1164 3480 : SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
1165 : {
1166 1740 : if (unlikely(newfd == oldfd)) { /* corner case */
1167 0 : struct files_struct *files = current->files;
1168 0 : int retval = oldfd;
1169 :
1170 0 : rcu_read_lock();
1171 0 : if (!files_lookup_fd_rcu(files, oldfd))
1172 0 : retval = -EBADF;
1173 0 : rcu_read_unlock();
1174 0 : return retval;
1175 : }
1176 1740 : return ksys_dup3(oldfd, newfd, 0);
1177 : }
1178 :
1179 26 : SYSCALL_DEFINE1(dup, unsigned int, fildes)
1180 : {
1181 13 : int ret = -EBADF;
1182 13 : struct file *file = fget_raw(fildes);
1183 :
1184 13 : if (file) {
1185 13 : ret = get_unused_fd_flags(0);
1186 13 : if (ret >= 0)
1187 13 : fd_install(ret, file);
1188 : else
1189 0 : fput(file);
1190 : }
1191 13 : return ret;
1192 : }
1193 :
1194 1301 : int f_dupfd(unsigned int from, struct file *file, unsigned flags)
1195 : {
1196 1301 : unsigned long nofile = rlimit(RLIMIT_NOFILE);
1197 1301 : int err;
1198 1301 : if (from >= nofile)
1199 : return -EINVAL;
1200 1301 : err = alloc_fd(from, nofile, flags);
1201 1301 : if (err >= 0) {
1202 1301 : get_file(file);
1203 1301 : fd_install(err, file);
1204 : }
1205 : return err;
1206 : }
1207 :
1208 0 : int iterate_fd(struct files_struct *files, unsigned n,
1209 : int (*f)(const void *, struct file *, unsigned),
1210 : const void *p)
1211 : {
1212 0 : struct fdtable *fdt;
1213 0 : int res = 0;
1214 0 : if (!files)
1215 : return 0;
1216 0 : spin_lock(&files->file_lock);
1217 0 : for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
1218 0 : struct file *file;
1219 0 : file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
1220 0 : if (!file)
1221 0 : continue;
1222 0 : res = f(p, file, n);
1223 0 : if (res)
1224 : break;
1225 : }
1226 0 : spin_unlock(&files->file_lock);
1227 0 : return res;
1228 : }
1229 : EXPORT_SYMBOL(iterate_fd);