Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * linux/fs/stat.c
4 : *
5 : * Copyright (C) 1991, 1992 Linus Torvalds
6 : */
7 :
8 : #include <linux/export.h>
9 : #include <linux/mm.h>
10 : #include <linux/errno.h>
11 : #include <linux/file.h>
12 : #include <linux/highuid.h>
13 : #include <linux/fs.h>
14 : #include <linux/namei.h>
15 : #include <linux/security.h>
16 : #include <linux/cred.h>
17 : #include <linux/syscalls.h>
18 : #include <linux/pagemap.h>
19 : #include <linux/compat.h>
20 :
21 : #include <linux/uaccess.h>
22 : #include <asm/unistd.h>
23 :
24 : #include "internal.h"
25 : #include "mount.h"
26 :
27 : /**
28 : * generic_fillattr - Fill in the basic attributes from the inode struct
29 : * @mnt_userns: user namespace of the mount the inode was found from
30 : * @inode: Inode to use as the source
31 : * @stat: Where to fill in the attributes
32 : *
33 : * Fill in the basic attributes in the kstat structure from data that's to be
34 : * found on the VFS inode structure. This is the default if no getattr inode
35 : * operation is supplied.
36 : *
37 : * If the inode has been found through an idmapped mount the user namespace of
38 : * the vfsmount must be passed through @mnt_userns. This function will then
39 : * take care to map the inode according to @mnt_userns before filling in the
40 : * uid and gid fields. On non-idmapped mounts or if permission checking is to be
41 : * performed on the raw inode simply pass init_user_ns.
42 : */
43 46488 : void generic_fillattr(struct user_namespace *mnt_userns, struct inode *inode,
44 : struct kstat *stat)
45 : {
46 46488 : stat->dev = inode->i_sb->s_dev;
47 46488 : stat->ino = inode->i_ino;
48 46488 : stat->mode = inode->i_mode;
49 46488 : stat->nlink = inode->i_nlink;
50 46488 : stat->uid = i_uid_into_mnt(mnt_userns, inode);
51 46488 : stat->gid = i_gid_into_mnt(mnt_userns, inode);
52 46488 : stat->rdev = inode->i_rdev;
53 46488 : stat->size = i_size_read(inode);
54 46488 : stat->atime = inode->i_atime;
55 46488 : stat->mtime = inode->i_mtime;
56 46488 : stat->ctime = inode->i_ctime;
57 46488 : stat->blksize = i_blocksize(inode);
58 46488 : stat->blocks = inode->i_blocks;
59 46488 : }
60 : EXPORT_SYMBOL(generic_fillattr);
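
A filesystem that supplies its own ->getattr typically calls generic_fillattr() first and then adjusts individual fields. A minimal sketch, assuming a hypothetical "foofs" that keeps a creation time in its private inode structure:

	static int foofs_getattr(struct user_namespace *mnt_userns,
				 const struct path *path, struct kstat *stat,
				 u32 request_mask, unsigned int query_flags)
	{
		struct inode *inode = d_inode(path->dentry);
		struct foofs_inode *fi = container_of(inode, struct foofs_inode,
						      vfs_inode); /* hypothetical */

		generic_fillattr(mnt_userns, inode, stat);	/* fill the basics */

		/* report a creation time the VFS inode does not carry itself */
		if (request_mask & STATX_BTIME) {
			stat->btime = fi->i_crtime;		/* hypothetical field */
			stat->result_mask |= STATX_BTIME;
		}
		return 0;
	}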
61 :
62 : /**
63 : * vfs_getattr_nosec - getattr without security checks
64 : * @path: file to get attributes from
65 : * @stat: structure to return attributes in
66 : * @request_mask: STATX_xxx flags indicating what the caller wants
67 : * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
68 : *
69 : * Get attributes without calling security_inode_getattr.
70 : *
71 : * Currently the only caller other than vfs_getattr is internal to the
72 : * filehandle lookup code, which uses only the inode number and returns no
73 : * attributes to any user. Any other code probably wants vfs_getattr.
74 : */
75 46489 : int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
76 : u32 request_mask, unsigned int query_flags)
77 : {
78 46489 : struct user_namespace *mnt_userns;
79 46489 : struct inode *inode = d_backing_inode(path->dentry);
80 :
81 46489 : memset(stat, 0, sizeof(*stat));
82 46489 : stat->result_mask |= STATX_BASIC_STATS;
83 46489 : query_flags &= AT_STATX_SYNC_TYPE;
84 :
85 : /* allow the fs to override these if it really wants to */
86 : /* SB_NOATIME means filesystem supplies dummy atime value */
87 46489 : if (inode->i_sb->s_flags & SB_NOATIME)
88 0 : stat->result_mask &= ~STATX_ATIME;
89 46489 : if (IS_AUTOMOUNT(inode))
90 0 : stat->attributes |= STATX_ATTR_AUTOMOUNT;
91 :
92 46489 : if (IS_DAX(inode))
93 : stat->attributes |= STATX_ATTR_DAX;
94 :
95 46489 : mnt_userns = mnt_user_ns(path->mnt);
96 46490 : if (inode->i_op->getattr)
97 31704 : return inode->i_op->getattr(mnt_userns, path, stat,
98 : request_mask, query_flags);
99 :
100 14786 : generic_fillattr(mnt_userns, inode, stat);
101 14786 : return 0;
102 : }
103 : EXPORT_SYMBOL(vfs_getattr_nosec);
104 :
105 : /**
106 : * vfs_getattr - Get the enhanced basic attributes of a file
107 : * @path: The file of interest
108 : * @stat: Where to return the statistics
109 : * @request_mask: STATX_xxx flags indicating what the caller wants
110 : * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
111 : *
112 : * Ask the filesystem for a file's attributes. The caller must set
113 : * request_mask and query_flags to indicate what they want.
114 : *
115 : * If the file is remote, the filesystem can be forced to update the attributes
116 : * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can
117 : * suppress the update by passing AT_STATX_DONT_SYNC.
118 : *
119 : * Bits must have been set in request_mask to indicate which attributes the
120 : * caller wants retrieved. Any attribute not requested may be returned
121 : * anyway, but the value may be approximate, and, if remote, may not have been
122 : * synchronised with the server.
123 : *
124 : * 0 will be returned on success, and a -ve error code if unsuccessful.
125 : */
126 46475 : int vfs_getattr(const struct path *path, struct kstat *stat,
127 : u32 request_mask, unsigned int query_flags)
128 : {
129 46475 : int retval;
130 :
131 46475 : retval = security_inode_getattr(path);
132 46487 : if (retval)
133 : return retval;
134 46489 : return vfs_getattr_nosec(path, stat, request_mask, query_flags);
135 : }
136 : EXPORT_SYMBOL(vfs_getattr);
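
An in-kernel caller that already holds a struct path can query attributes roughly as follows; a minimal sketch with a made-up helper name:

	/* Sketch: return the current size of the object behind @path. */
	static loff_t foofs_path_size(const struct path *path)
	{
		struct kstat stat;
		int err;

		err = vfs_getattr(path, &stat, STATX_SIZE, AT_STATX_SYNC_AS_STAT);
		if (err)
			return err;
		return stat.size;
	}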
137 :
138 : /**
139 : * vfs_fstat - Get the basic attributes by file descriptor
140 : * @fd: The file descriptor referring to the file of interest
141 : * @stat: The result structure to fill in.
142 : *
143 : * This function is a wrapper around vfs_getattr(). The main difference is
144 : * that it uses a file descriptor to determine the file location.
145 : *
146 : * 0 will be returned on success, and a -ve error code if unsuccessful.
147 : */
148 36222 : int vfs_fstat(int fd, struct kstat *stat)
149 : {
150 36222 : struct fd f;
151 36222 : int error;
152 :
153 36222 : f = fdget_raw(fd);
154 36257 : if (!f.file)
155 : return -EBADF;
156 36253 : error = vfs_getattr(&f.file->f_path, stat, STATX_BASIC_STATS, 0);
157 36257 : fdput(f);
158 36257 : return error;
159 : }
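
The same query by descriptor, e.g. from an ioctl handler that is handed an fd by user space, could look like this (hypothetical names):

	/* Sketch: copy the size of the file behind @fd back to user space. */
	static long foofs_report_fd_size(int fd, loff_t __user *argp)
	{
		struct kstat stat;
		int err;

		err = vfs_fstat(fd, &stat);
		if (err)
			return err;
		return put_user(stat.size, argp);
	}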
160 :
161 : /**
162 : * vfs_statx - Get basic and extra attributes by filename
163 : * @dfd: A file descriptor representing the base dir for a relative filename
164 : * @filename: The name of the file of interest
165 : * @flags: Flags to control the query
166 : * @stat: The result structure to fill in.
167 : * @request_mask: STATX_xxx flags indicating what the caller wants
168 : *
169 : * This function is a wrapper around vfs_getattr(). The main difference is
170 : * that it uses a filename and base directory to determine the file location.
171 : * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
172 : * at the given name from being referenced.
173 : *
174 : * 0 will be returned on success, and a -ve error code if unsuccessful.
175 : */
176 14207 : static int vfs_statx(int dfd, const char __user *filename, int flags,
177 : struct kstat *stat, u32 request_mask)
178 : {
179 14207 : struct path path;
180 14207 : unsigned lookup_flags = 0;
181 14207 : int error;
182 :
183 14207 : if (flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | AT_EMPTY_PATH |
184 : AT_STATX_SYNC_TYPE))
185 : return -EINVAL;
186 :
187 14207 : if (!(flags & AT_SYMLINK_NOFOLLOW))
188 11102 : lookup_flags |= LOOKUP_FOLLOW;
189 14207 : if (!(flags & AT_NO_AUTOMOUNT))
190 0 : lookup_flags |= LOOKUP_AUTOMOUNT;
191 14207 : if (flags & AT_EMPTY_PATH)
192 26 : lookup_flags |= LOOKUP_EMPTY;
193 :
194 14181 : retry:
195 14207 : error = user_path_at(dfd, filename, lookup_flags, &path);
196 14207 : if (error)
197 3982 : goto out;
198 :
199 10225 : error = vfs_getattr(&path, stat, request_mask, flags);
200 10224 : stat->mnt_id = real_mount(path.mnt)->mnt_id;
201 10224 : stat->result_mask |= STATX_MNT_ID;
202 10224 : if (path.mnt->mnt_root == path.dentry)
203 709 : stat->attributes |= STATX_ATTR_MOUNT_ROOT;
204 10224 : stat->attributes_mask |= STATX_ATTR_MOUNT_ROOT;
205 10224 : path_put(&path);
206 20450 : if (retry_estale(error, lookup_flags)) {
207 0 : lookup_flags |= LOOKUP_REVAL;
208 0 : goto retry;
209 : }
210 10225 : out:
211 : return error;
212 : }
213 :
214 14207 : int vfs_fstatat(int dfd, const char __user *filename,
215 : struct kstat *stat, int flags)
216 : {
217 12762 : return vfs_statx(dfd, filename, flags | AT_NO_AUTOMOUNT,
218 : stat, STATX_BASIC_STATS);
219 : }
220 :
221 : #ifdef __ARCH_WANT_OLD_STAT
222 :
223 : /*
224 : * For backward compatibility? Maybe this should be moved
225 : * into arch/i386 instead?
226 : */
227 0 : static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
228 : {
229 0 : static int warncount = 5;
230 0 : struct __old_kernel_stat tmp;
231 :
232 0 : if (warncount > 0) {
233 0 : warncount--;
234 0 : printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
235 0 : current->comm);
236 0 : } else if (warncount < 0) {
237 : /* it's laughable, but... */
238 0 : warncount = 0;
239 : }
240 :
241 0 : memset(&tmp, 0, sizeof(struct __old_kernel_stat));
242 0 : tmp.st_dev = old_encode_dev(stat->dev);
243 0 : tmp.st_ino = stat->ino;
244 0 : if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
245 : return -EOVERFLOW;
246 0 : tmp.st_mode = stat->mode;
247 0 : tmp.st_nlink = stat->nlink;
248 0 : if (tmp.st_nlink != stat->nlink)
249 : return -EOVERFLOW;
250 0 : SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
251 0 : SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
252 0 : tmp.st_rdev = old_encode_dev(stat->rdev);
253 : #if BITS_PER_LONG == 32
254 : if (stat->size > MAX_NON_LFS)
255 : return -EOVERFLOW;
256 : #endif
257 0 : tmp.st_size = stat->size;
258 0 : tmp.st_atime = stat->atime.tv_sec;
259 0 : tmp.st_mtime = stat->mtime.tv_sec;
260 0 : tmp.st_ctime = stat->ctime.tv_sec;
261 0 : return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
262 : }
263 :
264 0 : SYSCALL_DEFINE2(stat, const char __user *, filename,
265 : struct __old_kernel_stat __user *, statbuf)
266 : {
267 0 : struct kstat stat;
268 0 : int error;
269 :
270 0 : error = vfs_stat(filename, &stat);
271 0 : if (error)
272 0 : return error;
273 :
274 0 : return cp_old_stat(&stat, statbuf);
275 : }
276 :
277 0 : SYSCALL_DEFINE2(lstat, const char __user *, filename,
278 : struct __old_kernel_stat __user *, statbuf)
279 : {
280 0 : struct kstat stat;
281 0 : int error;
282 :
283 0 : error = vfs_lstat(filename, &stat);
284 0 : if (error)
285 0 : return error;
286 :
287 0 : return cp_old_stat(&stat, statbuf);
288 : }
289 :
290 0 : SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
291 : {
292 0 : struct kstat stat;
293 0 : int error = vfs_fstat(fd, &stat);
294 :
295 0 : if (!error)
296 0 : error = cp_old_stat(&stat, statbuf);
297 :
298 0 : return error;
299 : }
300 :
301 : #endif /* __ARCH_WANT_OLD_STAT */
302 :
303 : #ifdef __ARCH_WANT_NEW_STAT
304 :
305 : #if BITS_PER_LONG == 32
306 : # define choose_32_64(a,b) a
307 : #else
308 : # define choose_32_64(a,b) b
309 : #endif
310 :
311 : #define valid_dev(x) choose_32_64(old_valid_dev(x),true)
312 : #define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)
313 :
314 : #ifndef INIT_STRUCT_STAT_PADDING
315 : # define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
316 : #endif
317 :
318 46476 : static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
319 : {
320 46476 : struct stat tmp;
321 :
322 46476 : if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
323 : return -EOVERFLOW;
324 : #if BITS_PER_LONG == 32
325 : if (stat->size > MAX_NON_LFS)
326 : return -EOVERFLOW;
327 : #endif
328 :
329 46476 : INIT_STRUCT_STAT_PADDING(tmp);
330 46476 : tmp.st_dev = encode_dev(stat->dev);
331 46476 : tmp.st_ino = stat->ino;
332 46476 : if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
333 : return -EOVERFLOW;
334 46476 : tmp.st_mode = stat->mode;
335 46476 : tmp.st_nlink = stat->nlink;
336 46476 : if (tmp.st_nlink != stat->nlink)
337 : return -EOVERFLOW;
338 46476 : SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
339 46476 : SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
340 46476 : tmp.st_rdev = encode_dev(stat->rdev);
341 46476 : tmp.st_size = stat->size;
342 46476 : tmp.st_atime = stat->atime.tv_sec;
343 46476 : tmp.st_mtime = stat->mtime.tv_sec;
344 46476 : tmp.st_ctime = stat->ctime.tv_sec;
345 : #ifdef STAT_HAVE_NSEC
346 46476 : tmp.st_atime_nsec = stat->atime.tv_nsec;
347 46476 : tmp.st_mtime_nsec = stat->mtime.tv_nsec;
348 46476 : tmp.st_ctime_nsec = stat->ctime.tv_nsec;
349 : #endif
350 46476 : tmp.st_blocks = stat->blocks;
351 46476 : tmp.st_blksize = stat->blksize;
352 46476 : return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
353 : }
354 :
355 21924 : SYSCALL_DEFINE2(newstat, const char __user *, filename,
356 : struct stat __user *, statbuf)
357 : {
358 10962 : struct kstat stat;
359 10962 : int error = vfs_stat(filename, &stat);
360 :
361 10962 : if (error)
362 3492 : return error;
363 7470 : return cp_new_stat(&stat, statbuf);
364 : }
365 :
366 3600 : SYSCALL_DEFINE2(newlstat, const char __user *, filename,
367 : struct stat __user *, statbuf)
368 : {
369 1800 : struct kstat stat;
370 1800 : int error;
371 :
372 1800 : error = vfs_lstat(filename, &stat);
373 1799 : if (error)
374 472 : return error;
375 :
376 1327 : return cp_new_stat(&stat, statbuf);
377 : }
378 :
379 : #if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
380 2890 : SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
381 : struct stat __user *, statbuf, int, flag)
382 : {
383 1445 : struct kstat stat;
384 1445 : int error;
385 :
386 1445 : error = vfs_fstatat(dfd, filename, &stat, flag);
387 1445 : if (error)
388 18 : return error;
389 1427 : return cp_new_stat(&stat, statbuf);
390 : }
391 : #endif
392 :
393 72473 : SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
394 : {
395 36232 : struct kstat stat;
396 36232 : int error = vfs_fstat(fd, &stat);
397 :
398 36262 : if (!error)
399 36261 : error = cp_new_stat(&stat, statbuf);
400 :
401 36245 : return error;
402 : }
403 : #endif
404 :
405 1417 : static int do_readlinkat(int dfd, const char __user *pathname,
406 : char __user *buf, int bufsiz)
407 : {
408 1417 : struct path path;
409 1417 : int error;
410 1417 : int empty = 0;
411 1417 : unsigned int lookup_flags = LOOKUP_EMPTY;
412 :
413 1417 : if (bufsiz <= 0)
414 : return -EINVAL;
415 :
416 1417 : retry:
417 1417 : error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
418 1418 : if (!error) {
419 1160 : struct inode *inode = d_backing_inode(path.dentry);
420 :
421 1160 : error = empty ? -ENOENT : -EINVAL;
422 : /*
423 : * AFS mountpoints allow readlink(2) but are not symlinks
424 : */
425 1160 : if (d_is_symlink(path.dentry) || inode->i_op->readlink) {
426 1156 : error = security_inode_readlink(path.dentry);
427 1156 : if (!error) {
428 1156 : touch_atime(&path);
429 1156 : error = vfs_readlink(path.dentry, buf, bufsiz);
430 : }
431 : }
432 1159 : path_put(&path);
433 2320 : if (retry_estale(error, lookup_flags)) {
434 0 : lookup_flags |= LOOKUP_REVAL;
435 0 : goto retry;
436 : }
437 : }
438 : return error;
439 : }
440 :
441 2302 : SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
442 : char __user *, buf, int, bufsiz)
443 : {
444 1151 : return do_readlinkat(dfd, pathname, buf, bufsiz);
445 : }
446 :
447 533 : SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
448 : int, bufsiz)
449 : {
450 266 : return do_readlinkat(AT_FDCWD, path, buf, bufsiz);
451 : }
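
For completeness: the buffer filled in by vfs_readlink() above, and hence by readlink(2)/readlinkat(2), is not NUL-terminated, so a typical user-space caller terminates it itself. A sketch:

	#include <stdio.h>
	#include <unistd.h>

	/* Sketch: print a symlink target, adding the missing terminator. */
	static int print_link_target(const char *path)
	{
		char buf[4096];
		ssize_t len = readlink(path, buf, sizeof(buf) - 1);

		if (len < 0)
			return -1;
		buf[len] = '\0';
		printf("%s -> %s\n", path, buf);
		return 0;
	}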
452 :
453 :
454 : /* ---------- LFS-64 ----------- */
455 : #if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)
456 :
457 : #ifndef INIT_STRUCT_STAT64_PADDING
458 : # define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
459 : #endif
460 :
461 : static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
462 : {
463 : struct stat64 tmp;
464 :
465 : INIT_STRUCT_STAT64_PADDING(tmp);
466 : #ifdef CONFIG_MIPS
467 : /* mips has weird padding, so we don't get 64 bits there */
468 : tmp.st_dev = new_encode_dev(stat->dev);
469 : tmp.st_rdev = new_encode_dev(stat->rdev);
470 : #else
471 : tmp.st_dev = huge_encode_dev(stat->dev);
472 : tmp.st_rdev = huge_encode_dev(stat->rdev);
473 : #endif
474 : tmp.st_ino = stat->ino;
475 : if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
476 : return -EOVERFLOW;
477 : #ifdef STAT64_HAS_BROKEN_ST_INO
478 : tmp.__st_ino = stat->ino;
479 : #endif
480 : tmp.st_mode = stat->mode;
481 : tmp.st_nlink = stat->nlink;
482 : tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
483 : tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
484 : tmp.st_atime = stat->atime.tv_sec;
485 : tmp.st_atime_nsec = stat->atime.tv_nsec;
486 : tmp.st_mtime = stat->mtime.tv_sec;
487 : tmp.st_mtime_nsec = stat->mtime.tv_nsec;
488 : tmp.st_ctime = stat->ctime.tv_sec;
489 : tmp.st_ctime_nsec = stat->ctime.tv_nsec;
490 : tmp.st_size = stat->size;
491 : tmp.st_blocks = stat->blocks;
492 : tmp.st_blksize = stat->blksize;
493 : return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
494 : }
495 :
496 : SYSCALL_DEFINE2(stat64, const char __user *, filename,
497 : struct stat64 __user *, statbuf)
498 : {
499 : struct kstat stat;
500 : int error = vfs_stat(filename, &stat);
501 :
502 : if (!error)
503 : error = cp_new_stat64(&stat, statbuf);
504 :
505 : return error;
506 : }
507 :
508 : SYSCALL_DEFINE2(lstat64, const char __user *, filename,
509 : struct stat64 __user *, statbuf)
510 : {
511 : struct kstat stat;
512 : int error = vfs_lstat(filename, &stat);
513 :
514 : if (!error)
515 : error = cp_new_stat64(&stat, statbuf);
516 :
517 : return error;
518 : }
519 :
520 : SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
521 : {
522 : struct kstat stat;
523 : int error = vfs_fstat(fd, &stat);
524 :
525 : if (!error)
526 : error = cp_new_stat64(&stat, statbuf);
527 :
528 : return error;
529 : }
530 :
531 : SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
532 : struct stat64 __user *, statbuf, int, flag)
533 : {
534 : struct kstat stat;
535 : int error;
536 :
537 : error = vfs_fstatat(dfd, filename, &stat, flag);
538 : if (error)
539 : return error;
540 : return cp_new_stat64(&stat, statbuf);
541 : }
542 : #endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */
543 :
544 : static noinline_for_stack int
545 0 : cp_statx(const struct kstat *stat, struct statx __user *buffer)
546 : {
547 0 : struct statx tmp;
548 :
549 0 : memset(&tmp, 0, sizeof(tmp));
550 :
551 0 : tmp.stx_mask = stat->result_mask;
552 0 : tmp.stx_blksize = stat->blksize;
553 0 : tmp.stx_attributes = stat->attributes;
554 0 : tmp.stx_nlink = stat->nlink;
555 0 : tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
556 0 : tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
557 0 : tmp.stx_mode = stat->mode;
558 0 : tmp.stx_ino = stat->ino;
559 0 : tmp.stx_size = stat->size;
560 0 : tmp.stx_blocks = stat->blocks;
561 0 : tmp.stx_attributes_mask = stat->attributes_mask;
562 0 : tmp.stx_atime.tv_sec = stat->atime.tv_sec;
563 0 : tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
564 0 : tmp.stx_btime.tv_sec = stat->btime.tv_sec;
565 0 : tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
566 0 : tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
567 0 : tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
568 0 : tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
569 0 : tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
570 0 : tmp.stx_rdev_major = MAJOR(stat->rdev);
571 0 : tmp.stx_rdev_minor = MINOR(stat->rdev);
572 0 : tmp.stx_dev_major = MAJOR(stat->dev);
573 0 : tmp.stx_dev_minor = MINOR(stat->dev);
574 0 : tmp.stx_mnt_id = stat->mnt_id;
575 :
576 0 : return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
577 : }
578 :
579 0 : int do_statx(int dfd, const char __user *filename, unsigned flags,
580 : unsigned int mask, struct statx __user *buffer)
581 : {
582 0 : struct kstat stat;
583 0 : int error;
584 :
585 0 : if (mask & STATX__RESERVED)
586 : return -EINVAL;
587 0 : if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
588 : return -EINVAL;
589 :
590 0 : error = vfs_statx(dfd, filename, flags, &stat, mask);
591 0 : if (error)
592 : return error;
593 :
594 0 : return cp_statx(&stat, buffer);
595 : }
596 :
597 : /**
598 : * sys_statx - System call to get enhanced stats
599 : * @dfd: Base directory to pathwalk from *or* fd to stat.
600 : * @filename: File to stat or "" with AT_EMPTY_PATH
601 : * @flags: AT_* flags to control pathwalk.
602 : * @mask: Parts of statx struct actually required.
603 : * @buffer: Result buffer.
604 : *
605 : * Note that fstat() can be emulated by setting dfd to the fd of interest,
606 : * supplying "" as the filename and setting AT_EMPTY_PATH in the flags.
607 : */
608 0 : SYSCALL_DEFINE5(statx,
609 : int, dfd, const char __user *, filename, unsigned, flags,
610 : unsigned int, mask,
611 : struct statx __user *, buffer)
612 : {
613 0 : return do_statx(dfd, filename, flags, mask, buffer);
614 : }
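
Seen from user space, the fstat() emulation described in the comment above looks like this (a sketch; the statx() wrapper needs glibc 2.28 or later):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <sys/stat.h>

	/* Sketch: stat an already-open descriptor via statx(2). */
	static int statx_fd(int fd, struct statx *stx)
	{
		return statx(fd, "", AT_EMPTY_PATH,
			     STATX_BASIC_STATS | STATX_BTIME, stx);
	}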
615 :
616 : #ifdef CONFIG_COMPAT
617 0 : static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
618 : {
619 0 : struct compat_stat tmp;
620 :
621 0 : if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
622 : return -EOVERFLOW;
623 :
624 0 : memset(&tmp, 0, sizeof(tmp));
625 0 : tmp.st_dev = old_encode_dev(stat->dev);
626 0 : tmp.st_ino = stat->ino;
627 0 : if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
628 : return -EOVERFLOW;
629 0 : tmp.st_mode = stat->mode;
630 0 : tmp.st_nlink = stat->nlink;
631 0 : if (tmp.st_nlink != stat->nlink)
632 : return -EOVERFLOW;
633 0 : SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
634 0 : SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
635 0 : tmp.st_rdev = old_encode_dev(stat->rdev);
636 0 : if ((u64) stat->size > MAX_NON_LFS)
637 : return -EOVERFLOW;
638 0 : tmp.st_size = stat->size;
639 0 : tmp.st_atime = stat->atime.tv_sec;
640 0 : tmp.st_atime_nsec = stat->atime.tv_nsec;
641 0 : tmp.st_mtime = stat->mtime.tv_sec;
642 0 : tmp.st_mtime_nsec = stat->mtime.tv_nsec;
643 0 : tmp.st_ctime = stat->ctime.tv_sec;
644 0 : tmp.st_ctime_nsec = stat->ctime.tv_nsec;
645 0 : tmp.st_blocks = stat->blocks;
646 0 : tmp.st_blksize = stat->blksize;
647 0 : return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
648 : }
649 :
650 0 : COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename,
651 : struct compat_stat __user *, statbuf)
652 : {
653 0 : struct kstat stat;
654 0 : int error;
655 :
656 0 : error = vfs_stat(filename, &stat);
657 0 : if (error)
658 0 : return error;
659 0 : return cp_compat_stat(&stat, statbuf);
660 : }
661 :
662 0 : COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename,
663 : struct compat_stat __user *, statbuf)
664 : {
665 0 : struct kstat stat;
666 0 : int error;
667 :
668 0 : error = vfs_lstat(filename, &stat);
669 0 : if (error)
670 0 : return error;
671 0 : return cp_compat_stat(&stat, statbuf);
672 : }
673 :
674 : #ifndef __ARCH_WANT_STAT64
675 0 : COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd,
676 : const char __user *, filename,
677 : struct compat_stat __user *, statbuf, int, flag)
678 : {
679 0 : struct kstat stat;
680 0 : int error;
681 :
682 0 : error = vfs_fstatat(dfd, filename, &stat, flag);
683 0 : if (error)
684 0 : return error;
685 0 : return cp_compat_stat(&stat, statbuf);
686 : }
687 : #endif
688 :
689 0 : COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd,
690 : struct compat_stat __user *, statbuf)
691 : {
692 0 : struct kstat stat;
693 0 : int error = vfs_fstat(fd, &stat);
694 :
695 0 : if (!error)
696 0 : error = cp_compat_stat(&stat, statbuf);
697 0 : return error;
698 : }
699 : #endif
700 :
701 : /* Caller is responsible for sufficient locking (i.e. inode->i_lock) */
702 244 : void __inode_add_bytes(struct inode *inode, loff_t bytes)
703 : {
704 244 : inode->i_blocks += bytes >> 9;
705 244 : bytes &= 511;
706 244 : inode->i_bytes += bytes;
707 244 : if (inode->i_bytes >= 512) {
708 0 : inode->i_blocks++;
709 0 : inode->i_bytes -= 512;
710 : }
711 244 : }
712 : EXPORT_SYMBOL(__inode_add_bytes);
713 :
714 244 : void inode_add_bytes(struct inode *inode, loff_t bytes)
715 : {
716 244 : spin_lock(&inode->i_lock);
717 244 : __inode_add_bytes(inode, bytes);
718 244 : spin_unlock(&inode->i_lock);
719 244 : }
720 :
721 : EXPORT_SYMBOL(inode_add_bytes);
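
A worked example of the split accounting kept by __inode_add_bytes(), using arbitrary starting values:

	/*
	 * An inode at i_blocks = 10, i_bytes = 400 accounts for
	 * 10 * 512 + 400 = 5520 bytes.  inode_add_bytes(inode, 1300) adds
	 * 1300 >> 9 = 2 blocks plus a 276-byte remainder; 400 + 276 = 676
	 * overflows 512, so one extra block is carried, giving
	 * i_blocks = 13 and i_bytes = 164, i.e.
	 * 13 * 512 + 164 = 6820 = 5520 + 1300.
	 */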
722 :
723 206 : void __inode_sub_bytes(struct inode *inode, loff_t bytes)
724 : {
725 206 : inode->i_blocks -= bytes >> 9;
726 206 : bytes &= 511;
727 206 : if (inode->i_bytes < bytes) {
728 0 : inode->i_blocks--;
729 0 : inode->i_bytes += 512;
730 : }
731 206 : inode->i_bytes -= bytes;
732 206 : }
733 :
734 : EXPORT_SYMBOL(__inode_sub_bytes);
735 :
736 206 : void inode_sub_bytes(struct inode *inode, loff_t bytes)
737 : {
738 206 : spin_lock(&inode->i_lock);
739 206 : __inode_sub_bytes(inode, bytes);
740 206 : spin_unlock(&inode->i_lock);
741 206 : }
742 :
743 : EXPORT_SYMBOL(inode_sub_bytes);
744 :
745 0 : loff_t inode_get_bytes(struct inode *inode)
746 : {
747 0 : loff_t ret;
748 :
749 0 : spin_lock(&inode->i_lock);
750 0 : ret = __inode_get_bytes(inode);
751 0 : spin_unlock(&inode->i_lock);
752 0 : return ret;
753 : }
754 :
755 : EXPORT_SYMBOL(inode_get_bytes);
756 :
757 0 : void inode_set_bytes(struct inode *inode, loff_t bytes)
758 : {
759 : /* Caller is responsible for sufficient locking
760 : * (i.e. inode->i_lock) */
761 0 : inode->i_blocks = bytes >> 9;
762 0 : inode->i_bytes = bytes & 511;
763 0 : }
764 :
765 : EXPORT_SYMBOL(inode_set_bytes);
|