// SPDX-License-Identifier: GPL-2.0-only
/*
 * Landlock LSM - Filesystem management and hooks
 *
 * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
 * Copyright © 2018-2020 ANSSI
 */

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/compiler_types.h>
#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/lsm_hooks.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/workqueue.h>
#include <uapi/linux/landlock.h>

#include "common.h"
#include "cred.h"
#include "fs.h"
#include "limits.h"
#include "object.h"
#include "ruleset.h"
#include "setup.h"

/* Underlying object management */

static void release_inode(struct landlock_object *const object)
	__releases(object->lock)
{
	struct inode *const inode = object->underobj;
	struct super_block *sb;

	if (!inode) {
		spin_unlock(&object->lock);
		return;
	}

	/*
	 * Protects against concurrent use by hook_sb_delete() of the reference
	 * to the underlying inode.
	 */
	object->underobj = NULL;
	/*
	 * Makes sure that if the filesystem is concurrently unmounted,
	 * hook_sb_delete() will wait for us to finish iput().
	 */
	sb = inode->i_sb;
	atomic_long_inc(&landlock_superblock(sb)->inode_refs);
	spin_unlock(&object->lock);
	/*
	 * Because object->underobj was not NULL, hook_sb_delete() and
	 * get_inode_object() guarantee that it is safe to reset
	 * landlock_inode(inode)->object while it is not NULL. It is therefore
	 * not necessary to lock inode->i_lock.
	 */
	rcu_assign_pointer(landlock_inode(inode)->object, NULL);
	/*
	 * Now, new rules can safely be tied to @inode with get_inode_object().
	 */

	iput(inode);
	if (atomic_long_dec_and_test(&landlock_superblock(sb)->inode_refs))
		wake_up_var(&landlock_superblock(sb)->inode_refs);
}

static const struct landlock_object_underops landlock_fs_underops = {
	.release = release_inode
};

/* Ruleset management */

static struct landlock_object *get_inode_object(struct inode *const inode)
{
	struct landlock_object *object, *new_object;
	struct landlock_inode_security *inode_sec = landlock_inode(inode);

	rcu_read_lock();
retry:
	object = rcu_dereference(inode_sec->object);
	if (object) {
		if (likely(refcount_inc_not_zero(&object->usage))) {
			rcu_read_unlock();
			return object;
		}
		/*
		 * We are racing with release_inode(): the object is going
		 * away. Wait for release_inode() to finish, then retry.
		 */
		spin_lock(&object->lock);
		spin_unlock(&object->lock);
		goto retry;
	}
	rcu_read_unlock();

	/*
	 * If there is no object tied to @inode, then create a new one (without
	 * holding any locks).
	 */
	new_object = landlock_create_object(&landlock_fs_underops, inode);
	if (IS_ERR(new_object))
		return new_object;

	/*
	 * Protects against concurrent calls to get_inode_object() or
	 * hook_sb_delete().
	 */
	spin_lock(&inode->i_lock);
	if (unlikely(rcu_access_pointer(inode_sec->object))) {
		/* Someone else just created the object, bail out and retry. */
		spin_unlock(&inode->i_lock);
		kfree(new_object);

		rcu_read_lock();
		goto retry;
	}

	/*
	 * @inode will be released by hook_sb_delete() on its superblock
	 * shutdown, or by release_inode() when no ruleset references the
	 * related object anymore.
	 */
	ihold(inode);
	rcu_assign_pointer(inode_sec->object, new_object);
	spin_unlock(&inode->i_lock);
	return new_object;
}
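
/*
 * A possible interleaving for the retry path above (a sketch, assuming
 * landlock_put_object() from object.c concurrently drops the last reference
 * of the same object; it calls release_inode() with object->lock held):
 *
 *	T1: object = rcu_dereference(inode_sec->object)	(object found)
 *	T2: landlock_put_object(): usage drops to zero,
 *	    object->lock is taken, release_inode() starts
 *	T1: refcount_inc_not_zero(&object->usage)	(fails: usage == 0)
 *	T1: spin_lock(&object->lock)			(blocks on T2)
 *	T2: release_inode() resets object->underobj and
 *	    landlock_inode(inode)->object, then releases the lock
 *	T1: acquires then releases the lock, retries, and eventually
 *	    observes a NULL pointer, so it creates a fresh object
 */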

/* All access rights that can be tied to files. */
#define ACCESS_FILE ( \
	LANDLOCK_ACCESS_FS_EXECUTE | \
	LANDLOCK_ACCESS_FS_WRITE_FILE | \
	LANDLOCK_ACCESS_FS_READ_FILE)

/*
 * @path: Should have been checked by get_path_from_fd().
 */
int landlock_append_fs_rule(struct landlock_ruleset *const ruleset,
		const struct path *const path, u32 access_rights)
{
	int err;
	struct landlock_object *object;

	/* Files only get access rights that make sense. */
	if (!d_is_dir(path->dentry) && (access_rights | ACCESS_FILE) !=
			ACCESS_FILE)
		return -EINVAL;
	if (WARN_ON_ONCE(ruleset->num_layers != 1))
		return -EINVAL;

	/* Transforms relative access rights to absolute ones. */
	access_rights |= LANDLOCK_MASK_ACCESS_FS & ~ruleset->fs_access_masks[0];
	object = get_inode_object(d_backing_inode(path->dentry));
	if (IS_ERR(object))
		return PTR_ERR(object);
	mutex_lock(&ruleset->lock);
	err = landlock_insert_rule(ruleset, object, access_rights);
	mutex_unlock(&ruleset->lock);
	/*
	 * No need to check for an error because landlock_insert_rule()
	 * increments the refcount for the new object if needed.
	 */
	landlock_put_object(object);
	return err;
}
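
/*
 * User-space sketch (illustrative, not part of this file): a rule handled by
 * landlock_append_fs_rule() typically originates from the landlock_add_rule(2)
 * syscall. Assuming a landlock_add_rule() wrapper around syscall(2) and an
 * already created ruleset_fd:
 *
 *	struct landlock_path_beneath_attr path_beneath = {
 *		.allowed_access = LANDLOCK_ACCESS_FS_READ_FILE |
 *				  LANDLOCK_ACCESS_FS_READ_DIR,
 *	};
 *
 *	path_beneath.parent_fd = open("/usr", O_PATH | O_CLOEXEC);
 *	landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
 *			  &path_beneath, 0);
 *	close(path_beneath.parent_fd);
 *
 * If parent_fd referred to a non-directory while allowed_access contained
 * LANDLOCK_ACCESS_FS_READ_DIR, the ACCESS_FILE check above would return
 * -EINVAL.
 */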

/* Access-control management */

static inline u64 unmask_layers(
		const struct landlock_ruleset *const domain,
		const struct path *const path, const u32 access_request,
		u64 layer_mask)
{
	const struct landlock_rule *rule;
	const struct inode *inode;
	size_t i;

	if (d_is_negative(path->dentry))
		/* Ignores nonexistent leaves. */
		return layer_mask;
	inode = d_backing_inode(path->dentry);
	rcu_read_lock();
	rule = landlock_find_rule(domain,
			rcu_dereference(landlock_inode(inode)->object));
	rcu_read_unlock();
	if (!rule)
		return layer_mask;

	/*
	 * An access is granted if, for each policy layer, at least one rule
	 * encountered on the pathwalk grants the requested accesses,
	 * regardless of their position in the layer stack. We must then check
	 * the remaining layers for each inode, from the first added layer to
	 * the last one.
	 */
	for (i = 0; i < rule->num_layers; i++) {
		const struct landlock_layer *const layer = &rule->layers[i];
		const u64 layer_level = BIT_ULL(layer->level - 1);

		/* Checks that the layer grants access to the full request. */
		if ((layer->access & access_request) == access_request) {
			layer_mask &= ~layer_level;

			if (layer_mask == 0)
				return layer_mask;
		}
	}
	return layer_mask;
}
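
/*
 * Worked example (a sketch, with hypothetical layer contents): consider a
 * domain with two layers, where layer 1 handles LANDLOCK_ACCESS_FS_READ_FILE
 * and layer 2 handles READ_FILE and WRITE_FILE, and a request for
 * READ_FILE | WRITE_FILE. check_access_path() seeds
 * layer_mask = BIT_ULL(0) | BIT_ULL(1). If the walk reaches an inode whose
 * rule carries { .level = 2, .access = READ_FILE | WRITE_FILE }, only
 * BIT_ULL(1) is cleared: layer 2 grants the full request, but layer 1 has
 * not granted anything yet, so the walk continues towards the root until
 * BIT_ULL(0) is cleared as well or the real root is reached.
 */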

static int check_access_path(const struct landlock_ruleset *const domain,
		const struct path *const path, u32 access_request)
{
	bool allowed = false;
	struct path walker_path;
	u64 layer_mask;
	size_t i;

	/* Make sure all layers can be checked. */
	BUILD_BUG_ON(BITS_PER_TYPE(layer_mask) < LANDLOCK_MAX_NUM_LAYERS);

	if (!access_request)
		return 0;
	if (WARN_ON_ONCE(!domain || !path))
		return 0;
	/*
	 * Allows access to pseudo filesystems that will never be mountable
	 * (e.g. sockfs, pipefs), but are still reachable through
	 * /proc/<pid>/fd/<file-descriptor>.
	 */
	if ((path->dentry->d_sb->s_flags & SB_NOUSER) ||
			(d_is_positive(path->dentry) &&
			 unlikely(IS_PRIVATE(d_backing_inode(path->dentry)))))
		return 0;
	if (WARN_ON_ONCE(domain->num_layers < 1))
		return -EACCES;

	/* Saves all layers handling a subset of requested accesses. */
	layer_mask = 0;
	for (i = 0; i < domain->num_layers; i++) {
		if (domain->fs_access_masks[i] & access_request)
			layer_mask |= BIT_ULL(i);
	}
	/* An access request not handled by the domain is allowed. */
	if (layer_mask == 0)
		return 0;

	walker_path = *path;
	path_get(&walker_path);
	/*
	 * We need to walk up the whole directory hierarchy so as not to miss
	 * any relevant restriction.
	 */
	while (true) {
		struct dentry *parent_dentry;

		layer_mask = unmask_layers(domain, &walker_path,
				access_request, layer_mask);
		if (layer_mask == 0) {
			/* Stops when a rule from each layer grants access. */
			allowed = true;
			break;
		}

jump_up:
		if (walker_path.dentry == walker_path.mnt->mnt_root) {
			if (follow_up(&walker_path)) {
				/* Ignores hidden mount points. */
				goto jump_up;
			} else {
				/*
				 * Stops at the real root. Denies access
				 * because not all layers have granted access.
				 */
				allowed = false;
				break;
			}
		}
		if (unlikely(IS_ROOT(walker_path.dentry))) {
			/*
			 * Stops at disconnected root directories. Only allows
			 * access to internal filesystems (e.g. nsfs, which is
			 * reachable through /proc/<pid>/ns/<namespace>).
			 */
			allowed = !!(walker_path.mnt->mnt_flags & MNT_INTERNAL);
			break;
		}
		parent_dentry = dget_parent(walker_path.dentry);
		dput(walker_path.dentry);
		walker_path.dentry = parent_dentry;
	}
	path_put(&walker_path);
	return allowed ? 0 : -EACCES;
}
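
/*
 * Walk trace (a sketch, with a hypothetical one-layer domain): assume the
 * domain handles LANDLOCK_ACCESS_FS_READ_FILE and contains a single rule
 * tied to /usr. Opening /usr/bin/true for reading is checked as follows:
 * unmask_layers() finds no rule for "true" nor for "bin", then finds the
 * /usr rule, which grants READ_FILE and clears the last layer_mask bit, so
 * the loop stops with allowed = true. Without the /usr rule, the walk would
 * follow_up() through mount points up to the real root and the function
 * would return -EACCES.
 */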

static inline int current_check_access_path(const struct path *const path,
		const u32 access_request)
{
	const struct landlock_ruleset *const dom =
		landlock_get_current_domain();

	if (!dom)
		return 0;
	return check_access_path(dom, path, access_request);
}

/* Inode hooks */

static void hook_inode_free_security(struct inode *const inode)
{
	/*
	 * All inodes must already have been untied from their object by
	 * release_inode() or hook_sb_delete().
	 */
	WARN_ON_ONCE(landlock_inode(inode)->object);
}

/* Super-block hooks */

/*
 * Release the inodes used in a security policy.
 *
 * Cf. fsnotify_unmount_inodes() and invalidate_inodes()
 */
static void hook_sb_delete(struct super_block *const sb)
{
	struct inode *inode, *prev_inode = NULL;

	if (!landlock_initialized)
		return;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct landlock_object *object;

		/* Only handles referenced inodes. */
		if (!atomic_read(&inode->i_count))
			continue;

		/*
		 * Protects against concurrent modification of inode (e.g.
		 * from get_inode_object()).
		 */
		spin_lock(&inode->i_lock);
		/*
		 * Checks I_FREEING and I_WILL_FREE to protect against a race
		 * condition when release_inode() just called iput(), which
		 * could lead to a NULL dereference of inode->security or a
		 * second call to iput() for the same Landlock object. Also
		 * checks I_NEW because such an inode cannot be tied to an
		 * object.
		 */
		if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		rcu_read_lock();
		object = rcu_dereference(landlock_inode(inode)->object);
		if (!object) {
			rcu_read_unlock();
			spin_unlock(&inode->i_lock);
			continue;
		}
		/* Keeps a reference to this inode until the next loop walk. */
		__iget(inode);
		spin_unlock(&inode->i_lock);

		/*
		 * If there is no concurrent release_inode() ongoing, then we
		 * are in charge of calling iput() on this inode, otherwise we
		 * will just wait for it to finish.
		 */
		spin_lock(&object->lock);
		if (object->underobj == inode) {
			object->underobj = NULL;
			spin_unlock(&object->lock);
			rcu_read_unlock();

			/*
			 * Because object->underobj was not NULL,
			 * release_inode() and get_inode_object() guarantee
			 * that it is safe to reset
			 * landlock_inode(inode)->object while it is not NULL.
			 * It is therefore not necessary to lock inode->i_lock.
			 */
			rcu_assign_pointer(landlock_inode(inode)->object, NULL);
			/*
			 * At this point, we own the ihold() reference that was
			 * originally set up by get_inode_object() and the
			 * __iget() reference that we just set in this loop
			 * walk. Therefore the following call to iput() will
			 * not sleep nor drop the inode because there are now
			 * at least two references to it.
			 */
			iput(inode);
		} else {
			spin_unlock(&object->lock);
			rcu_read_unlock();
		}

		if (prev_inode) {
			/*
			 * At this point, we still own the __iget() reference
			 * that we just set in this loop walk. Therefore we
			 * can drop the list lock and know that the inode won't
			 * disappear from under us until the next loop walk.
			 */
			spin_unlock(&sb->s_inode_list_lock);
			/*
			 * We can now actually put the inode reference from the
			 * previous loop walk, which is not needed anymore.
			 */
			iput(prev_inode);
			cond_resched();
			spin_lock(&sb->s_inode_list_lock);
		}
		prev_inode = inode;
	}
	spin_unlock(&sb->s_inode_list_lock);

	/* Puts the inode reference from the last loop walk, if any. */
	if (prev_inode)
		iput(prev_inode);
	/* Waits for pending iput() in release_inode(). */
	wait_var_event(&landlock_superblock(sb)->inode_refs, !atomic_long_read(
				&landlock_superblock(sb)->inode_refs));
}

/*
 * Because a Landlock security policy is defined according to the filesystem
 * topology (i.e. the mount namespace), changing it may grant access to files
 * not previously allowed.
 *
 * To make it simple, deny any filesystem topology modification by landlocked
 * processes. Non-landlocked processes may still change the namespace of a
 * landlocked process, but this kind of threat must be handled by a
 * system-wide access-control security policy.
 *
 * This could be lifted in the future if Landlock can safely handle mount
 * namespace updates requested by a landlocked process. Indeed, we could
 * update the current domain (which is currently read-only) by taking into
 * account the accesses of the source and the destination of a new mount
 * point. However, it would also require making all the child domains
 * dynamically inherit these new constraints. Anyway, for backward
 * compatibility reasons, a dedicated user space option would be required
 * (e.g. as a ruleset flag).
 */
static int hook_sb_mount(const char *const dev_name,
		const struct path *const path, const char *const type,
		const unsigned long flags, void *const data)
{
	if (!landlock_get_current_domain())
		return 0;
	return -EPERM;
}

static int hook_move_mount(const struct path *const from_path,
		const struct path *const to_path)
{
	if (!landlock_get_current_domain())
		return 0;
	return -EPERM;
}

/*
 * Removing a mount point may reveal a previously hidden file hierarchy, and
 * may therefore grant access to files that were previously forbidden.
 */
static int hook_sb_umount(struct vfsmount *const mnt, const int flags)
{
	if (!landlock_get_current_domain())
		return 0;
	return -EPERM;
}

static int hook_sb_remount(struct super_block *const sb, void *const mnt_opts)
{
	if (!landlock_get_current_domain())
		return 0;
	return -EPERM;
}

/*
 * pivot_root(2), like mount(2), changes the current mount namespace. It must
 * then be forbidden for a landlocked process.
 *
 * However, chroot(2) may be allowed because it only changes the relative root
 * directory of the current process. Moreover, it can be used to restrict the
 * view of the filesystem.
 */
static int hook_sb_pivotroot(const struct path *const old_path,
		const struct path *const new_path)
{
	if (!landlock_get_current_domain())
		return 0;
	return -EPERM;
}

/* Path hooks */

static inline u32 get_mode_access(const umode_t mode)
{
	switch (mode & S_IFMT) {
	case S_IFLNK:
		return LANDLOCK_ACCESS_FS_MAKE_SYM;
	case 0:
		/* A zero mode translates to S_IFREG. */
	case S_IFREG:
		return LANDLOCK_ACCESS_FS_MAKE_REG;
	case S_IFDIR:
		return LANDLOCK_ACCESS_FS_MAKE_DIR;
	case S_IFCHR:
		return LANDLOCK_ACCESS_FS_MAKE_CHAR;
	case S_IFBLK:
		return LANDLOCK_ACCESS_FS_MAKE_BLOCK;
	case S_IFIFO:
		return LANDLOCK_ACCESS_FS_MAKE_FIFO;
	case S_IFSOCK:
		return LANDLOCK_ACCESS_FS_MAKE_SOCK;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}
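
/*
 * Example (a sketch): a landlocked mknod("/tmp/fifo", S_IFIFO | 0644, 0)
 * reaches hook_path_mknod() below, which translates the mode to
 * LANDLOCK_ACCESS_FS_MAKE_FIFO and checks it against the /tmp hierarchy.
 * An mknod(2) mode without any S_IFMT bits creates a regular file, hence
 * the zero case above mapping to LANDLOCK_ACCESS_FS_MAKE_REG.
 */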

/*
 * Creating multiple links or renaming may lead to privilege escalations if
 * not handled properly. Indeed, we must be sure that the source doesn't gain
 * more privileges by being accessible from the destination. This gets more
 * complex when dealing with multiple layers. The whole picture can be seen
 * as a multilayer partial ordering problem. A future version of Landlock
 * will deal with that.
 */
static int hook_path_link(struct dentry *const old_dentry,
		const struct path *const new_dir,
		struct dentry *const new_dentry)
{
	const struct landlock_ruleset *const dom =
		landlock_get_current_domain();

	if (!dom)
		return 0;
	/* The mount points are the same for old and new paths, cf. EXDEV. */
	if (old_dentry->d_parent != new_dir->dentry)
		/* Gracefully forbids reparenting. */
		return -EXDEV;
	if (unlikely(d_is_negative(old_dentry)))
		return -ENOENT;
	return check_access_path(dom, new_dir,
			get_mode_access(d_backing_inode(old_dentry)->i_mode));
}

static inline u32 maybe_remove(const struct dentry *const dentry)
{
	if (d_is_negative(dentry))
		return 0;
	return d_is_dir(dentry) ? LANDLOCK_ACCESS_FS_REMOVE_DIR :
		LANDLOCK_ACCESS_FS_REMOVE_FILE;
}

static int hook_path_rename(const struct path *const old_dir,
		struct dentry *const old_dentry,
		const struct path *const new_dir,
		struct dentry *const new_dentry)
{
	const struct landlock_ruleset *const dom =
		landlock_get_current_domain();

	if (!dom)
		return 0;
	/* The mount points are the same for old and new paths, cf. EXDEV. */
	if (old_dir->dentry != new_dir->dentry)
		/* Gracefully forbids reparenting. */
		return -EXDEV;
	if (unlikely(d_is_negative(old_dentry)))
		return -ENOENT;
	/* RENAME_EXCHANGE is handled because directories are the same. */
	return check_access_path(dom, old_dir, maybe_remove(old_dentry) |
			maybe_remove(new_dentry) |
			get_mode_access(d_backing_inode(old_dentry)->i_mode));
}

static int hook_path_mkdir(const struct path *const dir,
		struct dentry *const dentry, const umode_t mode)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_DIR);
}

static int hook_path_mknod(const struct path *const dir,
		struct dentry *const dentry, const umode_t mode,
		const unsigned int dev)
{
	const struct landlock_ruleset *const dom =
		landlock_get_current_domain();

	if (!dom)
		return 0;
	return check_access_path(dom, dir, get_mode_access(mode));
}

static int hook_path_symlink(const struct path *const dir,
		struct dentry *const dentry, const char *const old_name)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_SYM);
}

static int hook_path_unlink(const struct path *const dir,
		struct dentry *const dentry)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_FILE);
}

static int hook_path_rmdir(const struct path *const dir,
		struct dentry *const dentry)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_DIR);
}

/* File hooks */

static inline u32 get_file_access(const struct file *const file)
{
	u32 access = 0;

	if (file->f_mode & FMODE_READ) {
		/* A directory can only be opened in read mode. */
		if (S_ISDIR(file_inode(file)->i_mode))
			return LANDLOCK_ACCESS_FS_READ_DIR;
		access = LANDLOCK_ACCESS_FS_READ_FILE;
	}
	if (file->f_mode & FMODE_WRITE)
		access |= LANDLOCK_ACCESS_FS_WRITE_FILE;
	/* __FMODE_EXEC is indeed part of f_flags, not f_mode. */
	if (file->f_flags & __FMODE_EXEC)
		access |= LANDLOCK_ACCESS_FS_EXECUTE;
	return access;
}
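
/*
 * Example (a sketch): open("/etc/passwd", O_RDWR) sets FMODE_READ and
 * FMODE_WRITE, so get_file_access() returns
 * LANDLOCK_ACCESS_FS_READ_FILE | LANDLOCK_ACCESS_FS_WRITE_FILE. An
 * execve(2) opens the binary with FMODE_READ set and __FMODE_EXEC in
 * f_flags, yielding READ_FILE | EXECUTE, while open("/etc", O_RDONLY)
 * on a directory short-circuits to LANDLOCK_ACCESS_FS_READ_DIR.
 */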

static int hook_file_open(struct file *const file)
{
	const struct landlock_ruleset *const dom =
		landlock_get_current_domain();

	if (!dom)
		return 0;
	/*
	 * Because a file may be opened with O_PATH, get_file_access() may
	 * return 0. This case will be handled with a future Landlock
	 * evolution.
	 */
	return check_access_path(dom, &file->f_path, get_file_access(file));
}

static struct security_hook_list landlock_hooks[] __lsm_ro_after_init = {
	LSM_HOOK_INIT(inode_free_security, hook_inode_free_security),

	LSM_HOOK_INIT(sb_delete, hook_sb_delete),
	LSM_HOOK_INIT(sb_mount, hook_sb_mount),
	LSM_HOOK_INIT(move_mount, hook_move_mount),
	LSM_HOOK_INIT(sb_umount, hook_sb_umount),
	LSM_HOOK_INIT(sb_remount, hook_sb_remount),
	LSM_HOOK_INIT(sb_pivotroot, hook_sb_pivotroot),

	LSM_HOOK_INIT(path_link, hook_path_link),
	LSM_HOOK_INIT(path_rename, hook_path_rename),
	LSM_HOOK_INIT(path_mkdir, hook_path_mkdir),
	LSM_HOOK_INIT(path_mknod, hook_path_mknod),
	LSM_HOOK_INIT(path_symlink, hook_path_symlink),
	LSM_HOOK_INIT(path_unlink, hook_path_unlink),
	LSM_HOOK_INIT(path_rmdir, hook_path_rmdir),

	LSM_HOOK_INIT(file_open, hook_file_open),
};

__init void landlock_add_fs_hooks(void)
{
	security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks),
			LANDLOCK_NAME);
}