Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-only
2 : /*
3 : * Landlock LSM - Ruleset management
4 : *
5 : * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
6 : * Copyright © 2018-2020 ANSSI
7 : */
8 :
9 : #include <linux/bits.h>
10 : #include <linux/bug.h>
11 : #include <linux/compiler_types.h>
12 : #include <linux/err.h>
13 : #include <linux/errno.h>
14 : #include <linux/kernel.h>
15 : #include <linux/lockdep.h>
16 : #include <linux/overflow.h>
17 : #include <linux/rbtree.h>
18 : #include <linux/refcount.h>
19 : #include <linux/slab.h>
20 : #include <linux/spinlock.h>
21 : #include <linux/workqueue.h>
22 :
23 : #include "limits.h"
24 : #include "object.h"
25 : #include "ruleset.h"
26 :
27 221 : static struct landlock_ruleset *create_ruleset(const u32 num_layers)
28 : {
29 221 : struct landlock_ruleset *new_ruleset;
30 :
31 221 : new_ruleset = kzalloc(struct_size(new_ruleset, fs_access_masks,
32 : num_layers), GFP_KERNEL_ACCOUNT);
33 221 : if (!new_ruleset)
34 221 : return ERR_PTR(-ENOMEM);
35 221 : refcount_set(&new_ruleset->usage, 1);
36 221 : mutex_init(&new_ruleset->lock);
37 221 : new_ruleset->root = RB_ROOT;
38 221 : new_ruleset->num_layers = num_layers;
39 : /*
40 : * hierarchy = NULL
41 : * num_rules = 0
42 : * fs_access_masks[] = 0
43 : */
44 221 : return new_ruleset;
45 : }
46 :
47 81 : struct landlock_ruleset *landlock_create_ruleset(const u32 fs_access_mask)
48 : {
49 81 : struct landlock_ruleset *new_ruleset;
50 :
51 : /* Informs about useless ruleset. */
52 81 : if (!fs_access_mask)
53 81 : return ERR_PTR(-ENOMSG);
54 78 : new_ruleset = create_ruleset(1);
55 78 : if (!IS_ERR(new_ruleset))
56 78 : new_ruleset->fs_access_masks[0] = fs_access_mask;
57 : return new_ruleset;
58 : }
59 :
60 413 : static void build_check_rule(void)
61 : {
62 413 : const struct landlock_rule rule = {
63 : .num_layers = ~0,
64 : };
65 :
66 413 : BUILD_BUG_ON(rule.num_layers < LANDLOCK_MAX_NUM_LAYERS);
67 : }
68 :
69 413 : static struct landlock_rule *create_rule(
70 : struct landlock_object *const object,
71 : const struct landlock_layer (*const layers)[],
72 : const u32 num_layers,
73 : const struct landlock_layer *const new_layer)
74 : {
75 413 : struct landlock_rule *new_rule;
76 413 : u32 new_num_layers;
77 :
78 413 : build_check_rule();
79 413 : if (new_layer) {
80 : /* Should already be checked by landlock_merge_ruleset(). */
81 73 : if (WARN_ON_ONCE(num_layers >= LANDLOCK_MAX_NUM_LAYERS))
82 413 : return ERR_PTR(-E2BIG);
83 73 : new_num_layers = num_layers + 1;
84 : } else {
85 : new_num_layers = num_layers;
86 : }
87 413 : new_rule = kzalloc(struct_size(new_rule, layers, new_num_layers),
88 : GFP_KERNEL_ACCOUNT);
89 413 : if (!new_rule)
90 413 : return ERR_PTR(-ENOMEM);
91 413 : RB_CLEAR_NODE(&new_rule->node);
92 413 : landlock_get_object(object);
93 413 : new_rule->object = object;
94 413 : new_rule->num_layers = new_num_layers;
95 : /* Copies the original layer stack. */
96 413 : memcpy(new_rule->layers, layers,
97 : flex_array_size(new_rule, layers, num_layers));
98 413 : if (new_layer)
99 : /* Adds a copy of @new_layer on the layer stack. */
100 73 : new_rule->layers[new_rule->num_layers - 1] = *new_layer;
101 : return new_rule;
102 : }
103 :
104 413 : static void free_rule(struct landlock_rule *const rule)
105 : {
106 413 : might_sleep();
107 413 : if (!rule)
108 : return;
109 413 : landlock_put_object(rule->object);
110 413 : kfree(rule);
111 : }
112 :
113 340 : static void build_check_ruleset(void)
114 : {
115 340 : const struct landlock_ruleset ruleset = {
116 : .num_rules = ~0,
117 : .num_layers = ~0,
118 : };
119 340 : typeof(ruleset.fs_access_masks[0]) fs_access_mask = ~0;
120 :
121 340 : BUILD_BUG_ON(ruleset.num_rules < LANDLOCK_MAX_NUM_RULES);
122 340 : BUILD_BUG_ON(ruleset.num_layers < LANDLOCK_MAX_NUM_LAYERS);
123 340 : BUILD_BUG_ON(fs_access_mask < LANDLOCK_MASK_ACCESS_FS);
124 : }
125 :
126 : /**
127 : * insert_rule - Create and insert a rule in a ruleset
128 : *
129 : * @ruleset: The ruleset to be updated.
130 : * @object: The object to build the new rule with. The underlying kernel
131 : * object must be held by the caller.
132 : * @layers: One or multiple layers to be copied into the new rule.
133 : * @num_layers: The number of @layers entries.
134 : *
135 : * When user space requests to add a new rule to a ruleset, @layers only
136 : * contains one entry and this entry is not assigned to any level. In this
137 : * case, the new rule will extend @ruleset, similarly to a boolean OR between
138 : * access rights.
139 : *
140 : * When merging a ruleset in a domain, or copying a domain, @layers will be
141 : * added to @ruleset as new constraints, similarly to a boolean AND between
142 : * access rights.
143 : */
144 419 : static int insert_rule(struct landlock_ruleset *const ruleset,
145 : struct landlock_object *const object,
146 : const struct landlock_layer (*const layers)[],
147 : size_t num_layers)
148 : {
149 419 : struct rb_node **walker_node;
150 419 : struct rb_node *parent_node = NULL;
151 419 : struct landlock_rule *new_rule;
152 :
153 419 : might_sleep();
154 838 : lockdep_assert_held(&ruleset->lock);
155 419 : if (WARN_ON_ONCE(!object || !layers))
156 : return -ENOENT;
157 419 : walker_node = &(ruleset->root.rb_node);
158 795 : while (*walker_node) {
159 455 : struct landlock_rule *const this = rb_entry(*walker_node,
160 : struct landlock_rule, node);
161 :
162 455 : if (this->object != object) {
163 376 : parent_node = *walker_node;
164 376 : if (this->object < object)
165 252 : walker_node = &((*walker_node)->rb_right);
166 : else
167 124 : walker_node = &((*walker_node)->rb_left);
168 376 : continue;
169 : }
170 :
171 : /* Only a single-level layer should match an existing rule. */
172 79 : if (WARN_ON_ONCE(num_layers != 1))
173 : return -EINVAL;
174 :
175 : /* If there is a matching rule, updates it. */
176 79 : if ((*layers)[0].level == 0) {
177 : /*
178 : * Extends access rights when the request comes from
179 : * landlock_add_rule(2), i.e. @ruleset is not a domain.
180 : */
181 6 : if (WARN_ON_ONCE(this->num_layers != 1))
182 : return -EINVAL;
183 6 : if (WARN_ON_ONCE(this->layers[0].level != 0))
184 : return -EINVAL;
185 6 : this->layers[0].access |= (*layers)[0].access;
186 6 : return 0;
187 : }
188 :
189 73 : if (WARN_ON_ONCE(this->layers[0].level == 0))
190 : return -EINVAL;
191 :
192 : /*
193 : * Intersects access rights when it is a merge between a
194 : * ruleset and a domain.
195 : */
196 73 : new_rule = create_rule(object, &this->layers, this->num_layers,
197 : &(*layers)[0]);
198 73 : if (IS_ERR(new_rule))
199 0 : return PTR_ERR(new_rule);
200 73 : rb_replace_node(&this->node, &new_rule->node, &ruleset->root);
201 73 : free_rule(this);
202 73 : return 0;
203 : }
204 :
205 : /* There is no match for @object. */
206 340 : build_check_ruleset();
207 340 : if (ruleset->num_rules >= LANDLOCK_MAX_NUM_RULES)
208 : return -E2BIG;
209 340 : new_rule = create_rule(object, layers, num_layers, NULL);
210 340 : if (IS_ERR(new_rule))
211 0 : return PTR_ERR(new_rule);
212 340 : rb_link_node(&new_rule->node, parent_node, walker_node);
213 340 : rb_insert_color(&new_rule->node, &ruleset->root);
214 340 : ruleset->num_rules++;
215 340 : return 0;
216 : }
217 :
218 104 : static void build_check_layer(void)
219 : {
220 104 : const struct landlock_layer layer = {
221 : .level = ~0,
222 : .access = ~0,
223 : };
224 :
225 104 : BUILD_BUG_ON(layer.level < LANDLOCK_MAX_NUM_LAYERS);
226 104 : BUILD_BUG_ON(layer.access < LANDLOCK_MASK_ACCESS_FS);
227 : }
228 :
229 : /* @ruleset must be locked by the caller. */
230 104 : int landlock_insert_rule(struct landlock_ruleset *const ruleset,
231 : struct landlock_object *const object, const u32 access)
232 : {
233 104 : struct landlock_layer layers[] = {{
234 : .access = access,
235 : /* When @level is zero, insert_rule() extends @ruleset. */
236 : .level = 0,
237 : }};
238 :
239 104 : build_check_layer();
240 104 : return insert_rule(ruleset, object, &layers, ARRAY_SIZE(layers));
241 : }
242 :
243 92 : static inline void get_hierarchy(struct landlock_hierarchy *const hierarchy)
244 : {
245 92 : if (hierarchy)
246 92 : refcount_inc(&hierarchy->usage);
247 92 : }
248 :
249 221 : static void put_hierarchy(struct landlock_hierarchy *hierarchy)
250 : {
251 364 : while (hierarchy && refcount_dec_and_test(&hierarchy->usage)) {
252 143 : const struct landlock_hierarchy *const freeme = hierarchy;
253 :
254 143 : hierarchy = hierarchy->parent;
255 143 : kfree(freeme);
256 : }
257 221 : }
258 :
259 143 : static int merge_ruleset(struct landlock_ruleset *const dst,
260 : struct landlock_ruleset *const src)
261 : {
262 143 : struct landlock_rule *walker_rule, *next_rule;
263 143 : int err = 0;
264 :
265 143 : might_sleep();
266 : /* Should already be checked by landlock_merge_ruleset() */
267 143 : if (WARN_ON_ONCE(!src))
268 : return 0;
269 : /* Only merge into a domain. */
270 286 : if (WARN_ON_ONCE(!dst || !dst->hierarchy))
271 : return -EINVAL;
272 :
273 : /* Locks @dst first because we are its only owner. */
274 143 : mutex_lock(&dst->lock);
275 143 : mutex_lock_nested(&src->lock, SINGLE_DEPTH_NESTING);
276 :
277 : /* Stacks the new layer. */
278 286 : if (WARN_ON_ONCE(src->num_layers != 1 || dst->num_layers < 1)) {
279 0 : err = -EINVAL;
280 0 : goto out_unlock;
281 : }
282 143 : dst->fs_access_masks[dst->num_layers - 1] = src->fs_access_masks[0];
283 :
284 : /* Merges the @src tree. */
285 451 : rbtree_postorder_for_each_entry_safe(walker_rule, next_rule,
286 : &src->root, node) {
287 165 : struct landlock_layer layers[] = {{
288 165 : .level = dst->num_layers,
289 : }};
290 :
291 165 : if (WARN_ON_ONCE(walker_rule->num_layers != 1)) {
292 0 : err = -EINVAL;
293 0 : goto out_unlock;
294 : }
295 165 : if (WARN_ON_ONCE(walker_rule->layers[0].level != 0)) {
296 0 : err = -EINVAL;
297 0 : goto out_unlock;
298 : }
299 165 : layers[0].access = walker_rule->layers[0].access;
300 165 : err = insert_rule(dst, walker_rule->object, &layers,
301 : ARRAY_SIZE(layers));
302 165 : if (err)
303 0 : goto out_unlock;
304 : }
305 :
306 143 : out_unlock:
307 143 : mutex_unlock(&src->lock);
308 143 : mutex_unlock(&dst->lock);
309 143 : return err;
310 : }
311 :
312 143 : static int inherit_ruleset(struct landlock_ruleset *const parent,
313 : struct landlock_ruleset *const child)
314 : {
315 143 : struct landlock_rule *walker_rule, *next_rule;
316 143 : int err = 0;
317 :
318 143 : might_sleep();
319 143 : if (!parent)
320 : return 0;
321 :
322 : /* Locks @child first because we are its only owner. */
323 92 : mutex_lock(&child->lock);
324 92 : mutex_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);
325 :
326 : /* Copies the @parent tree. */
327 334 : rbtree_postorder_for_each_entry_safe(walker_rule, next_rule,
328 : &parent->root, node) {
329 300 : err = insert_rule(child, walker_rule->object,
330 150 : &walker_rule->layers, walker_rule->num_layers);
331 150 : if (err)
332 0 : goto out_unlock;
333 : }
334 :
335 92 : if (WARN_ON_ONCE(child->num_layers <= parent->num_layers)) {
336 0 : err = -EINVAL;
337 0 : goto out_unlock;
338 : }
339 : /* Copies the parent layer stack and leaves a space for the new layer. */
340 92 : memcpy(child->fs_access_masks, parent->fs_access_masks,
341 : flex_array_size(parent, fs_access_masks, parent->num_layers));
342 :
343 92 : if (WARN_ON_ONCE(!parent->hierarchy)) {
344 0 : err = -EINVAL;
345 0 : goto out_unlock;
346 : }
347 92 : get_hierarchy(parent->hierarchy);
348 92 : child->hierarchy->parent = parent->hierarchy;
349 :
350 92 : out_unlock:
351 92 : mutex_unlock(&parent->lock);
352 92 : mutex_unlock(&child->lock);
353 92 : return err;
354 : }
355 :
356 221 : static void free_ruleset(struct landlock_ruleset *const ruleset)
357 : {
358 221 : struct landlock_rule *freeme, *next;
359 :
360 221 : might_sleep();
361 782 : rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root,
362 : node)
363 340 : free_rule(freeme);
364 221 : put_hierarchy(ruleset->hierarchy);
365 221 : kfree(ruleset);
366 221 : }
367 :
368 485 : void landlock_put_ruleset(struct landlock_ruleset *const ruleset)
369 : {
370 485 : might_sleep();
371 485 : if (ruleset && refcount_dec_and_test(&ruleset->usage))
372 78 : free_ruleset(ruleset);
373 485 : }
374 :
375 143 : static void free_ruleset_work(struct work_struct *const work)
376 : {
377 143 : struct landlock_ruleset *ruleset;
378 :
379 143 : ruleset = container_of(work, struct landlock_ruleset, work_free);
380 143 : free_ruleset(ruleset);
381 143 : }
382 :
383 163 : void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset)
384 : {
385 163 : if (ruleset && refcount_dec_and_test(&ruleset->usage)) {
386 143 : INIT_WORK(&ruleset->work_free, free_ruleset_work);
387 143 : schedule_work(&ruleset->work_free);
388 : }
389 163 : }
390 :
391 : /**
392 : * landlock_merge_ruleset - Merge a ruleset with a domain
393 : *
394 : * @parent: Parent domain.
395 : * @ruleset: New ruleset to be merged.
396 : *
397 : * Returns the intersection of @parent and @ruleset, or returns @parent if
398 : * @ruleset is empty, or returns a duplicate of @ruleset if @parent is empty.
399 : */
400 145 : struct landlock_ruleset *landlock_merge_ruleset(
401 : struct landlock_ruleset *const parent,
402 : struct landlock_ruleset *const ruleset)
403 : {
404 145 : struct landlock_ruleset *new_dom;
405 145 : u32 num_layers;
406 145 : int err;
407 :
408 145 : might_sleep();
409 145 : if (WARN_ON_ONCE(!ruleset || parent == ruleset))
410 145 : return ERR_PTR(-EINVAL);
411 :
412 145 : if (parent) {
413 94 : if (parent->num_layers >= LANDLOCK_MAX_NUM_LAYERS)
414 145 : return ERR_PTR(-E2BIG);
415 92 : num_layers = parent->num_layers + 1;
416 : } else {
417 : num_layers = 1;
418 : }
419 :
420 : /* Creates a new domain... */
421 143 : new_dom = create_ruleset(num_layers);
422 143 : if (IS_ERR(new_dom))
423 : return new_dom;
424 143 : new_dom->hierarchy = kzalloc(sizeof(*new_dom->hierarchy),
425 : GFP_KERNEL_ACCOUNT);
426 143 : if (!new_dom->hierarchy) {
427 0 : err = -ENOMEM;
428 0 : goto out_put_dom;
429 : }
430 143 : refcount_set(&new_dom->hierarchy->usage, 1);
431 :
432 : /* ...as a child of @parent... */
433 143 : err = inherit_ruleset(parent, new_dom);
434 143 : if (err)
435 0 : goto out_put_dom;
436 :
437 : /* ...and including @ruleset. */
438 143 : err = merge_ruleset(new_dom, ruleset);
439 143 : if (err)
440 0 : goto out_put_dom;
441 :
442 : return new_dom;
443 :
444 0 : out_put_dom:
445 0 : landlock_put_ruleset(new_dom);
446 0 : return ERR_PTR(err);
447 : }
448 :
449 : /*
450 : * The returned access has the same lifetime as @ruleset.
451 : */
452 1808 : const struct landlock_rule *landlock_find_rule(
453 : const struct landlock_ruleset *const ruleset,
454 : const struct landlock_object *const object)
455 : {
456 1808 : const struct rb_node *node;
457 :
458 1808 : if (!object)
459 : return NULL;
460 588 : node = ruleset->root.rb_node;
461 1312 : while (node) {
462 1312 : struct landlock_rule *this = rb_entry(node,
463 : struct landlock_rule, node);
464 :
465 1312 : if (this->object == object)
466 588 : return this;
467 724 : if (this->object < object)
468 388 : node = node->rb_right;
469 : else
470 336 : node = node->rb_left;
471 : }
472 : return NULL;
473 : }
|