// SPDX-License-Identifier: GPL-2.0
/*
 * device_cgroup.c - device cgroup subsystem
 *
 * Copyright 2007 IBM Corp
 */

#include <linux/device_cgroup.h>
#include <linux/cgroup.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>

#ifdef CONFIG_CGROUP_DEVICE

static DEFINE_MUTEX(devcgroup_mutex);

enum devcg_behavior {
	DEVCG_DEFAULT_NONE,
	DEVCG_DEFAULT_ALLOW,
	DEVCG_DEFAULT_DENY,
};

/*
 * exception list locking rules:
 * hold devcgroup_mutex for update/read.
 * hold rcu_read_lock() for read.
 */

struct dev_exception_item {
	u32 major, minor;
	short type;
	short access;
	struct list_head list;
	struct rcu_head rcu;
};

struct dev_cgroup {
	struct cgroup_subsys_state css;
	struct list_head exceptions;
	enum devcg_behavior behavior;
};
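
/*
 * Behavior semantics (as implemented below): with DEVCG_DEFAULT_ALLOW every
 * device is accessible except those covered by the exception list, while with
 * DEVCG_DEFAULT_DENY only the listed exceptions are accessible.
 * DEVCG_DEFAULT_NONE marks a cgroup that is not (or no longer) online.
 */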

static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct dev_cgroup, css) : NULL;
}

static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
{
	return css_to_devcgroup(task_css(task, devices_cgrp_id));
}

/*
 * called under devcgroup_mutex
 */
static int dev_exceptions_copy(struct list_head *dest, struct list_head *orig)
{
	struct dev_exception_item *ex, *tmp, *new;

	lockdep_assert_held(&devcgroup_mutex);

	list_for_each_entry(ex, orig, list) {
		new = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
		if (!new)
			goto free_and_exit;
		list_add_tail(&new->list, dest);
	}

	return 0;

free_and_exit:
	list_for_each_entry_safe(ex, tmp, dest, list) {
		list_del(&ex->list);
		kfree(ex);
	}
	return -ENOMEM;
}

static void dev_exceptions_move(struct list_head *dest, struct list_head *orig)
{
	struct dev_exception_item *ex, *tmp;

	lockdep_assert_held(&devcgroup_mutex);

	list_for_each_entry_safe(ex, tmp, orig, list) {
		list_move_tail(&ex->list, dest);
	}
}

/*
 * called under devcgroup_mutex
 */
static int dev_exception_add(struct dev_cgroup *dev_cgroup,
			     struct dev_exception_item *ex)
{
	struct dev_exception_item *excopy, *walk;

	lockdep_assert_held(&devcgroup_mutex);

	excopy = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
	if (!excopy)
		return -ENOMEM;

	list_for_each_entry(walk, &dev_cgroup->exceptions, list) {
		if (walk->type != ex->type)
			continue;
		if (walk->major != ex->major)
			continue;
		if (walk->minor != ex->minor)
			continue;

		walk->access |= ex->access;
		kfree(excopy);
		excopy = NULL;
	}

	if (excopy != NULL)
		list_add_tail_rcu(&excopy->list, &dev_cgroup->exceptions);
	return 0;
}

/*
 * called under devcgroup_mutex
 */
static void dev_exception_rm(struct dev_cgroup *dev_cgroup,
			     struct dev_exception_item *ex)
{
	struct dev_exception_item *walk, *tmp;

	lockdep_assert_held(&devcgroup_mutex);

	list_for_each_entry_safe(walk, tmp, &dev_cgroup->exceptions, list) {
		if (walk->type != ex->type)
			continue;
		if (walk->major != ex->major)
			continue;
		if (walk->minor != ex->minor)
			continue;

		walk->access &= ~ex->access;
		if (!walk->access) {
			list_del_rcu(&walk->list);
			kfree_rcu(walk, rcu);
		}
	}
}

static void __dev_exception_clean(struct dev_cgroup *dev_cgroup)
{
	struct dev_exception_item *ex, *tmp;

	list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) {
		list_del_rcu(&ex->list);
		kfree_rcu(ex, rcu);
	}
}

/**
 * dev_exception_clean - frees all entries of the exception list
 * @dev_cgroup: dev_cgroup with the exception list to be cleaned
 *
 * called under devcgroup_mutex
 */
static void dev_exception_clean(struct dev_cgroup *dev_cgroup)
{
	lockdep_assert_held(&devcgroup_mutex);

	__dev_exception_clean(dev_cgroup);
}

static inline bool is_devcg_online(const struct dev_cgroup *devcg)
{
	return (devcg->behavior != DEVCG_DEFAULT_NONE);
}

/**
 * devcgroup_online - initializes devcgroup's behavior and exceptions based on
 *		      parent's
 * @css: css getting online
 * returns 0 in case of success, error code otherwise
 */
static int devcgroup_online(struct cgroup_subsys_state *css)
{
	struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
	struct dev_cgroup *parent_dev_cgroup = css_to_devcgroup(css->parent);
	int ret = 0;

	mutex_lock(&devcgroup_mutex);

	if (parent_dev_cgroup == NULL)
		dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
	else {
		ret = dev_exceptions_copy(&dev_cgroup->exceptions,
					  &parent_dev_cgroup->exceptions);
		if (!ret)
			dev_cgroup->behavior = parent_dev_cgroup->behavior;
	}
	mutex_unlock(&devcgroup_mutex);

	return ret;
}

static void devcgroup_offline(struct cgroup_subsys_state *css)
{
	struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);

	mutex_lock(&devcgroup_mutex);
	dev_cgroup->behavior = DEVCG_DEFAULT_NONE;
	mutex_unlock(&devcgroup_mutex);
}

/*
 * called from kernel/cgroup.c with cgroup_lock() held.
 */
static struct cgroup_subsys_state *
devcgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct dev_cgroup *dev_cgroup;

	dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
	if (!dev_cgroup)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&dev_cgroup->exceptions);
	dev_cgroup->behavior = DEVCG_DEFAULT_NONE;

	return &dev_cgroup->css;
}

static void devcgroup_css_free(struct cgroup_subsys_state *css)
{
	struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);

	__dev_exception_clean(dev_cgroup);
	kfree(dev_cgroup);
}

#define DEVCG_ALLOW 1
#define DEVCG_DENY 2
#define DEVCG_LIST 3

#define MAJMINLEN 13
#define ACCLEN 4

static void set_access(char *acc, short access)
{
	int idx = 0;
	memset(acc, 0, ACCLEN);
	if (access & DEVCG_ACC_READ)
		acc[idx++] = 'r';
	if (access & DEVCG_ACC_WRITE)
		acc[idx++] = 'w';
	if (access & DEVCG_ACC_MKNOD)
		acc[idx++] = 'm';
}

static char type_to_char(short type)
{
	if (type == DEVCG_DEV_ALL)
		return 'a';
	if (type == DEVCG_DEV_CHAR)
		return 'c';
	if (type == DEVCG_DEV_BLOCK)
		return 'b';
	return 'X';
}

static void set_majmin(char *str, unsigned m)
{
	if (m == ~0)
		strcpy(str, "*");
	else
		sprintf(str, "%u", m);
}

static int devcgroup_seq_show(struct seq_file *m, void *v)
{
	struct dev_cgroup *devcgroup = css_to_devcgroup(seq_css(m));
	struct dev_exception_item *ex;
	char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];

	rcu_read_lock();
	/*
	 * To preserve the compatibility:
	 * - Only show the "all devices" when the default policy is to allow
	 * - List the exceptions in case the default policy is to deny
	 * This way, the file remains as a "whitelist of devices"
	 */
	if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
		set_access(acc, DEVCG_ACC_MASK);
		set_majmin(maj, ~0);
		set_majmin(min, ~0);
		seq_printf(m, "%c %s:%s %s\n", type_to_char(DEVCG_DEV_ALL),
			   maj, min, acc);
	} else {
		list_for_each_entry_rcu(ex, &devcgroup->exceptions, list) {
			set_access(acc, ex->access);
			set_majmin(maj, ex->major);
			set_majmin(min, ex->minor);
			seq_printf(m, "%c %s:%s %s\n", type_to_char(ex->type),
				   maj, min, acc);
		}
	}
	rcu_read_unlock();

	return 0;
}
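
/*
 * Illustrative "devices.list" output for the two behaviors above: a cgroup
 * with DEVCG_DEFAULT_ALLOW shows only the catch-all entry
 *
 *	a *:* rwm
 *
 * while a cgroup with DEVCG_DEFAULT_DENY lists its allow exceptions, e.g.
 *
 *	c 1:3 rm
 *	b 8:* rw
 */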

/**
 * match_exception - iterates the exception list trying to find a complete match
 * @exceptions: list of exceptions
 * @type: device type (DEVCG_DEV_BLOCK or DEVCG_DEV_CHAR)
 * @major: device file major number, ~0 to match all
 * @minor: device file minor number, ~0 to match all
 * @access: permission mask (DEVCG_ACC_READ, DEVCG_ACC_WRITE, DEVCG_ACC_MKNOD)
 *
 * It is considered a complete match if an exception is found that will
 * contain the entire range of provided parameters.
 *
 * Return: true in case it matches an exception completely
 */
static bool match_exception(struct list_head *exceptions, short type,
			    u32 major, u32 minor, short access)
{
	struct dev_exception_item *ex;

	list_for_each_entry_rcu(ex, exceptions, list) {
		if ((type & DEVCG_DEV_BLOCK) && !(ex->type & DEVCG_DEV_BLOCK))
			continue;
		if ((type & DEVCG_DEV_CHAR) && !(ex->type & DEVCG_DEV_CHAR))
			continue;
		if (ex->major != ~0 && ex->major != major)
			continue;
		if (ex->minor != ~0 && ex->minor != minor)
			continue;
		/* provided access cannot have more than the exception rule */
		if (access & (~ex->access))
			continue;
		return true;
	}
	return false;
}

/**
 * match_exception_partial - iterates the exception list trying to find a partial match
 * @exceptions: list of exceptions
 * @type: device type (DEVCG_DEV_BLOCK or DEVCG_DEV_CHAR)
 * @major: device file major number, ~0 to match all
 * @minor: device file minor number, ~0 to match all
 * @access: permission mask (DEVCG_ACC_READ, DEVCG_ACC_WRITE, DEVCG_ACC_MKNOD)
 *
 * It is considered a partial match if an exception's range is found to
 * contain *any* of the devices specified by provided parameters. This is
 * used to make sure no extra access is being granted that is forbidden by
 * any of the exceptions in the list.
 *
 * Return: true in case the provided range partially matches an exception
 */
static bool match_exception_partial(struct list_head *exceptions, short type,
				    u32 major, u32 minor, short access)
{
	struct dev_exception_item *ex;

	list_for_each_entry_rcu(ex, exceptions, list,
				lockdep_is_held(&devcgroup_mutex)) {
		if ((type & DEVCG_DEV_BLOCK) && !(ex->type & DEVCG_DEV_BLOCK))
			continue;
		if ((type & DEVCG_DEV_CHAR) && !(ex->type & DEVCG_DEV_CHAR))
			continue;
		/*
		 * We must be sure that both the exception and the provided
		 * range aren't masking all devices
		 */
		if (ex->major != ~0 && major != ~0 && ex->major != major)
			continue;
		if (ex->minor != ~0 && minor != ~0 && ex->minor != minor)
			continue;
		/*
		 * In order to make sure the provided range isn't matching
		 * an exception, all its access bits shouldn't match the
		 * exception's access bits
		 */
		if (!(access & ex->access))
			continue;
		return true;
	}
	return false;
}
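
/*
 * Example of the two match flavours above, assuming a single exception
 * "b 8:* rw": a request for "b 8:1 r" is a complete match (it is fully
 * contained in the exception), whereas "b 8:1 rwm" is only a partial match
 * (the ranges overlap, but mknod is not covered by the exception).
 */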

/**
 * verify_new_ex - verifies if a new exception is allowed by parent cgroup's permissions
 * @dev_cgroup: dev cgroup to be tested against
 * @refex: new exception
 * @behavior: behavior of the exception's dev_cgroup
 *
 * This is used to make sure a child cgroup won't have more privileges
 * than its parent
 */
static bool verify_new_ex(struct dev_cgroup *dev_cgroup,
			  struct dev_exception_item *refex,
			  enum devcg_behavior behavior)
{
	bool match = false;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
			 !lockdep_is_held(&devcgroup_mutex),
			 "device_cgroup:verify_new_ex called without proper synchronization");

	if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
		if (behavior == DEVCG_DEFAULT_ALLOW) {
			/*
			 * a new exception in the child doesn't matter, it
			 * only adds extra restrictions
			 */
			return true;
		} else {
			/*
			 * a new exception in the child will add more devices
			 * that can be accessed, so it can't match any of
			 * the parent's exceptions, even slightly
			 */
			match = match_exception_partial(&dev_cgroup->exceptions,
							refex->type,
							refex->major,
							refex->minor,
							refex->access);

			if (match)
				return false;
			return true;
		}
	} else {
		/*
		 * Only behavior == DEVCG_DEFAULT_DENY allowed here, therefore
		 * the new exception will add access to more devices and must
		 * be contained completely in a parent's exception to be
		 * allowed
		 */
		match = match_exception(&dev_cgroup->exceptions, refex->type,
					refex->major, refex->minor,
					refex->access);

		if (match)
			/* parent has an exception that matches the proposed */
			return true;
		else
			return false;
	}
	return false;
}

/*
 * parent_has_perm:
 * when adding a new allow rule to a device exception list, the rule
 * must be allowed in the parent device
 */
static int parent_has_perm(struct dev_cgroup *childcg,
			   struct dev_exception_item *ex)
{
	struct dev_cgroup *parent = css_to_devcgroup(childcg->css.parent);

	if (!parent)
		return 1;
	return verify_new_ex(parent, ex, childcg->behavior);
}

/**
 * parent_allows_removal - verify if it's ok to remove an exception
 * @childcg: child cgroup from where the exception will be removed
 * @ex: exception being removed
 *
 * When removing an exception in cgroups with default ALLOW policy, it must
 * be checked if removing it will give the child cgroup more access than the
 * parent.
 *
 * Return: true if it's ok to remove exception, false otherwise
 */
static bool parent_allows_removal(struct dev_cgroup *childcg,
				  struct dev_exception_item *ex)
{
	struct dev_cgroup *parent = css_to_devcgroup(childcg->css.parent);

	if (!parent)
		return true;

	/* It's always allowed to remove access to devices */
	if (childcg->behavior == DEVCG_DEFAULT_DENY)
		return true;

	/*
	 * Make sure you're not removing part or a whole exception existing in
	 * the parent cgroup
	 */
	return !match_exception_partial(&parent->exceptions, ex->type,
					ex->major, ex->minor, ex->access);
}

/**
 * may_allow_all - checks if it's possible to change the behavior to
 *		   allow based on parent's rules.
 * @parent: device cgroup's parent
 * returns: != 0 in case it's allowed, 0 otherwise
 */
static inline int may_allow_all(struct dev_cgroup *parent)
{
	if (!parent)
		return 1;
	return parent->behavior == DEVCG_DEFAULT_ALLOW;
}

/**
 * revalidate_active_exceptions - walks through the active exception list and
 *				  revalidates the exceptions based on parent's
 *				  behavior and exceptions. The exceptions that
 *				  are no longer valid will be removed.
 *				  Called with devcgroup_mutex held.
 * @devcg: cgroup which exceptions will be checked
 *
 * This is one of the three key functions for hierarchy implementation.
 * This function is responsible for re-evaluating all the cgroup's active
 * exceptions due to a parent's exception change.
 * Refer to Documentation/admin-guide/cgroup-v1/devices.rst for more details.
 */
static void revalidate_active_exceptions(struct dev_cgroup *devcg)
{
	struct dev_exception_item *ex;
	struct list_head *this, *tmp;

	list_for_each_safe(this, tmp, &devcg->exceptions) {
		ex = container_of(this, struct dev_exception_item, list);
		if (!parent_has_perm(devcg, ex))
			dev_exception_rm(devcg, ex);
	}
}

/**
 * propagate_exception - propagates a new exception to the children
 * @devcg_root: device cgroup that added a new exception
 * @ex: new exception to be propagated
 *
 * returns: 0 in case of success, != 0 in case of error
 */
static int propagate_exception(struct dev_cgroup *devcg_root,
			       struct dev_exception_item *ex)
{
	struct cgroup_subsys_state *pos;
	int rc = 0;

	rcu_read_lock();

	css_for_each_descendant_pre(pos, &devcg_root->css) {
		struct dev_cgroup *devcg = css_to_devcgroup(pos);

		/*
		 * Because devcgroup_mutex is held, no devcg will become
		 * online or offline during the tree walk (see on/offline
		 * methods), and online ones are safe to access outside RCU
		 * read lock without bumping refcnt.
		 */
		if (pos == &devcg_root->css || !is_devcg_online(devcg))
			continue;

		rcu_read_unlock();

		/*
		 * in case both the root's and devcg's behavior are allow, a
		 * new restriction means adding to the exception list
		 */
		if (devcg_root->behavior == DEVCG_DEFAULT_ALLOW &&
		    devcg->behavior == DEVCG_DEFAULT_ALLOW) {
			rc = dev_exception_add(devcg, ex);
			if (rc)
				return rc;
		} else {
			/*
			 * in the other possible cases:
			 * root's behavior: allow, devcg's: deny
			 * root's behavior: deny, devcg's: deny
			 * the exception will be removed
			 */
			dev_exception_rm(devcg, ex);
		}
		revalidate_active_exceptions(devcg);

		rcu_read_lock();
	}

	rcu_read_unlock();
	return rc;
}

/*
 * Modify the exception list using allow/deny rules.
 * CAP_SYS_ADMIN is needed for this. It's at least separate from CAP_MKNOD
 * so we can give a container CAP_MKNOD to let it create devices but not
 * modify the exception list.
 * It seems likely we'll want to add a CAP_CONTAINER capability to allow
 * us to also grant CAP_SYS_ADMIN to containers without giving away the
 * device exception list controls, but for now we'll stick with CAP_SYS_ADMIN.
 *
 * Taking rules away is always allowed (given CAP_SYS_ADMIN). Granting
 * new access is only allowed if you're in the top-level cgroup, or your
 * parent cgroup has the access you're asking for.
 */
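
/*
 * For reference, rules written to devices.allow / devices.deny have the form
 * "<type> <major>:<minor> <access>" (a lone 'a' switches the default
 * behavior), e.g.:
 *
 *	a		switch to the allow-all / deny-all default
 *	b 8:* rw	read/write on all block devices with major 8
 *	c 1:3 rm	read/mknod on the 1:3 character device
 */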
static int devcgroup_update_access(struct dev_cgroup *devcgroup,
				   int filetype, char *buffer)
{
	const char *b;
	char temp[12];		/* 11 + 1 characters needed for a u32 */
	int count, rc = 0;
	struct dev_exception_item ex;
	struct dev_cgroup *parent = css_to_devcgroup(devcgroup->css.parent);
	struct dev_cgroup tmp_devcgrp;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	memset(&ex, 0, sizeof(ex));
	memset(&tmp_devcgrp, 0, sizeof(tmp_devcgrp));
	b = buffer;

	switch (*b) {
	case 'a':
		switch (filetype) {
		case DEVCG_ALLOW:
			if (css_has_online_children(&devcgroup->css))
				return -EINVAL;

			if (!may_allow_all(parent))
				return -EPERM;
			if (!parent) {
				devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
				dev_exception_clean(devcgroup);
				break;
			}

			INIT_LIST_HEAD(&tmp_devcgrp.exceptions);
			rc = dev_exceptions_copy(&tmp_devcgrp.exceptions,
						 &devcgroup->exceptions);
			if (rc)
				return rc;
			dev_exception_clean(devcgroup);
			rc = dev_exceptions_copy(&devcgroup->exceptions,
						 &parent->exceptions);
			if (rc) {
				dev_exceptions_move(&devcgroup->exceptions,
						    &tmp_devcgrp.exceptions);
				return rc;
			}
			devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
			dev_exception_clean(&tmp_devcgrp);
			break;
		case DEVCG_DENY:
			if (css_has_online_children(&devcgroup->css))
				return -EINVAL;

			dev_exception_clean(devcgroup);
			devcgroup->behavior = DEVCG_DEFAULT_DENY;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	case 'b':
		ex.type = DEVCG_DEV_BLOCK;
		break;
	case 'c':
		ex.type = DEVCG_DEV_CHAR;
		break;
	default:
		return -EINVAL;
	}
	b++;
	if (!isspace(*b))
		return -EINVAL;
	b++;
	if (*b == '*') {
		ex.major = ~0;
		b++;
	} else if (isdigit(*b)) {
		memset(temp, 0, sizeof(temp));
		for (count = 0; count < sizeof(temp) - 1; count++) {
			temp[count] = *b;
			b++;
			if (!isdigit(*b))
				break;
		}
		rc = kstrtou32(temp, 10, &ex.major);
		if (rc)
			return -EINVAL;
	} else {
		return -EINVAL;
	}
	if (*b != ':')
		return -EINVAL;
	b++;

	/* read minor */
	if (*b == '*') {
		ex.minor = ~0;
		b++;
	} else if (isdigit(*b)) {
		memset(temp, 0, sizeof(temp));
		for (count = 0; count < sizeof(temp) - 1; count++) {
			temp[count] = *b;
			b++;
			if (!isdigit(*b))
				break;
		}
		rc = kstrtou32(temp, 10, &ex.minor);
		if (rc)
			return -EINVAL;
	} else {
		return -EINVAL;
	}
	if (!isspace(*b))
		return -EINVAL;
	for (b++, count = 0; count < 3; count++, b++) {
		switch (*b) {
		case 'r':
			ex.access |= DEVCG_ACC_READ;
			break;
		case 'w':
			ex.access |= DEVCG_ACC_WRITE;
			break;
		case 'm':
			ex.access |= DEVCG_ACC_MKNOD;
			break;
		case '\n':
		case '\0':
			count = 3;
			break;
		default:
			return -EINVAL;
		}
	}

	switch (filetype) {
	case DEVCG_ALLOW:
		/*
		 * If the default policy is to allow, try to remove a
		 * matching exception instead. And be silent about it: we
		 * don't want to break compatibility
		 */
		if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
			/* Check if the parent allows removing it first */
			if (!parent_allows_removal(devcgroup, &ex))
				return -EPERM;
			dev_exception_rm(devcgroup, &ex);
			break;
		}

		if (!parent_has_perm(devcgroup, &ex))
			return -EPERM;
		rc = dev_exception_add(devcgroup, &ex);
		break;
	case DEVCG_DENY:
		/*
		 * If the default policy is to deny, try to remove a
		 * matching exception instead. And be silent about it: we
		 * don't want to break compatibility
		 */
		if (devcgroup->behavior == DEVCG_DEFAULT_DENY)
			dev_exception_rm(devcgroup, &ex);
		else
			rc = dev_exception_add(devcgroup, &ex);

		if (rc)
			break;
		/* we only propagate new restrictions */
		rc = propagate_exception(devcgroup, &ex);
		break;
	default:
		rc = -EINVAL;
	}
	return rc;
}

static ssize_t devcgroup_access_write(struct kernfs_open_file *of,
				      char *buf, size_t nbytes, loff_t off)
{
	int retval;

	mutex_lock(&devcgroup_mutex);
	retval = devcgroup_update_access(css_to_devcgroup(of_css(of)),
					 of_cft(of)->private, strstrip(buf));
	mutex_unlock(&devcgroup_mutex);
	return retval ?: nbytes;
}

static struct cftype dev_cgroup_files[] = {
	{
		.name = "allow",
		.write = devcgroup_access_write,
		.private = DEVCG_ALLOW,
	},
	{
		.name = "deny",
		.write = devcgroup_access_write,
		.private = DEVCG_DENY,
	},
	{
		.name = "list",
		.seq_show = devcgroup_seq_show,
		.private = DEVCG_LIST,
	},
	{ }	/* terminate */
};

struct cgroup_subsys devices_cgrp_subsys = {
	.css_alloc = devcgroup_css_alloc,
	.css_free = devcgroup_css_free,
	.css_online = devcgroup_online,
	.css_offline = devcgroup_offline,
	.legacy_cftypes = dev_cgroup_files,
};
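
/*
 * Minimal usage sketch for the legacy (cgroup v1) interface, assuming the
 * devices controller is mounted at /sys/fs/cgroup/devices and using a
 * hypothetical "sandbox" cgroup:
 *
 *	mkdir /sys/fs/cgroup/devices/sandbox
 *	echo a > /sys/fs/cgroup/devices/sandbox/devices.deny
 *	echo "c 1:3 rwm" > /sys/fs/cgroup/devices/sandbox/devices.allow
 *	cat /sys/fs/cgroup/devices/sandbox/devices.list
 */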

/**
 * devcgroup_legacy_check_permission - checks if an inode operation is permitted
 * @type: device type
 * @major: device major number
 * @minor: device minor number
 * @access: combination of DEVCG_ACC_WRITE, DEVCG_ACC_READ and DEVCG_ACC_MKNOD
 *
 * returns 0 on success, -EPERM in case the operation is not permitted
 */
static int devcgroup_legacy_check_permission(short type, u32 major, u32 minor,
					short access)
{
	struct dev_cgroup *dev_cgroup;
	bool rc;

	rcu_read_lock();
	dev_cgroup = task_devcgroup(current);
	if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW)
		/* Can't match any of the exceptions, even partially */
		rc = !match_exception_partial(&dev_cgroup->exceptions,
					      type, major, minor, access);
	else
		/* Need to match completely one exception to be allowed */
		rc = match_exception(&dev_cgroup->exceptions, type, major,
				     minor, access);
	rcu_read_unlock();

	if (!rc)
		return -EPERM;

	return 0;
}

#endif /* CONFIG_CGROUP_DEVICE */

#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)

int devcgroup_check_permission(short type, u32 major, u32 minor, short access)
{
	int rc = BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access);

	if (rc)
		return -EPERM;

	#ifdef CONFIG_CGROUP_DEVICE
	return devcgroup_legacy_check_permission(type, major, minor, access);

	#else /* CONFIG_CGROUP_DEVICE */
	return 0;

	#endif /* CONFIG_CGROUP_DEVICE */
}
EXPORT_SYMBOL(devcgroup_check_permission);
#endif /* defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF) */