mirror of https://github.com/OpenIPC/firmware.git
374 lines, 8.7 KiB (unified diff)
diff -drupN a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
--- a/kernel/bpf/syscall.c	2018-08-06 17:23:04.000000000 +0300
+++ b/kernel/bpf/syscall.c	2022-06-12 05:28:14.000000000 +0300
@@ -20,6 +20,8 @@
 #include <linux/filter.h>
 #include <linux/version.h>
 
+#define BPF_OBJ_FLAG_MASK	(BPF_F_RDONLY | BPF_F_WRONLY)
+
 DEFINE_PER_CPU(int, bpf_prog_active);
 
 int sysctl_unprivileged_bpf_disabled __read_mostly;
@@ -119,6 +121,7 @@ static void bpf_map_free_deferred(struct
 	struct bpf_map *map = container_of(work, struct bpf_map, work);
 
 	bpf_map_uncharge_memlock(map);
+	security_bpf_map_free(map);
 	/* implementation dependent freeing */
 	map->ops->map_free(map);
 }
@@ -178,17 +181,54 @@ static void bpf_map_show_fdinfo(struct s
 }
 #endif
 
-static const struct file_operations bpf_map_fops = {
+static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
+			      loff_t *ppos)
+{
+	/* We need this handler such that alloc_file() enables
+	 * f_mode with FMODE_CAN_READ.
+	 */
+	return -EINVAL;
+}
+
+static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
+			       size_t siz, loff_t *ppos)
+{
+	/* We need this handler such that alloc_file() enables
+	 * f_mode with FMODE_CAN_WRITE.
+	 */
+	return -EINVAL;
+}
+
+const struct file_operations bpf_map_fops = {
 #ifdef CONFIG_PROC_FS
 	.show_fdinfo	= bpf_map_show_fdinfo,
 #endif
 	.release	= bpf_map_release,
+	.read		= bpf_dummy_read,
+	.write		= bpf_dummy_write,
 };
 
-int bpf_map_new_fd(struct bpf_map *map)
+int bpf_map_new_fd(struct bpf_map *map, int flags)
 {
+	int ret;
+
+	ret = security_bpf_map(map, OPEN_FMODE(flags));
+	if (ret < 0)
+		return ret;
+
 	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
-				O_RDWR | O_CLOEXEC);
+				flags | O_CLOEXEC);
+}
+
+int bpf_get_file_flag(int flags)
+{
+	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
+		return -EINVAL;
+	if (flags & BPF_F_RDONLY)
+		return O_RDONLY;
+	if (flags & BPF_F_WRONLY)
+		return O_WRONLY;
+	return O_RDWR;
 }
 
 /* helper macro to check that unused fields 'union bpf_attr' are zero */
@@ -204,12 +244,17 @@ int bpf_map_new_fd(struct bpf_map *map)
 static int map_create(union bpf_attr *attr)
 {
 	struct bpf_map *map;
+	int f_flags;
 	int err;
 
 	err = CHECK_ATTR(BPF_MAP_CREATE);
 	if (err)
 		return -EINVAL;
 
+	f_flags = bpf_get_file_flag(attr->map_flags);
+	if (f_flags < 0)
+		return f_flags;
+
 	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
 	map = find_and_alloc_map(attr);
 	if (IS_ERR(map))
@@ -218,11 +263,15 @@ static int map_create(union bpf_attr *at
 	atomic_set(&map->refcnt, 1);
 	atomic_set(&map->usercnt, 1);
 
-	err = bpf_map_charge_memlock(map);
+	err = security_bpf_map_alloc(map);
 	if (err)
 		goto free_map_nouncharge;
 
-	err = bpf_map_new_fd(map);
+	err = bpf_map_charge_memlock(map);
+	if (err)
+		goto free_map_sec;
+
+	err = bpf_map_new_fd(map, f_flags);
 	if (err < 0)
 		/* failed to allocate fd */
 		goto free_map;
@@ -231,6 +280,8 @@ static int map_create(union bpf_attr *at
 
 free_map:
 	bpf_map_uncharge_memlock(map);
+free_map_sec:
+	security_bpf_map_free(map);
 free_map_nouncharge:
 	map->ops->map_free(map);
 	return err;
@@ -313,6 +364,11 @@ static int map_lookup_elem(union bpf_att
 	if (IS_ERR(map))
 		return PTR_ERR(map);
 
+	if (!(f.file->f_mode & FMODE_CAN_READ)) {
+		err = -EPERM;
+		goto err_put;
+	}
+
 	err = -ENOMEM;
 	key = kmalloc(map->key_size, GFP_USER);
 	if (!key)
@@ -387,6 +443,11 @@ static int map_update_elem(union bpf_att
 	if (IS_ERR(map))
 		return PTR_ERR(map);
 
+	if (!(f.file->f_mode & FMODE_CAN_WRITE)) {
+		err = -EPERM;
+		goto err_put;
+	}
+
 	err = -ENOMEM;
 	key = kmalloc(map->key_size, GFP_USER);
 	if (!key)
@@ -463,6 +524,11 @@ static int map_delete_elem(union bpf_att
 	if (IS_ERR(map))
 		return PTR_ERR(map);
 
+	if (!(f.file->f_mode & FMODE_CAN_WRITE)) {
+		err = -EPERM;
+		goto err_put;
+	}
+
 	err = -ENOMEM;
 	key = kmalloc(map->key_size, GFP_USER);
 	if (!key)
@@ -508,6 +574,11 @@ static int map_get_next_key(union bpf_at
 	if (IS_ERR(map))
 		return PTR_ERR(map);
 
+	if (!(f.file->f_mode & FMODE_CAN_READ)) {
+		err = -EPERM;
+		goto err_put;
+	}
+
 	if (ukey) {
 		err = -ENOMEM;
 		key = kmalloc(map->key_size, GFP_USER);
@@ -611,6 +682,7 @@ static void __bpf_prog_put_rcu(struct rc
 
 	free_used_maps(aux);
 	bpf_prog_uncharge_memlock(aux->prog);
+	security_bpf_prog_free(aux);
 	bpf_prog_free(aux->prog);
 }
 
@@ -629,12 +701,20 @@ static int bpf_prog_release(struct inode
 	return 0;
 }
 
-static const struct file_operations bpf_prog_fops = {
+const struct file_operations bpf_prog_fops = {
 	.release = bpf_prog_release,
+	.read		= bpf_dummy_read,
+	.write		= bpf_dummy_write,
 };
 
 int bpf_prog_new_fd(struct bpf_prog *prog)
 {
+	int ret;
+
+	ret = security_bpf_prog(prog);
+	if (ret < 0)
+		return ret;
+
 	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
 				O_RDWR | O_CLOEXEC);
 }
@@ -726,7 +806,9 @@ static int bpf_prog_load(union bpf_attr
 	    attr->kern_version != LINUX_VERSION_CODE)
 		return -EINVAL;
 
-	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
+	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
+	    type != BPF_PROG_TYPE_CGROUP_SKB &&
+	    !capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
 	/* plain bpf_prog allocation */
@@ -734,10 +816,14 @@ static int bpf_prog_load(union bpf_attr
 	if (!prog)
 		return -ENOMEM;
 
-	err = bpf_prog_charge_memlock(prog);
+	err = security_bpf_prog_alloc(prog->aux);
 	if (err)
 		goto free_prog_nouncharge;
 
+	err = bpf_prog_charge_memlock(prog);
+	if (err)
+		goto free_prog_sec;
+
 	prog->len = attr->insn_cnt;
 
 	err = -EFAULT;
@@ -777,16 +863,18 @@ free_used_maps:
 	free_used_maps(prog->aux);
 free_prog:
 	bpf_prog_uncharge_memlock(prog);
+free_prog_sec:
+	security_bpf_prog_free(prog->aux);
 free_prog_nouncharge:
 	bpf_prog_free(prog);
 	return err;
 }
 
-#define BPF_OBJ_LAST_FIELD bpf_fd
+#define BPF_OBJ_LAST_FIELD file_flags
 
 static int bpf_obj_pin(const union bpf_attr *attr)
 {
-	if (CHECK_ATTR(BPF_OBJ))
+	if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
 		return -EINVAL;
 
 	return bpf_obj_pin_user(attr->bpf_fd, u64_to_ptr(attr->pathname));
@@ -794,12 +882,93 @@ static int bpf_obj_pin(const union bpf_a
 
 static int bpf_obj_get(const union bpf_attr *attr)
 {
-	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
+	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
+	    attr->file_flags & ~BPF_OBJ_FLAG_MASK)
 		return -EINVAL;
 
-	return bpf_obj_get_user(u64_to_ptr(attr->pathname));
+	return bpf_obj_get_user(u64_to_ptr(attr->pathname),
+				attr->file_flags);
 }
 
+#ifdef CONFIG_CGROUP_BPF
+
+#define BPF_PROG_ATTACH_LAST_FIELD attach_flags
+
+static int bpf_prog_attach(const union bpf_attr *attr)
+{
+	struct bpf_prog *prog;
+	struct cgroup *cgrp;
+	int ret;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (CHECK_ATTR(BPF_PROG_ATTACH))
+		return -EINVAL;
+
+	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
+		return -EINVAL;
+
+	switch (attr->attach_type) {
+	case BPF_CGROUP_INET_INGRESS:
+	case BPF_CGROUP_INET_EGRESS:
+		prog = bpf_prog_get_type(attr->attach_bpf_fd,
+					 BPF_PROG_TYPE_CGROUP_SKB);
+		if (IS_ERR(prog))
+			return PTR_ERR(prog);
+
+		cgrp = cgroup_get_from_fd(attr->target_fd);
+		if (IS_ERR(cgrp)) {
+			bpf_prog_put(prog);
+			return PTR_ERR(cgrp);
+		}
+
+		ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
+					attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
+		if (ret)
+			bpf_prog_put(prog);
+		cgroup_put(cgrp);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+#define BPF_PROG_DETACH_LAST_FIELD attach_type
+
+static int bpf_prog_detach(const union bpf_attr *attr)
+{
+	struct cgroup *cgrp;
+	int ret;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (CHECK_ATTR(BPF_PROG_DETACH))
+		return -EINVAL;
+
+	switch (attr->attach_type) {
+	case BPF_CGROUP_INET_INGRESS:
+	case BPF_CGROUP_INET_EGRESS:
+		cgrp = cgroup_get_from_fd(attr->target_fd);
+		if (IS_ERR(cgrp))
+			return PTR_ERR(cgrp);
+
+		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
+		cgroup_put(cgrp);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
+}
+#endif /* CONFIG_CGROUP_BPF */
+
 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
 {
 	union bpf_attr attr = {};
@@ -841,6 +1010,10 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf
 	if (copy_from_user(&attr, uattr, size) != 0)
 		return -EFAULT;
 
+	err = security_bpf(cmd, &attr, size);
+	if (err < 0)
+		return err;
+
 	switch (cmd) {
 	case BPF_MAP_CREATE:
 		err = map_create(&attr);
@@ -866,6 +1039,16 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf
 	case BPF_OBJ_GET:
 		err = bpf_obj_get(&attr);
 		break;
+
+#ifdef CONFIG_CGROUP_BPF
+	case BPF_PROG_ATTACH:
+		err = bpf_prog_attach(&attr);
+		break;
+	case BPF_PROG_DETACH:
+		err = bpf_prog_detach(&attr);
+		break;
+#endif
+
 	default:
 		err = -EINVAL;
 		break;