/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_NETLINK_H
#define __LINUX_NETLINK_H


#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <net/scm.h>
#include <uapi/linux/netlink.h>

struct net;

static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
{
	return (struct nlmsghdr *)skb->data;
}

enum netlink_skb_flags {
	NETLINK_SKB_DST		= 0x8,	/* Dst set in sendto or sendmsg */
};

struct netlink_skb_parms {
	struct scm_creds	creds;		/* Skb credentials	*/
	__u32			portid;
	__u32			dst_group;
	__u32			flags;
	struct sock		*sk;
	bool			nsid_is_set;
	int			nsid;
};

#define NETLINK_CB(skb)		(*(struct netlink_skb_parms*)&((skb)->cb))
#define NETLINK_CREDS(skb)	(&NETLINK_CB((skb)).creds)


void netlink_table_grab(void);
void netlink_table_ungrab(void);

#define NL_CFG_F_NONROOT_RECV	(1 << 0)
#define NL_CFG_F_NONROOT_SEND	(1 << 1)

/* optional Netlink kernel configuration parameters */
struct netlink_kernel_cfg {
	unsigned int	groups;
	unsigned int	flags;
	void		(*input)(struct sk_buff *skb);
	struct mutex	*cb_mutex;
	int		(*bind)(struct net *net, int group);
	void		(*unbind)(struct net *net, int group);
	bool		(*compare)(struct net *net, struct sock *sk);
};

struct sock *__netlink_kernel_create(struct net *net, int unit,
				     struct module *module,
				     struct netlink_kernel_cfg *cfg);
static inline struct sock *
netlink_kernel_create(struct net *net, int unit, struct netlink_kernel_cfg *cfg)
{
	return __netlink_kernel_create(net, unit, THIS_MODULE, cfg);
}
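
/*
 * Example (illustrative sketch, not part of this header): a module can
 * register an in-kernel netlink socket from its init path by filling in a
 * netlink_kernel_cfg and calling netlink_kernel_create().  The protocol
 * number NETLINK_MY_PROTO and the my_nl_rcv() handler are hypothetical.
 *
 *	static void my_nl_rcv(struct sk_buff *skb)
 *	{
 *		struct nlmsghdr *nlh = nlmsg_hdr(skb);
 *		u32 portid = NETLINK_CB(skb).portid;	// sender's port id
 *
 *		// validate nlh->nlmsg_len / nlh->nlmsg_type and reply here
 *	}
 *
 *	struct netlink_kernel_cfg cfg = {
 *		.groups	= 1,
 *		.input	= my_nl_rcv,
 *	};
 *	struct sock *nlsk;
 *
 *	nlsk = netlink_kernel_create(&init_net, NETLINK_MY_PROTO, &cfg);
 *	if (!nlsk)
 *		return -ENOMEM;
 */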

/* this can be increased when necessary - don't expose to userland */
#define NETLINK_MAX_COOKIE_LEN	20

/**
 * struct netlink_ext_ack - netlink extended ACK report struct
 * @_msg: message string to report - don't access directly, use
 *	%NL_SET_ERR_MSG
 * @bad_attr: attribute with error
 * @cookie: cookie data to return to userspace (for success)
 * @cookie_len: actual cookie data length
 */
struct netlink_ext_ack {
	const char *_msg;
	const struct nlattr *bad_attr;
	u8 cookie[NETLINK_MAX_COOKIE_LEN];
	u8 cookie_len;
};

/* Always use this macro, this allows later putting the
 * message into a separate section or such for things
 * like translation or listing all possible messages.
 * Currently string formatting is not supported (due
 * to the lack of an output buffer.)
 */
#define NL_SET_ERR_MSG(extack, msg) do {		\
	static const char __msg[] = msg;		\
	struct netlink_ext_ack *__extack = (extack);	\
							\
	if (__extack)					\
		__extack->_msg = __msg;			\
} while (0)

#define NL_SET_ERR_MSG_MOD(extack, msg)			\
	NL_SET_ERR_MSG((extack), KBUILD_MODNAME ": " msg)
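
/*
 * Example (illustrative sketch): a request handler that receives a
 * struct netlink_ext_ack *extack from its caller can attach an error
 * message before returning an error; the string is then reported to
 * userspace via the extended ACK.  my_validate() and struct my_req are
 * hypothetical.
 *
 *	static int my_validate(const struct nlmsghdr *nlh,
 *			       struct netlink_ext_ack *extack)
 *	{
 *		if (nlh->nlmsg_len < NLMSG_HDRLEN + sizeof(struct my_req)) {
 *			NL_SET_ERR_MSG(extack, "request payload too short");
 *			return -EINVAL;
 *		}
 *		return 0;
 *	}
 */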

#define NL_SET_BAD_ATTR(extack, attr) do {		\
	if ((extack))					\
		(extack)->bad_attr = (attr);		\
} while (0)

#define NL_SET_ERR_MSG_ATTR(extack, attr, msg) do {	\
	static const char __msg[] = msg;		\
	struct netlink_ext_ack *__extack = (extack);	\
							\
	if (__extack) {					\
		__extack->_msg = __msg;			\
		__extack->bad_attr = (attr);		\
	}						\
} while (0)

static inline void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack,
					    u64 cookie)
{
	u64 __cookie = cookie;

	memcpy(extack->cookie, &__cookie, sizeof(__cookie));
	extack->cookie_len = sizeof(__cookie);
}
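
/*
 * Example (illustrative sketch): on success a handler may hand a small
 * cookie back to userspace through the extended ACK, e.g. an identifier
 * assigned to a newly created object.  obj->id and the surrounding
 * handler are hypothetical; note the helper dereferences extack, so
 * check it first.
 *
 *	if (extack)
 *		nl_set_extack_cookie_u64(extack, obj->id);
 */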

void netlink_kernel_release(struct sock *sk);
int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
int netlink_change_ngroups(struct sock *sk, unsigned int groups);
void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
		 const struct netlink_ext_ack *extack);
int netlink_has_listeners(struct sock *sk, unsigned int group);
bool netlink_strict_get_check(struct sk_buff *skb);

int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
		      __u32 group, gfp_t allocation);
int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb,
			       __u32 portid, __u32 group, gfp_t allocation,
			       int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
			       void *filter_data);
int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code);
int netlink_register_notifier(struct notifier_block *nb);
int netlink_unregister_notifier(struct notifier_block *nb);
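
/*
 * Example (illustrative sketch): sending from an in-kernel socket.  A
 * unicast reply goes to the requester's port id, a broadcast notification
 * to everyone listening on a multicast group.  nlsk, req_skb, reply_skb,
 * event_skb and MY_GRP_EVENTS are hypothetical; both skbs are consumed by
 * the calls.
 *
 *	netlink_unicast(nlsk, reply_skb, NETLINK_CB(req_skb).portid,
 *			MSG_DONTWAIT);
 *
 *	if (netlink_has_listeners(nlsk, MY_GRP_EVENTS))
 *		netlink_broadcast(nlsk, event_skb, 0, MY_GRP_EVENTS,
 *				  GFP_KERNEL);
 */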

/* finegrained unicast helpers: */
struct sock *netlink_getsockbyfilp(struct file *filp);
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
		      long *timeo, struct sock *ssk);
void netlink_detachskb(struct sock *sk, struct sk_buff *skb);
int netlink_sendskb(struct sock *sk, struct sk_buff *skb);

static inline struct sk_buff *
netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *nskb;

	nskb = skb_clone(skb, gfp_mask);
	if (!nskb)
		return NULL;

	/* This is a large skb, set destructor callback to release head */
	if (is_vmalloc_addr(skb->head))
		nskb->destructor = skb->destructor;

	return nskb;
}

/*
 * skb should fit one page. This choice is good for headerless malloc.
 * But we should limit to 8K so that userspace does not have to
 * use enormous buffer sizes on recvmsg() calls just to avoid
 * MSG_TRUNC when PAGE_SIZE is very large.
 */
#if PAGE_SIZE < 8192UL
#define NLMSG_GOODSIZE	SKB_WITH_OVERHEAD(PAGE_SIZE)
#else
#define NLMSG_GOODSIZE	SKB_WITH_OVERHEAD(8192UL)
#endif

#define NLMSG_DEFAULT_SIZE (NLMSG_GOODSIZE - NLMSG_HDRLEN)


struct netlink_callback {
	struct sk_buff		*skb;
	const struct nlmsghdr	*nlh;
	int			(*dump)(struct sk_buff *skb,
					struct netlink_callback *cb);
	int			(*done)(struct netlink_callback *cb);
	void			*data;
	/* the module that the dump function belongs to */
	struct module		*module;
	struct netlink_ext_ack	*extack;
	u16			family;
	u16			min_dump_alloc;
	bool			strict_check;
	u16			answer_flags;
	unsigned int		prev_seq, seq;
	long			args[6];
};

struct netlink_notify {
	struct net *net;
	u32 portid;
	int protocol;
};

struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags);

struct netlink_dump_control {
	int (*start)(struct netlink_callback *);
	int (*dump)(struct sk_buff *skb, struct netlink_callback *);
	int (*done)(struct netlink_callback *);
	void *data;
	struct module *module;
	u16 min_dump_alloc;
};

int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
			 const struct nlmsghdr *nlh,
			 struct netlink_dump_control *control);
static inline int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
				     const struct nlmsghdr *nlh,
				     struct netlink_dump_control *control)
{
	if (!control->module)
		control->module = THIS_MODULE;

	return __netlink_dump_start(ssk, skb, nlh, control);
}
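
/*
 * Example (illustrative sketch): when a request asks for a table dump
 * (NLM_F_DUMP), the message handler hands the work over to the dump
 * machinery instead of answering directly.  nlsk and my_dump() are
 * hypothetical; the dump callback is called repeatedly to fill
 * successive skbs until it returns 0.
 *
 *	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 *		struct netlink_dump_control c = {
 *			.dump = my_dump,
 *		};
 *		return netlink_dump_start(nlsk, skb, nlh, &c);
 *	}
 */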

struct netlink_tap {
	struct net_device *dev;
	struct module *module;
	struct list_head list;
};

int netlink_add_tap(struct netlink_tap *nt);
int netlink_remove_tap(struct netlink_tap *nt);

bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
			  struct user_namespace *ns, int cap);
bool netlink_ns_capable(const struct sk_buff *skb,
			struct user_namespace *ns, int cap);
bool netlink_capable(const struct sk_buff *skb, int cap);
bool netlink_net_capable(const struct sk_buff *skb, int cap);

#endif	/* __LINUX_NETLINK_H */