/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <net/ip_fib.h>
#include <net/switchdev.h>

/**
 * switchdev_port_attr_get - Get port attribute
 *
 * @dev: port device
 * @attr: attribute to get
 */
int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	struct switchdev_attr first = {
		.id = SWITCHDEV_ATTR_UNDEFINED
	};
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_attr_get)
		return ops->switchdev_port_attr_get(dev, attr);

	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
		return err;

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to get attr on
	 * each port.  Return -ENODATA if attr values don't
	 * compare across ports.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = switchdev_port_attr_get(lower_dev, attr);
		if (err)
			break;
		if (first.id == SWITCHDEV_ATTR_UNDEFINED)
			first = *attr;
		else if (memcmp(&first, attr, sizeof(*attr)))
			return -ENODATA;
	}

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_get);
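/* Usage sketch (illustrative, not compiled code): a caller can query
 * the parent switch ID of a port the same way switchdev_get_dev_by_nhs()
 * below does:
 *
 *	struct switchdev_attr attr = {
 *		.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
 *	};
 *
 *	if (!switchdev_port_attr_get(dev, &attr))
 *		print_hex_dump_bytes("ppid: ", DUMP_PREFIX_NONE,
 *				     attr.ppid.id, attr.ppid.id_len);
 */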

static int __switchdev_port_attr_set(struct net_device *dev,
				     struct switchdev_attr *attr)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_attr_set)
		return ops->switchdev_port_attr_set(dev, attr);

	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
		return err;

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to set attr on
	 * each port.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = __switchdev_port_attr_set(lower_dev, attr);
		if (err)
			break;
	}

	return err;
}

struct switchdev_attr_set_work {
	struct work_struct work;
	struct net_device *dev;
	struct switchdev_attr attr;
};

static void switchdev_port_attr_set_work(struct work_struct *work)
{
	struct switchdev_attr_set_work *asw =
		container_of(work, struct switchdev_attr_set_work, work);
	int err;

	rtnl_lock();
	err = switchdev_port_attr_set(asw->dev, &asw->attr);
	BUG_ON(err);
	rtnl_unlock();

	dev_put(asw->dev);
	kfree(work);	/* frees asw: work is its first member */
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
					 struct switchdev_attr *attr)
{
	struct switchdev_attr_set_work *asw;

	asw = kmalloc(sizeof(*asw), GFP_ATOMIC);
	if (!asw)
		return -ENOMEM;

	INIT_WORK(&asw->work, switchdev_port_attr_set_work);

	dev_hold(dev);
	asw->dev = dev;
	memcpy(&asw->attr, attr, sizeof(asw->attr));

	schedule_work(&asw->work);

	return 0;
}

/**
 * switchdev_port_attr_set - Set port attribute
 *
 * @dev: port device
 * @attr: attribute to set
 *
 * Use a 2-phase prepare-commit transaction model to ensure
 * system is not left in a partially updated state due to
 * failure from driver/device.
 */
int switchdev_port_attr_set(struct net_device *dev, struct switchdev_attr *attr)
{
	int err;

	if (!rtnl_is_locked()) {
		/* Running a prepare-commit transaction across stacked
		 * devices requires that nothing move, so if rtnl_lock
		 * is not held, schedule a worker thread to hold
		 * rtnl_lock while setting attr.
		 */

		return switchdev_port_attr_set_defer(dev, attr);
	}

	/* Phase I: prepare for attr set.  Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support.  The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the attr.
	 */

	attr->trans = SWITCHDEV_TRANS_PREPARE;
	err = __switchdev_port_attr_set(dev, attr);
	if (err) {
		/* Prepare phase failed: abort the transaction.  Any
		 * resources reserved in the prepare phase are
		 * released.
		 */

		attr->trans = SWITCHDEV_TRANS_ABORT;
		__switchdev_port_attr_set(dev, attr);

		return err;
	}

	/* Phase II: commit attr set.  This cannot fail as a fault
	 * of the driver/device.  If it does, it's a bug in the
	 * driver/device because the driver said everything was OK
	 * in phase I.
	 */

	attr->trans = SWITCHDEV_TRANS_COMMIT;
	err = __switchdev_port_attr_set(dev, attr);
	BUG_ON(err);

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
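/* Driver-side sketch (illustrative; the foo_* helpers are hypothetical):
 * a driver's switchdev_port_attr_set op is called once per transaction
 * phase and should dispatch on attr->trans:
 *
 *	static int foo_port_attr_set(struct net_device *dev,
 *				     struct switchdev_attr *attr)
 *	{
 *		switch (attr->trans) {
 *		case SWITCHDEV_TRANS_PREPARE:
 *			return foo_reserve_resources(dev, attr);
 *		case SWITCHDEV_TRANS_ABORT:
 *			foo_release_resources(dev, attr);
 *			return 0;
 *		case SWITCHDEV_TRANS_COMMIT:
 *			return foo_commit_attr(dev, attr);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */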

int __switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_obj_add)
		return ops->switchdev_port_obj_add(dev, obj);

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to add object on
	 * each port.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = __switchdev_port_obj_add(lower_dev, obj);
		if (err)
			break;
	}

	return err;
}

/**
 * switchdev_port_obj_add - Add port object
 *
 * @dev: port device
 * @obj: object to add
 *
 * Use a 2-phase prepare-commit transaction model to ensure
 * system is not left in a partially updated state due to
 * failure from driver/device.
 *
 * rtnl_lock must be held.
 */
int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj)
{
	int err;

	ASSERT_RTNL();

	/* Phase I: prepare for obj add.  Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support.  The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the obj.
	 */

	obj->trans = SWITCHDEV_TRANS_PREPARE;
	err = __switchdev_port_obj_add(dev, obj);
	if (err) {
		/* Prepare phase failed: abort the transaction.  Any
		 * resources reserved in the prepare phase are
		 * released.
		 */

		obj->trans = SWITCHDEV_TRANS_ABORT;
		__switchdev_port_obj_add(dev, obj);

		return err;
	}

	/* Phase II: commit obj add.  This cannot fail as a fault
	 * of the driver/device.  If it does, it's a bug in the
	 * driver/device because the driver said everything was OK
	 * in phase I.
	 */

	obj->trans = SWITCHDEV_TRANS_COMMIT;
	err = __switchdev_port_obj_add(dev, obj);
	WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
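/* Caller-side sketch (illustrative): fill in a switchdev_obj and let
 * switchdev_port_obj_add() drive the prepare/commit phases.  The object
 * ids and their type-specific fields live in <net/switchdev.h>;
 * SWITCHDEV_OBJ_PORT_VLAN here is an assumption about that header:
 *
 *	struct switchdev_obj obj = {
 *		.id = SWITCHDEV_OBJ_PORT_VLAN,
 *		// type-specific members of obj omitted here
 *	};
 *
 *	err = switchdev_port_obj_add(dev, &obj);
 */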

/**
 * switchdev_port_obj_del - Delete port object
 *
 * @dev: port device
 * @obj: object to delete
 */
int switchdev_port_obj_del(struct net_device *dev, struct switchdev_obj *obj)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_obj_del)
		return ops->switchdev_port_obj_del(dev, obj);

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to delete object on
	 * each port.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = switchdev_port_obj_del(lower_dev, obj);
		if (err)
			break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);

static DEFINE_MUTEX(switchdev_mutex);
static RAW_NOTIFIER_HEAD(switchdev_notif_chain);

/**
 * register_switchdev_notifier - Register notifier
 * @nb: notifier_block
 *
 * Register switch device notifier.  This should be used by code
 * which needs to monitor events happening in a particular device.
 * Return values are the same as for atomic_notifier_chain_register().
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	int err;

	mutex_lock(&switchdev_mutex);
	err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
	mutex_unlock(&switchdev_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);
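/* Usage sketch (illustrative; the foo_* names are hypothetical):
 * monitoring switchdev events from another subsystem.  The event ids
 * passed as @event are defined in <net/switchdev.h>, not in this file:
 *
 *	static int foo_switchdev_event(struct notifier_block *unused,
 *				       unsigned long event, void *ptr)
 *	{
 *		struct switchdev_notifier_info *info = ptr;
 *
 *		netdev_dbg(info->dev, "switchdev event %lu\n", event);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_switchdev_nb = {
 *		.notifier_call = foo_switchdev_event,
 *	};
 *
 *	err = register_switchdev_notifier(&foo_switchdev_nb);
 */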

/**
 * unregister_switchdev_notifier - Unregister notifier
 * @nb: notifier_block
 *
 * Unregister switch device notifier.
 * Return values are the same as for atomic_notifier_chain_unregister().
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	int err;

	mutex_lock(&switchdev_mutex);
	err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
	mutex_unlock(&switchdev_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);

/**
 * call_switchdev_notifiers - Call notifiers
 * @val: value passed unmodified to notifier function
 * @dev: port device
 * @info: notifier information data
 *
 * Call all network notifier blocks.  This should be called by a driver
 * when it needs to propagate a hardware event.
 * Return values are the same as for atomic_notifier_call_chain().
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info)
{
	int err;

	info->dev = dev;
	mutex_lock(&switchdev_mutex);
	err = raw_notifier_call_chain(&switchdev_notif_chain, val, info);
	mutex_unlock(&switchdev_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
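/* Driver-side sketch (illustrative): a switch driver propagates a
 * hardware event up the chain.  FOO_EVENT is a hypothetical placeholder
 * for an event id from <net/switchdev.h>; ->dev is filled in by
 * call_switchdev_notifiers() itself:
 *
 *	struct switchdev_notifier_info info;
 *
 *	call_switchdev_notifiers(FOO_EVENT, port_dev, &info);
 */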

/**
 * switchdev_port_bridge_setlink - Notify switch device port of bridge
 *	port attributes
 *
 * @dev: port device
 * @nlh: netlink msg with bridge port attributes
 * @flags: bridge setlink flags
 *
 * Notify switch device port of bridge port attributes
 */
int switchdev_port_bridge_setlink(struct net_device *dev,
				  struct nlmsghdr *nlh, u16 flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
		return 0;

	if (!ops->ndo_bridge_setlink)
		return -EOPNOTSUPP;

	return ops->ndo_bridge_setlink(dev, nlh, flags);
}
EXPORT_SYMBOL_GPL(switchdev_port_bridge_setlink);
Roopa Prabhu8a44dbb2015-01-29 22:40:13 -0800384
385/**
Jiri Pirkoebb9a032015-05-10 09:47:46 -0700386 * switchdev_port_bridge_dellink - Notify switch device port of bridge
Roopa Prabhu8a44dbb2015-01-29 22:40:13 -0800387 * port attribute delete
388 *
389 * @dev: port device
390 * @nlh: netlink msg with bridge port attributes
391 * @flags: bridge setlink flags
392 *
393 * Notify switch device port of bridge port attribute delete
394 */
Jiri Pirkoebb9a032015-05-10 09:47:46 -0700395int switchdev_port_bridge_dellink(struct net_device *dev,
396 struct nlmsghdr *nlh, u16 flags)
Roopa Prabhu8a44dbb2015-01-29 22:40:13 -0800397{
398 const struct net_device_ops *ops = dev->netdev_ops;
399
400 if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
401 return 0;
402
403 if (!ops->ndo_bridge_dellink)
404 return -EOPNOTSUPP;
405
406 return ops->ndo_bridge_dellink(dev, nlh, flags);
407}
Jiri Pirkoebb9a032015-05-10 09:47:46 -0700408EXPORT_SYMBOL_GPL(switchdev_port_bridge_dellink);

/**
 * ndo_dflt_switchdev_port_bridge_setlink - default ndo bridge setlink
 *	op for master devices
 *
 * @dev: port device
 * @nlh: netlink msg with bridge port attributes
 * @flags: bridge setlink flags
 *
 * Notify master device slaves of bridge port attributes
 */
int ndo_dflt_switchdev_port_bridge_setlink(struct net_device *dev,
					   struct nlmsghdr *nlh, u16 flags)
{
	struct net_device *lower_dev;
	struct list_head *iter;
	int ret = 0, err = 0;

	if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
		return ret;

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = switchdev_port_bridge_setlink(lower_dev, nlh, flags);
		if (err && err != -EOPNOTSUPP)
			ret = err;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ndo_dflt_switchdev_port_bridge_setlink);

/**
 * ndo_dflt_switchdev_port_bridge_dellink - default ndo bridge dellink
 *	op for master devices
 *
 * @dev: port device
 * @nlh: netlink msg with bridge port attributes
 * @flags: bridge dellink flags
 *
 * Notify master device slaves of bridge port attribute deletes
 */
int ndo_dflt_switchdev_port_bridge_dellink(struct net_device *dev,
					   struct nlmsghdr *nlh, u16 flags)
{
	struct net_device *lower_dev;
	struct list_head *iter;
	int ret = 0, err = 0;

	if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
		return ret;

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = switchdev_port_bridge_dellink(lower_dev, nlh, flags);
		if (err && err != -EOPNOTSUPP)
			ret = err;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ndo_dflt_switchdev_port_bridge_dellink);
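/* Usage sketch (illustrative; foo_master_netdev_ops is hypothetical):
 * a master device such as a bond or team can point its ndo ops at
 * these defaults so bridge attribute changes reach the switch ports
 * stacked below it:
 *
 *	static const struct net_device_ops foo_master_netdev_ops = {
 *		.ndo_bridge_setlink = ndo_dflt_switchdev_port_bridge_setlink,
 *		.ndo_bridge_dellink = ndo_dflt_switchdev_port_bridge_dellink,
 *	};
 */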

static struct net_device *switchdev_get_lowest_dev(struct net_device *dev)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct net_device *port_dev;
	struct list_head *iter;

	/* Recursively search down until we find a sw port dev.
	 * (A sw port dev supports switchdev_port_attr_get).
	 */

	if (ops && ops->switchdev_port_attr_get)
		return dev;

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		port_dev = switchdev_get_lowest_dev(lower_dev);
		if (port_dev)
			return port_dev;
	}

	return NULL;
}

static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi)
{
	struct switchdev_attr attr = {
		.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
	};
	struct switchdev_attr prev_attr;
	struct net_device *dev = NULL;
	int nhsel;

	/* For this route, all nexthop devs must be on the same switch. */

	for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
		const struct fib_nh *nh = &fi->fib_nh[nhsel];

		if (!nh->nh_dev)
			return NULL;

		dev = switchdev_get_lowest_dev(nh->nh_dev);
		if (!dev)
			return NULL;

		if (switchdev_port_attr_get(dev, &attr))
			return NULL;

		if (nhsel > 0) {
			if (prev_attr.ppid.id_len != attr.ppid.id_len)
				return NULL;
			if (memcmp(prev_attr.ppid.id, attr.ppid.id,
				   attr.ppid.id_len))
				return NULL;
		}

		prev_attr = attr;
	}

	return dev;
}

/**
 * switchdev_fib_ipv4_add - Add IPv4 route entry to switch
 *
 * @dst: route's IPv4 destination address
 * @dst_len: destination address length (prefix length)
 * @fi: route FIB info structure
 * @tos: route TOS
 * @type: route type
 * @nlflags: netlink flags passed in (NLM_F_*)
 * @tb_id: route table ID
 *
 * Add IPv4 route entry to switch device.
 */
int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
			   u8 tos, u8 type, u32 nlflags, u32 tb_id)
{
	struct net_device *dev;
	const struct switchdev_ops *ops;
	int err = 0;

	/* Don't offload route if using custom ip rules or if
	 * IPv4 FIB offloading has been disabled completely.
	 */

#ifdef CONFIG_IP_MULTIPLE_TABLES
	if (fi->fib_net->ipv4.fib_has_custom_rules)
		return 0;
#endif

	if (fi->fib_net->ipv4.fib_offload_disabled)
		return 0;

	dev = switchdev_get_dev_by_nhs(fi);
	if (!dev)
		return 0;
	ops = dev->switchdev_ops;

	if (ops->switchdev_fib_ipv4_add) {
		err = ops->switchdev_fib_ipv4_add(dev, htonl(dst), dst_len,
						  fi, tos, type, nlflags,
						  tb_id);
		if (!err)
			fi->fib_flags |= RTNH_F_EXTERNAL;
	}

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_add);
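/* Driver-side sketch (illustrative; the foo_* names are hypothetical):
 * the op receives the destination already converted to network byte
 * order (note the htonl() above).  Returning an error lets the caller
 * abort offloading (see switchdev_fib_ipv4_abort() below):
 *
 *	static int foo_fib_ipv4_add(struct net_device *dev, __be32 dst,
 *				    int dst_len, struct fib_info *fi,
 *				    u8 tos, u8 type, u32 nlflags,
 *				    u32 tb_id)
 *	{
 *		return foo_hw_route_add(dev, dst, dst_len, tb_id);
 *	}
 */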

/**
 * switchdev_fib_ipv4_del - Delete IPv4 route entry from switch
 *
 * @dst: route's IPv4 destination address
 * @dst_len: destination address length (prefix length)
 * @fi: route FIB info structure
 * @tos: route TOS
 * @type: route type
 * @tb_id: route table ID
 *
 * Delete IPv4 route entry from switch device.
 */
int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
			   u8 tos, u8 type, u32 tb_id)
{
	struct net_device *dev;
	const struct switchdev_ops *ops;
	int err = 0;

	if (!(fi->fib_flags & RTNH_F_EXTERNAL))
		return 0;

	dev = switchdev_get_dev_by_nhs(fi);
	if (!dev)
		return 0;
	ops = dev->switchdev_ops;

	if (ops->switchdev_fib_ipv4_del) {
		err = ops->switchdev_fib_ipv4_del(dev, htonl(dst), dst_len,
						  fi, tos, type, tb_id);
		if (!err)
			fi->fib_flags &= ~RTNH_F_EXTERNAL;
	}

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_del);

/**
 * switchdev_fib_ipv4_abort - Abort an IPv4 FIB operation
 *
 * @fi: route FIB info structure
 */
void switchdev_fib_ipv4_abort(struct fib_info *fi)
{
	/* There was a problem installing this route to the offload
	 * device.  For now, until we come up with more refined
	 * policy handling, abruptly end IPv4 fib offloading for
	 * the entire net by flushing offload device(s) of all
	 * IPv4 routes, and mark IPv4 fib offloading broken from
	 * this point forward.
	 */

	fib_flush_external(fi->fib_net);
	fi->fib_net->ipv4.fib_offload_disabled = true;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_abort);