Diffstat (limited to 'net/dsa/slave.c')
-rw-r--r-- | net/dsa/slave.c | 569
1 file changed, 390 insertions(+), 179 deletions(-)
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 3bc5ca40c9fb..491e3761b5f4 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -17,6 +17,7 @@
 #include <net/pkt_cls.h>
 #include <net/tc_act/tc_mirred.h>
 #include <linux/if_bridge.h>
+#include <linux/if_hsr.h>
 #include <linux/netpoll.h>
 #include <linux/ptp_classify.h>
 
@@ -68,8 +69,11 @@ static int dsa_slave_open(struct net_device *dev)
 	struct dsa_port *dp = dsa_slave_to_port(dev);
 	int err;
 
-	if (!(master->flags & IFF_UP))
-		return -ENETDOWN;
+	err = dev_open(master, NULL);
+	if (err < 0) {
+		netdev_err(dev, "failed to open master %s\n", master->name);
+		goto out;
+	}
 
 	if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
 		err = dev_uc_add(master, dev->dev_addr);
@@ -269,31 +273,34 @@ static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 
 static int dsa_slave_port_attr_set(struct net_device *dev,
 				   const struct switchdev_attr *attr,
-				   struct switchdev_trans *trans)
+				   struct netlink_ext_ack *extack)
 {
 	struct dsa_port *dp = dsa_slave_to_port(dev);
 	int ret;
 
+	if (!dsa_port_offloads_netdev(dp, attr->orig_dev))
+		return -EOPNOTSUPP;
+
 	switch (attr->id) {
 	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
-		ret = dsa_port_set_state(dp, attr->u.stp_state, trans);
+		ret = dsa_port_set_state(dp, attr->u.stp_state);
 		break;
 	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
 		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
-					      trans);
+					      extack);
 		break;
 	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
-		ret = dsa_port_ageing_time(dp, attr->u.ageing_time, trans);
+		ret = dsa_port_ageing_time(dp, attr->u.ageing_time);
 		break;
 	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
 		ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
-						trans);
+						extack);
 		break;
 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
-		ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, trans);
+		ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, extack);
 		break;
 	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
-		ret = dsa_port_mrouter(dp->cpu_dp, attr->u.mrouter, trans);
+		ret = dsa_port_mrouter(dp->cpu_dp, attr->u.mrouter, extack);
 		break;
 	default:
 		ret = -EOPNOTSUPP;
@@ -318,7 +325,7 @@ dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
 			continue;
 
 		vid = vlan_dev_vlan_id(upper_dev);
-		if (vid >= vlan->vid_begin && vid <= vlan->vid_end)
+		if (vid == vlan->vid)
 			return -EBUSY;
 	}
 
@@ -327,33 +334,38 @@ dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
 
 static int dsa_slave_vlan_add(struct net_device *dev,
 			      const struct switchdev_obj *obj,
-			      struct switchdev_trans *trans)
+			      struct netlink_ext_ack *extack)
 {
 	struct net_device *master = dsa_slave_to_master(dev);
 	struct dsa_port *dp = dsa_slave_to_port(dev);
 	struct switchdev_obj_port_vlan vlan;
-	int vid, err;
+	int err;
 
-	if (obj->orig_dev != dev)
+	if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
 		return -EOPNOTSUPP;
 
-	if (dsa_port_skip_vlan_configuration(dp))
+	if (dsa_port_skip_vlan_configuration(dp)) {
+		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
 		return 0;
+	}
 
 	vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);
 
 	/* Deny adding a bridge VLAN when there is already an 802.1Q upper with
 	 * the same VID.
 	 */
-	if (trans->ph_prepare && br_vlan_enabled(dp->bridge_dev)) {
+	if (br_vlan_enabled(dp->bridge_dev)) {
 		rcu_read_lock();
 		err = dsa_slave_vlan_check_for_8021q_uppers(dev, &vlan);
 		rcu_read_unlock();
-		if (err)
+		if (err) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Port already has a VLAN upper with this VID");
 			return err;
+		}
 	}
 
-	err = dsa_port_vlan_add(dp, &vlan, trans);
+	err = dsa_port_vlan_add(dp, &vlan, extack);
 	if (err)
 		return err;
 
@@ -363,47 +375,45 @@ static int dsa_slave_vlan_add(struct net_device *dev,
 	 */
 	vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;
 
-	err = dsa_port_vlan_add(dp->cpu_dp, &vlan, trans);
+	err = dsa_port_vlan_add(dp->cpu_dp, &vlan, extack);
 	if (err)
 		return err;
 
-	for (vid = vlan.vid_begin; vid <= vlan.vid_end; vid++) {
-		err = vlan_vid_add(master, htons(ETH_P_8021Q), vid);
-		if (err)
-			return err;
-	}
-
-	return 0;
+	return vlan_vid_add(master, htons(ETH_P_8021Q), vlan.vid);
 }
 
 static int dsa_slave_port_obj_add(struct net_device *dev,
 				  const struct switchdev_obj *obj,
-				  struct switchdev_trans *trans,
 				  struct netlink_ext_ack *extack)
 {
 	struct dsa_port *dp = dsa_slave_to_port(dev);
 	int err;
 
-	/* For the prepare phase, ensure the full set of changes is feasable in
-	 * one go in order to signal a failure properly. If an operation is not
-	 * supported, return -EOPNOTSUPP.
-	 */
-
 	switch (obj->id) {
 	case SWITCHDEV_OBJ_ID_PORT_MDB:
-		if (obj->orig_dev != dev)
+		if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
 			return -EOPNOTSUPP;
-		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj), trans);
+		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
 		break;
 	case SWITCHDEV_OBJ_ID_HOST_MDB:
 		/* DSA can directly translate this to a normal MDB add,
 		 * but on the CPU port.
 		 */
-		err = dsa_port_mdb_add(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj),
-				       trans);
+		err = dsa_port_mdb_add(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
 		break;
 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
-		err = dsa_slave_vlan_add(dev, obj, trans);
+		err = dsa_slave_vlan_add(dev, obj, extack);
+		break;
+	case SWITCHDEV_OBJ_ID_MRP:
+		if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
+			return -EOPNOTSUPP;
+		err = dsa_port_mrp_add(dp, SWITCHDEV_OBJ_MRP(obj));
+		break;
+	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
+		if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
+			return -EOPNOTSUPP;
+		err = dsa_port_mrp_add_ring_role(dp,
+						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
 		break;
 	default:
 		err = -EOPNOTSUPP;
@@ -419,9 +429,9 @@ static int dsa_slave_vlan_del(struct net_device *dev,
 	struct net_device *master = dsa_slave_to_master(dev);
 	struct dsa_port *dp = dsa_slave_to_port(dev);
 	struct switchdev_obj_port_vlan *vlan;
-	int vid, err;
+	int err;
 
-	if (obj->orig_dev != dev)
+	if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
 		return -EOPNOTSUPP;
 
 	if (dsa_port_skip_vlan_configuration(dp))
@@ -436,8 +446,7 @@ static int dsa_slave_vlan_del(struct net_device *dev,
 	if (err)
 		return err;
 
-	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
-		vlan_vid_del(master, htons(ETH_P_8021Q), vid);
+	vlan_vid_del(master, htons(ETH_P_8021Q), vlan->vid);
 
 	return 0;
 }
@@ -450,7 +459,7 @@ static int dsa_slave_port_obj_del(struct net_device *dev,
 
 	switch (obj->id) {
 	case SWITCHDEV_OBJ_ID_PORT_MDB:
-		if (obj->orig_dev != dev)
+		if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
 			return -EOPNOTSUPP;
 		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
 		break;
@@ -463,6 +472,17 @@ static int dsa_slave_port_obj_del(struct net_device *dev,
 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
 		err = dsa_slave_vlan_del(dev, obj);
 		break;
+	case SWITCHDEV_OBJ_ID_MRP:
+		if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
+			return -EOPNOTSUPP;
+		err = dsa_port_mrp_del(dp, SWITCHDEV_OBJ_MRP(obj));
+		break;
+	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
+		if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
+			return -EOPNOTSUPP;
+		err = dsa_port_mrp_del_ring_role(dp,
+						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
+		break;
 	default:
 		err = -EOPNOTSUPP;
 		break;
@@ -522,10 +542,10 @@ static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
 	if (!clone)
 		return;
 
-	DSA_SKB_CB(skb)->clone = clone;
-
-	if (ds->ops->port_txtstamp(ds, p->dp->index, clone, type))
+	if (ds->ops->port_txtstamp(ds, p->dp->index, clone, type)) {
+		DSA_SKB_CB(skb)->clone = clone;
 		return;
+	}
 
 	kfree_skb(clone);
 }
@@ -548,17 +568,36 @@ netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
 }
 EXPORT_SYMBOL_GPL(dsa_enqueue_skb);
 
+static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
+{
+	int needed_headroom = dev->needed_headroom;
+	int needed_tailroom = dev->needed_tailroom;
+
+	/* For tail taggers, we need to pad short frames ourselves, to ensure
+	 * that the tail tag does not fail at its role of being at the end of
+	 * the packet, once the master interface pads the frame. Account for
+	 * that pad length here, and pad later.
+	 */
+	if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
+		needed_tailroom += ETH_ZLEN - skb->len;
+	/* skb_headroom() returns unsigned int... */
+	needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
+	needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);
+
+	if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
+		/* No reallocation needed, yay! */
+		return 0;
+
+	return pskb_expand_head(skb, needed_headroom, needed_tailroom,
+				GFP_ATOMIC);
+}
+
 static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct dsa_slave_priv *p = netdev_priv(dev);
-	struct pcpu_sw_netstats *s;
 	struct sk_buff *nskb;
 
-	s = this_cpu_ptr(p->stats64);
-	u64_stats_update_begin(&s->syncp);
-	s->tx_packets++;
-	s->tx_bytes += skb->len;
-	u64_stats_update_end(&s->syncp);
+	dev_sw_netstats_tx_add(dev, 1, skb->len);
 
 	DSA_SKB_CB(skb)->clone = NULL;
 
@@ -567,6 +606,17 @@ static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
 	 */
 	dsa_skb_tx_timestamp(p, skb);
 
+	if (dsa_realloc_skb(skb, dev)) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	/* needed_tailroom should still be 'warm' in the cache line from
+	 * dsa_realloc_skb(), which has also ensured that padding is safe.
+	 */
+	if (dev->needed_tailroom)
+		eth_skb_pad(skb);
+
 	/* Transmit function may have to reallocate the original SKB,
 	 * in which case it must have freed it. Only free it here on error.
 	 */
@@ -679,7 +729,6 @@ static void dsa_slave_get_ethtool_stats(struct net_device *dev,
 					uint64_t *data)
 {
 	struct dsa_port *dp = dsa_slave_to_port(dev);
-	struct dsa_slave_priv *p = netdev_priv(dev);
 	struct dsa_switch *ds = dp->ds;
 	struct pcpu_sw_netstats *s;
 	unsigned int start;
@@ -688,7 +737,7 @@ static void dsa_slave_get_ethtool_stats(struct net_device *dev,
 	for_each_possible_cpu(i) {
 		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
 
-		s = per_cpu_ptr(p->stats64, i);
+		s = per_cpu_ptr(dev->tstats, i);
 		do {
 			start = u64_stats_fetch_begin_irq(&s->syncp);
 			tx_packets = s->tx_packets;
@@ -1217,15 +1266,6 @@ static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
 	return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
 }
 
-static void dsa_slave_get_stats64(struct net_device *dev,
-				  struct rtnl_link_stats64 *stats)
-{
-	struct dsa_slave_priv *p = netdev_priv(dev);
-
-	netdev_stats_to_stats64(stats, &dev->stats);
-	dev_fetch_sw_netstats(stats, p->stats64);
-}
-
 static int dsa_slave_get_rxnfc(struct net_device *dev,
 			       struct ethtool_rxnfc *nfc, u32 *rule_locs)
 {
@@ -1269,35 +1309,29 @@ static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
 	struct dsa_port *dp = dsa_slave_to_port(dev);
 	struct switchdev_obj_port_vlan vlan = {
 		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
-		.vid_begin = vid,
-		.vid_end = vid,
+		.vid = vid,
 		/* This API only allows programming tagged, non-PVID VIDs */
 		.flags = 0,
 	};
-	struct switchdev_trans trans;
+	struct netlink_ext_ack extack = {0};
 	int ret;
 
 	/* User port... */
-	trans.ph_prepare = true;
-	ret = dsa_port_vlan_add(dp, &vlan, &trans);
-	if (ret)
-		return ret;
-
-	trans.ph_prepare = false;
-	ret = dsa_port_vlan_add(dp, &vlan, &trans);
-	if (ret)
+	ret = dsa_port_vlan_add(dp, &vlan, &extack);
+	if (ret) {
+		if (extack._msg)
+			netdev_err(dev, "%s\n", extack._msg);
 		return ret;
+	}
 
 	/* And CPU port... */
-	trans.ph_prepare = true;
-	ret = dsa_port_vlan_add(dp->cpu_dp, &vlan, &trans);
-	if (ret)
-		return ret;
-
-	trans.ph_prepare = false;
-	ret = dsa_port_vlan_add(dp->cpu_dp, &vlan, &trans);
-	if (ret)
+	ret = dsa_port_vlan_add(dp->cpu_dp, &vlan, &extack);
+	if (ret) {
+		if (extack._msg)
+			netdev_err(dev, "CPU port %d: %s\n", dp->cpu_dp->index,
+				   extack._msg);
 		return ret;
+	}
 
 	return vlan_vid_add(master, proto, vid);
 }
@@ -1308,8 +1342,7 @@ static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
 	struct net_device *master = dsa_slave_to_master(dev);
 	struct dsa_port *dp = dsa_slave_to_port(dev);
 	struct switchdev_obj_port_vlan vlan = {
-		.vid_begin = vid,
-		.vid_end = vid,
+		.vid = vid,
 		/* This API only allows programming tagged, non-PVID VIDs */
 		.flags = 0,
 	};
@@ -1437,7 +1470,7 @@ out:
 	dsa_hw_port_list_free(&hw_port_list);
 }
 
-static int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
+int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
 {
 	struct net_device *master = dsa_slave_to_master(dev);
 	struct dsa_port *dp = dsa_slave_to_port(dev);
@@ -1555,20 +1588,20 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
 };
 
 /* legacy way, bypassing the bridge *****************************************/
-int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
-		       struct net_device *dev,
-		       const unsigned char *addr, u16 vid,
-		       u16 flags,
-		       struct netlink_ext_ack *extack)
+static int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+			      struct net_device *dev,
+			      const unsigned char *addr, u16 vid,
+			      u16 flags,
+			      struct netlink_ext_ack *extack)
 {
 	struct dsa_port *dp = dsa_slave_to_port(dev);
 
 	return dsa_port_fdb_add(dp, addr, vid);
 }
 
-int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
-		       struct net_device *dev,
-		       const unsigned char *addr, u16 vid)
+static int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
			      struct net_device *dev,
+			      const unsigned char *addr, u16 vid)
 {
 	struct dsa_port *dp = dsa_slave_to_port(dev);
 
@@ -1582,6 +1615,18 @@ static struct devlink_port *dsa_slave_get_devlink_port(struct net_device *dev)
 	return dp->ds->devlink ? &dp->devlink_port : NULL;
 }
 
+static void dsa_slave_get_stats64(struct net_device *dev,
+				  struct rtnl_link_stats64 *s)
+{
+	struct dsa_port *dp = dsa_slave_to_port(dev);
+	struct dsa_switch *ds = dp->ds;
+
+	if (ds->ops->get_stats64)
+		ds->ops->get_stats64(ds, dp->index, s);
+	else
+		dev_get_tstats64(dev, s);
+}
+
 static const struct net_device_ops dsa_slave_netdev_ops = {
 	.ndo_open		= dsa_slave_open,
 	.ndo_stop		= dsa_slave_close,
@@ -1703,6 +1748,27 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
 	return ret;
 }
 
+void dsa_slave_setup_tagger(struct net_device *slave)
+{
+	struct dsa_port *dp = dsa_slave_to_port(slave);
+	struct dsa_slave_priv *p = netdev_priv(slave);
+	const struct dsa_port *cpu_dp = dp->cpu_dp;
+	struct net_device *master = cpu_dp->master;
+
+	if (cpu_dp->tag_ops->tail_tag)
+		slave->needed_tailroom = cpu_dp->tag_ops->overhead;
+	else
+		slave->needed_headroom = cpu_dp->tag_ops->overhead;
+	/* Try to save one extra realloc later in the TX path (in the master)
+	 * by also inheriting the master's needed headroom and tailroom.
+	 * The 8021q driver also does this.
+	 */
+	slave->needed_headroom += master->needed_headroom;
+	slave->needed_tailroom += master->needed_tailroom;
+
+	p->xmit = cpu_dp->tag_ops->xmit;
+}
+
 static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
 static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
 					    struct netdev_queue *txq,
@@ -1744,20 +1810,6 @@ int dsa_slave_resume(struct net_device *slave_dev)
 	return 0;
 }
 
-static void dsa_slave_notify(struct net_device *dev, unsigned long val)
-{
-	struct net_device *master = dsa_slave_to_master(dev);
-	struct dsa_port *dp = dsa_slave_to_port(dev);
-	struct dsa_notifier_register_info rinfo = {
-		.switch_number = dp->ds->index,
-		.port_number = dp->index,
-		.master = master,
-		.info.dev = dev,
-	};
-
-	call_dsa_notifiers(val, dev, &rinfo.info);
-}
-
 int dsa_slave_create(struct dsa_port *port)
 {
 	const struct dsa_port *cpu_dp = port->cpu_dp;
@@ -1801,8 +1853,8 @@ int dsa_slave_create(struct dsa_port *port)
 	slave_dev->vlan_features = master->vlan_features;
 
 	p = netdev_priv(slave_dev);
-	p->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
-	if (!p->stats64) {
+	slave_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+	if (!slave_dev->tstats) {
 		free_netdev(slave_dev);
 		return -ENOMEM;
 	}
@@ -1813,15 +1865,15 @@ int dsa_slave_create(struct dsa_port *port)
 	p->dp = port;
 	INIT_LIST_HEAD(&p->mall_tc_list);
-	p->xmit = cpu_dp->tag_ops->xmit;
 	port->slave = slave_dev;
+	dsa_slave_setup_tagger(slave_dev);
 
 	rtnl_lock();
 	ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
 	rtnl_unlock();
 	if (ret && ret != -EOPNOTSUPP)
-		dev_warn(ds->dev, "nonfatal error %d setting MTU on port %d\n",
-			 ret, port->index);
+		dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
+			 ret, ETH_DATA_LEN, port->index);
 
 	netif_carrier_off(slave_dev);
 
@@ -1833,8 +1885,6 @@ int dsa_slave_create(struct dsa_port *port)
 		goto out_gcells;
 	}
 
-	dsa_slave_notify(slave_dev, DSA_PORT_REGISTER);
-
 	rtnl_lock();
 
 	ret = register_netdevice(slave_dev);
@@ -1864,7 +1914,7 @@ out_phy:
 out_gcells:
 	gro_cells_destroy(&p->gcells);
 out_free:
-	free_percpu(p->stats64);
+	free_percpu(slave_dev->tstats);
 	free_netdev(slave_dev);
 	port->slave = NULL;
 	return ret;
@@ -1883,10 +1933,9 @@ void dsa_slave_destroy(struct net_device *slave_dev)
 	phylink_disconnect_phy(dp->pl);
 	rtnl_unlock();
 
-	dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
 	phylink_destroy(dp->pl);
 	gro_cells_destroy(&p->gcells);
-	free_percpu(p->stats64);
+	free_percpu(slave_dev->tstats);
 	free_netdev(slave_dev);
 }
 
@@ -1894,6 +1943,7 @@ bool dsa_slave_dev_check(const struct net_device *dev)
 {
 	return dev->netdev_ops == &dsa_slave_netdev_ops;
 }
+EXPORT_SYMBOL_GPL(dsa_slave_dev_check);
 
 static int dsa_slave_changeupper(struct net_device *dev,
 				 struct netdev_notifier_changeupper_info *info)
@@ -1911,6 +1961,59 @@ static int dsa_slave_changeupper(struct net_device *dev,
 			dsa_port_bridge_leave(dp, info->upper_dev);
 			err = NOTIFY_OK;
 		}
+	} else if (netif_is_lag_master(info->upper_dev)) {
+		if (info->linking) {
+			err = dsa_port_lag_join(dp, info->upper_dev,
+						info->upper_info);
+			if (err == -EOPNOTSUPP) {
+				NL_SET_ERR_MSG_MOD(info->info.extack,
+						   "Offloading not supported");
+				err = 0;
+			}
+			err = notifier_from_errno(err);
+		} else {
+			dsa_port_lag_leave(dp, info->upper_dev);
+			err = NOTIFY_OK;
+		}
+	} else if (is_hsr_master(info->upper_dev)) {
+		if (info->linking) {
+			err = dsa_port_hsr_join(dp, info->upper_dev);
+			if (err == -EOPNOTSUPP) {
+				NL_SET_ERR_MSG_MOD(info->info.extack,
						   "Offloading not supported");
+				err = 0;
+			}
+			err = notifier_from_errno(err);
+		} else {
+			dsa_port_hsr_leave(dp, info->upper_dev);
+			err = NOTIFY_OK;
+		}
+	}
+
+	return err;
+}
+
+static int
+dsa_slave_lag_changeupper(struct net_device *dev,
+			  struct netdev_notifier_changeupper_info *info)
+{
+	struct net_device *lower;
+	struct list_head *iter;
+	int err = NOTIFY_DONE;
+	struct dsa_port *dp;
+
+	netdev_for_each_lower_dev(dev, lower, iter) {
+		if (!dsa_slave_dev_check(lower))
+			continue;
+
+		dp = dsa_slave_to_port(lower);
+		if (!dp->lag_dev)
+			/* Software LAG */
+			continue;
+
+		err = dsa_slave_changeupper(lower, info);
+		if (notifier_to_errno(err))
+			break;
 	}
 
 	return err;
 }
@@ -1987,137 +2090,245 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
 	switch (event) {
 	case NETDEV_PRECHANGEUPPER: {
 		struct netdev_notifier_changeupper_info *info = ptr;
+		struct dsa_switch *ds;
+		struct dsa_port *dp;
+		int err;
 
 		if (!dsa_slave_dev_check(dev))
 			return dsa_prevent_bridging_8021q_upper(dev, ptr);
 
+		dp = dsa_slave_to_port(dev);
+		ds = dp->ds;
+
+		if (ds->ops->port_prechangeupper) {
+			err = ds->ops->port_prechangeupper(ds, dp->index, info);
+			if (err)
+				return notifier_from_errno(err);
+		}
+
 		if (is_vlan_dev(info->upper_dev))
 			return dsa_slave_check_8021q_upper(dev, ptr);
 
 		break;
 	}
 	case NETDEV_CHANGEUPPER:
+		if (dsa_slave_dev_check(dev))
+			return dsa_slave_changeupper(dev, ptr);
+
+		if (netif_is_lag_master(dev))
+			return dsa_slave_lag_changeupper(dev, ptr);
+
+		break;
+	case NETDEV_CHANGELOWERSTATE: {
+		struct netdev_notifier_changelowerstate_info *info = ptr;
+		struct dsa_port *dp;
+		int err;
+
 		if (!dsa_slave_dev_check(dev))
+			break;
+
+		dp = dsa_slave_to_port(dev);
+
+		err = dsa_port_lag_change(dp, info->lower_state_info);
+		return notifier_from_errno(err);
+	}
+	case NETDEV_GOING_DOWN: {
+		struct dsa_port *dp, *cpu_dp;
+		struct dsa_switch_tree *dst;
+		LIST_HEAD(close_list);
+
+		if (!netdev_uses_dsa(dev))
 			return NOTIFY_DONE;
 
-		return dsa_slave_changeupper(dev, ptr);
+		cpu_dp = dev->dsa_ptr;
+		dst = cpu_dp->ds->dst;
+
+		list_for_each_entry(dp, &dst->ports, list) {
+			if (!dsa_is_user_port(dp->ds, dp->index))
+				continue;
+
+			list_add(&dp->slave->close_list, &close_list);
+		}
+
+		dev_close_many(&close_list, true);
+
+		return NOTIFY_OK;
+	}
+	default:
+		break;
 	}
 
 	return NOTIFY_DONE;
 }
 
-struct dsa_switchdev_event_work {
-	struct work_struct work;
-	struct switchdev_notifier_fdb_info fdb_info;
-	struct net_device *dev;
-	unsigned long event;
-};
+static void
+dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
+{
+	struct dsa_switch *ds = switchdev_work->ds;
+	struct switchdev_notifier_fdb_info info;
+	struct dsa_port *dp;
+
+	if (!dsa_is_user_port(ds, switchdev_work->port))
+		return;
+
+	info.addr = switchdev_work->addr;
+	info.vid = switchdev_work->vid;
+	info.offloaded = true;
+	dp = dsa_to_port(ds, switchdev_work->port);
+	call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
+				 dp->slave, &info.info, NULL);
+}
 
 static void dsa_slave_switchdev_event_work(struct work_struct *work)
 {
 	struct dsa_switchdev_event_work *switchdev_work =
 		container_of(work, struct dsa_switchdev_event_work, work);
-	struct net_device *dev = switchdev_work->dev;
-	struct switchdev_notifier_fdb_info *fdb_info;
-	struct dsa_port *dp = dsa_slave_to_port(dev);
+	struct dsa_switch *ds = switchdev_work->ds;
+	struct dsa_port *dp;
 	int err;
 
+	dp = dsa_to_port(ds, switchdev_work->port);
+
 	rtnl_lock();
 	switch (switchdev_work->event) {
 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
-		fdb_info = &switchdev_work->fdb_info;
-		if (!fdb_info->added_by_user)
-			break;
-
-		err = dsa_port_fdb_add(dp, fdb_info->addr, fdb_info->vid);
+		err = dsa_port_fdb_add(dp, switchdev_work->addr,
+				       switchdev_work->vid);
 		if (err) {
-			netdev_dbg(dev, "fdb add failed err=%d\n", err);
+			dev_err(ds->dev,
+				"port %d failed to add %pM vid %d to fdb: %d\n",
+				dp->index, switchdev_work->addr,
+				switchdev_work->vid, err);
 			break;
 		}
-		fdb_info->offloaded = true;
-		call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
-					 &fdb_info->info, NULL);
+		dsa_fdb_offload_notify(switchdev_work);
 		break;
 
 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
-		fdb_info = &switchdev_work->fdb_info;
-		if (!fdb_info->added_by_user)
-			break;
-
-		err = dsa_port_fdb_del(dp, fdb_info->addr, fdb_info->vid);
+		err = dsa_port_fdb_del(dp, switchdev_work->addr,
+				       switchdev_work->vid);
 		if (err) {
-			netdev_dbg(dev, "fdb del failed err=%d\n", err);
-			dev_close(dev);
+			dev_err(ds->dev,
+				"port %d failed to delete %pM vid %d from fdb: %d\n",
+				dp->index, switchdev_work->addr,
+				switchdev_work->vid, err);
 		}
+
 		break;
 	}
 	rtnl_unlock();
 
-	kfree(switchdev_work->fdb_info.addr);
 	kfree(switchdev_work);
-	dev_put(dev);
+	if (dsa_is_user_port(ds, dp->index))
+		dev_put(dp->slave);
 }
 
-static int
-dsa_slave_switchdev_fdb_work_init(struct dsa_switchdev_event_work *
-				  switchdev_work,
-				  const struct switchdev_notifier_fdb_info *
-				  fdb_info)
-{
-	memcpy(&switchdev_work->fdb_info, fdb_info,
-	       sizeof(switchdev_work->fdb_info));
-	switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
-	if (!switchdev_work->fdb_info.addr)
-		return -ENOMEM;
-	ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
-			fdb_info->addr);
+static int dsa_lower_dev_walk(struct net_device *lower_dev,
+			      struct netdev_nested_priv *priv)
+{
+	if (dsa_slave_dev_check(lower_dev)) {
+		priv->data = (void *)netdev_priv(lower_dev);
+		return 1;
+	}
 
 	return 0;
 }
 
+static struct dsa_slave_priv *dsa_slave_dev_lower_find(struct net_device *dev)
+{
+	struct netdev_nested_priv priv = {
+		.data = NULL,
+	};
+
+	netdev_walk_all_lower_dev_rcu(dev, dsa_lower_dev_walk, &priv);
+
+	return (struct dsa_slave_priv *)priv.data;
+}
+
 /* Called under rcu_read_lock() */
 static int dsa_slave_switchdev_event(struct notifier_block *unused,
 				     unsigned long event, void *ptr)
 {
 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+	const struct switchdev_notifier_fdb_info *fdb_info;
 	struct dsa_switchdev_event_work *switchdev_work;
+	struct dsa_port *dp;
 	int err;
 
-	if (event == SWITCHDEV_PORT_ATTR_SET) {
+	switch (event) {
+	case SWITCHDEV_PORT_ATTR_SET:
 		err = switchdev_handle_port_attr_set(dev, ptr,
 						     dsa_slave_dev_check,
 						     dsa_slave_port_attr_set);
 		return notifier_from_errno(err);
-	}
+	case SWITCHDEV_FDB_ADD_TO_DEVICE:
+	case SWITCHDEV_FDB_DEL_TO_DEVICE:
+		fdb_info = ptr;
 
-	if (!dsa_slave_dev_check(dev))
-		return NOTIFY_DONE;
+		if (dsa_slave_dev_check(dev)) {
+			if (!fdb_info->added_by_user)
+				return NOTIFY_OK;
 
-	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
-	if (!switchdev_work)
-		return NOTIFY_BAD;
+			dp = dsa_slave_to_port(dev);
+		} else {
+			/* Snoop addresses learnt on foreign interfaces
+			 * bridged with us, for switches that don't
+			 * automatically learn SA from CPU-injected traffic
+			 */
+			struct net_device *br_dev;
+			struct dsa_slave_priv *p;
+
+			br_dev = netdev_master_upper_dev_get_rcu(dev);
+			if (!br_dev)
+				return NOTIFY_DONE;
+
+			if (!netif_is_bridge_master(br_dev))
+				return NOTIFY_DONE;
+
+			p = dsa_slave_dev_lower_find(br_dev);
+			if (!p)
+				return NOTIFY_DONE;
+
+			dp = p->dp->cpu_dp;
+
+			if (!dp->ds->assisted_learning_on_cpu_port)
+				return NOTIFY_DONE;
+
+			/* When the bridge learns an address on an offloaded
+			 * LAG we don't want to send traffic to the CPU, the
+			 * other ports bridged with the LAG should be able to
+			 * autonomously forward towards it.
+			 */
+			if (dsa_tree_offloads_netdev(dp->ds->dst, dev))
+				return NOTIFY_DONE;
+		}
 
-	INIT_WORK(&switchdev_work->work,
-		  dsa_slave_switchdev_event_work);
-	switchdev_work->dev = dev;
-	switchdev_work->event = event;
+		if (!dp->ds->ops->port_fdb_add || !dp->ds->ops->port_fdb_del)
+			return NOTIFY_DONE;
 
-	switch (event) {
-	case SWITCHDEV_FDB_ADD_TO_DEVICE:
-	case SWITCHDEV_FDB_DEL_TO_DEVICE:
-		if (dsa_slave_switchdev_fdb_work_init(switchdev_work, ptr))
-			goto err_fdb_work_init;
-		dev_hold(dev);
+		switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+		if (!switchdev_work)
+			return NOTIFY_BAD;
+
+		INIT_WORK(&switchdev_work->work,
+			  dsa_slave_switchdev_event_work);
+		switchdev_work->ds = dp->ds;
+		switchdev_work->port = dp->index;
+		switchdev_work->event = event;
+
+		ether_addr_copy(switchdev_work->addr,
+				fdb_info->addr);
+		switchdev_work->vid = fdb_info->vid;
+
+		/* Hold a reference on the slave for dsa_fdb_offload_notify */
+		if (dsa_is_user_port(dp->ds, dp->index))
+			dev_hold(dev);
+		dsa_schedule_work(&switchdev_work->work);
 		break;
 	default:
-		kfree(switchdev_work);
 		return NOTIFY_DONE;
 	}
 
-	dsa_schedule_work(&switchdev_work->work);
 	return NOTIFY_OK;
-
-err_fdb_work_init:
-	kfree(switchdev_work);
-	return NOTIFY_BAD;
 }
 
 static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
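For context on the headroom/tailroom hunks above: dsa_slave_setup_tagger() reserves the tagging protocol's overhead on the slave netdevice, and dsa_slave_xmit() calls dsa_realloc_skb() once before tagging, so a tagger may insert its tag without checking for room. The sketch below illustrates that guarantee with a hypothetical header tagger; example_tag_xmit, EXAMPLE_TAG_LEN and the tag layout are invented for illustration and are not part of this diff, while skb_push()/memmove() and the dsa_priv.h helpers are the real APIs the in-tree DSA/EDSA taggers use.

/* Hypothetical tagger sketch (not from this diff). Because
 * slave->needed_headroom covers the tag and dsa_realloc_skb()
 * already ran, the skb_push() below cannot run out of headroom.
 */
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include "dsa_priv.h"

#define EXAMPLE_TAG_LEN	4	/* invented 4-byte tag after the MAC addresses */

static struct sk_buff *example_tag_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	u8 *tag;

	/* Guaranteed room: headroom was reserved via dev->needed_headroom */
	skb_push(skb, EXAMPLE_TAG_LEN);

	/* Shift the MAC addresses up and place the tag behind them,
	 * the same pattern the in-tree header taggers follow.
	 */
	memmove(skb->data, skb->data + EXAMPLE_TAG_LEN, 2 * ETH_ALEN);
	tag = skb->data + 2 * ETH_ALEN;

	tag[0] = dp->index;	/* egress port, invented layout */
	tag[1] = dp->ds->index;
	tag[2] = 0;
	tag[3] = 0;

	return skb;
}

A tail tagger would instead set needed_tailroom and skb_put() its tag, relying on dsa_slave_xmit() having already padded short frames to ETH_ZLEN via eth_skb_pad(), as the dsa_realloc_skb() comment in the diff explains.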