From 9807a54cd74149988f5d20088bf7a7957c205bfb Mon Sep 17 00:00:00 2001 From: Jarno Rajahalme Date: Mon, 7 Jan 2013 09:54:31 -0800 Subject: linux/openvswitch.h: Make OVSP_LOCAL 32-bit. OVS ports are now 32-bit, so OVSP_LOCAL should be too. (Internally, kernel module still keeps port numbers 16-bit, though.) Signed-off-by: Jarno Rajahalme Signed-off-by: Jesse Gross --- include/linux/openvswitch.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/openvswitch.h b/include/linux/openvswitch.h index d42e174bd0c8..99e6414a40d9 100644 --- a/include/linux/openvswitch.h +++ b/include/linux/openvswitch.h @@ -94,7 +94,7 @@ struct ovs_vport_stats { }; /* Fixed logical ports. */ -#define OVSP_LOCAL ((__u16)0) +#define OVSP_LOCAL ((__u32)0) /* Packet transfer. */ -- cgit v1.2.3-70-g09d2 From 4490108b4a5ada14c7be712260829faecc814ae5 Mon Sep 17 00:00:00 2001 From: Ben Pfaff Date: Fri, 15 Feb 2013 17:29:22 -0800 Subject: openvswitch: Allow OVS_USERSPACE_ATTR_USERDATA to be variable length. Until now, the optional OVS_USERSPACE_ATTR_USERDATA attribute had to be exactly 64 bits long, if it was present. However, 64 bits is not enough space to associate as much information with a flow as would be convenient for some userspace features now under development. This commit generalizes the attribute, allowing it to be any length. This generalization is backward-compatible: if userspace only uses 64-bit attributes, then it will not see any change in behavior. CC: Romain Lenglet Signed-off-by: Ben Pfaff Signed-off-by: Jesse Gross --- include/linux/openvswitch.h | 11 ++++++----- net/openvswitch/datapath.c | 11 ++++++----- net/openvswitch/datapath.h | 2 +- 3 files changed, 13 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/openvswitch.h b/include/linux/openvswitch.h index 99e6414a40d9..67d6c7b03581 100644 --- a/include/linux/openvswitch.h +++ b/include/linux/openvswitch.h @@ -127,7 +127,8 @@ enum ovs_packet_cmd { * for %OVS_PACKET_CMD_EXECUTE. It has nested %OVS_ACTION_ATTR_* attributes. * @OVS_PACKET_ATTR_USERDATA: Present for an %OVS_PACKET_CMD_ACTION * notification if the %OVS_ACTION_ATTR_USERSPACE action specified an - * %OVS_USERSPACE_ATTR_USERDATA attribute. + * %OVS_USERSPACE_ATTR_USERDATA attribute, with the same length and content + * specified there. * * These attributes follow the &struct ovs_header within the Generic Netlink * payload for %OVS_PACKET_* commands. @@ -137,7 +138,7 @@ enum ovs_packet_attr { OVS_PACKET_ATTR_PACKET, /* Packet data. */ OVS_PACKET_ATTR_KEY, /* Nested OVS_KEY_ATTR_* attributes. */ OVS_PACKET_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */ - OVS_PACKET_ATTR_USERDATA, /* u64 OVS_ACTION_ATTR_USERSPACE arg. */ + OVS_PACKET_ATTR_USERDATA, /* OVS_ACTION_ATTR_USERSPACE arg. */ __OVS_PACKET_ATTR_MAX }; @@ -389,13 +390,13 @@ enum ovs_sample_attr { * enum ovs_userspace_attr - Attributes for %OVS_ACTION_ATTR_USERSPACE action. * @OVS_USERSPACE_ATTR_PID: u32 Netlink PID to which the %OVS_PACKET_CMD_ACTION * message should be sent. Required. - * @OVS_USERSPACE_ATTR_USERDATA: If present, its u64 argument is copied to the - * %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA, + * @OVS_USERSPACE_ATTR_USERDATA: If present, its variable-length argument is + * copied to the %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA. */ enum ovs_userspace_attr { OVS_USERSPACE_ATTR_UNSPEC, OVS_USERSPACE_ATTR_PID, /* u32 Netlink PID to receive upcalls. 
*/ - OVS_USERSPACE_ATTR_USERDATA, /* u64 optional user-specified cookie. */ + OVS_USERSPACE_ATTR_USERDATA, /* Optional user-specified cookie. */ __OVS_USERSPACE_ATTR_MAX }; diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index f9d2438e6437..96cd5b243d57 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -370,8 +370,8 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex, len = sizeof(struct ovs_header); len += nla_total_size(skb->len); len += nla_total_size(FLOW_BUFSIZE); - if (upcall_info->cmd == OVS_PACKET_CMD_ACTION) - len += nla_total_size(8); + if (upcall_info->userdata) + len += NLA_ALIGN(upcall_info->userdata->nla_len); user_skb = genlmsg_new(len, GFP_ATOMIC); if (!user_skb) { @@ -388,8 +388,9 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex, nla_nest_end(user_skb, nla); if (upcall_info->userdata) - nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA, - nla_get_u64(upcall_info->userdata)); + __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA, + nla_len(upcall_info->userdata), + nla_data(upcall_info->userdata)); nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len); @@ -544,7 +545,7 @@ static int validate_userspace(const struct nlattr *attr) { static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = { [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 }, - [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_U64 }, + [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC }, }; struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1]; int error; diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h index 031dfbf37c93..9125ad5c5aeb 100644 --- a/net/openvswitch/datapath.h +++ b/net/openvswitch/datapath.h @@ -119,7 +119,7 @@ struct ovs_skb_cb { * struct dp_upcall - metadata to include with a packet to send to userspace * @cmd: One of %OVS_PACKET_CMD_*. * @key: Becomes %OVS_PACKET_ATTR_KEY. Must be nonnull. - * @userdata: If nonnull, its u64 value is extracted and passed to userspace as + * @userdata: If nonnull, its variable-length value is passed to userspace as * %OVS_PACKET_ATTR_USERDATA. * @pid: Netlink PID to which packet should be sent. If @pid is 0 then no * packet is sent and the packet is accounted in the datapath's @n_lost -- cgit v1.2.3-70-g09d2 From 82dc3c63c692b1e1d59378ecee948ac88e034aad Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 5 Mar 2013 15:57:22 +0000 Subject: net: introduce NAPI_POLL_WEIGHT Some drivers use a too big NAPI poll weight. This patch adds a NAPI_POLL_WEIGHT default value and issues an error message if a driver attempts to use a bigger weight. Signed-off-by: Eric Dumazet Cc: Eilon Greenstein Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 5 +++++ net/core/dev.c | 3 +++ 2 files changed, 8 insertions(+) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index b3d00fa4b314..896eb4985f97 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1475,6 +1475,11 @@ static inline void *netdev_priv(const struct net_device *dev) */ #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype)) +/* Default NAPI poll() weight + * Device drivers are strongly advised to not use bigger value + */ +#define NAPI_POLL_WEIGHT 64 + /** * netif_napi_add - initialize a napi context * @dev: network device diff --git a/net/core/dev.c b/net/core/dev.c index a06a7a58dd11..96103894ad69 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4057,6 +4057,9 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi, napi->gro_list = NULL; napi->skb = NULL; napi->poll = poll; + if (weight > NAPI_POLL_WEIGHT) + pr_err_once("netif_napi_add() called with weight %d on device %s\n", + weight, dev->name); napi->weight = weight; list_add(&napi->dev_list, &dev->napi_list); napi->dev = dev; -- cgit v1.2.3-70-g09d2 From 9a886586c82aa02cb49f8c85e961595716884545 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 15 Feb 2013 19:25:00 +0100 Subject: wireless: move sequence number arithmetic to ieee80211.h Move the sequence number arithmetic code from mac80211 to ieee80211.h so others can use it. Also rename the functions from _seq to _sn, they operate on the sequence number, not the sequence_control field. Also move macros to convert the sequence control to/from the sequence number value from various drivers. Signed-off-by: Johannes Berg --- drivers/net/wireless/iwlegacy/3945.h | 4 --- drivers/net/wireless/iwlegacy/4965-mac.c | 13 ++++---- drivers/net/wireless/iwlegacy/common.h | 4 --- drivers/net/wireless/iwlwifi/dvm/tx.c | 11 ++++--- drivers/net/wireless/iwlwifi/iwl-trans.h | 3 -- drivers/net/wireless/iwlwifi/mvm/sta.c | 4 +-- drivers/net/wireless/iwlwifi/mvm/tx.c | 2 +- drivers/net/wireless/iwlwifi/pcie/tx.c | 2 +- drivers/net/wireless/rtlwifi/wifi.h | 3 -- include/linux/ieee80211.h | 28 +++++++++++++++++ net/mac80211/rx.c | 53 +++++++++++++------------------- 11 files changed, 66 insertions(+), 61 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/wireless/iwlegacy/3945.h b/drivers/net/wireless/iwlegacy/3945.h index 1d45075e0d5b..9a8703def0ba 100644 --- a/drivers/net/wireless/iwlegacy/3945.h +++ b/drivers/net/wireless/iwlegacy/3945.h @@ -150,10 +150,6 @@ struct il3945_frame { struct list_head list; }; -#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) -#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) -#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) - #define SUP_RATE_11A_MAX_NUM_CHANNELS 8 #define SUP_RATE_11B_MAX_NUM_CHANNELS 4 #define SUP_RATE_11G_MAX_NUM_CHANNELS 12 diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c index 7941eb3a0166..c092fcbbe965 100644 --- a/drivers/net/wireless/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/iwlegacy/4965-mac.c @@ -2258,7 +2258,7 @@ il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif, spin_lock_irqsave(&il->sta_lock, flags); tid_data = &il->stations[sta_id].tid[tid]; - *ssn = SEQ_TO_SN(tid_data->seq_number); + *ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); tid_data->agg.txq_id = txq_id; il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id); spin_unlock_irqrestore(&il->sta_lock, flags); @@ 
-2408,7 +2408,7 @@ il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id) /* aggregated HW queue */ if (txq_id == tid_data->agg.txq_id && q->read_ptr == q->write_ptr) { - u16 ssn = SEQ_TO_SN(tid_data->seq_number); + u16 ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); int tx_fifo = il4965_get_fifo_from_tid(tid); D_HT("HW queue empty: continue DELBA flow\n"); il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo); @@ -2627,7 +2627,8 @@ il4965_get_ra_sta_id(struct il_priv *il, struct ieee80211_hdr *hdr) static inline u32 il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp) { - return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN; + return le32_to_cpup(&tx_resp->u.status + + tx_resp->frame_count) & IEEE80211_MAX_SN; } static inline u32 @@ -2717,15 +2718,15 @@ il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg, hdr = (struct ieee80211_hdr *) skb->data; sc = le16_to_cpu(hdr->seq_ctrl); - if (idx != (SEQ_TO_SN(sc) & 0xff)) { + if (idx != (IEEE80211_SEQ_TO_SN(sc) & 0xff)) { IL_ERR("BUG_ON idx doesn't match seq control" " idx=%d, seq_idx=%d, seq=%d\n", idx, - SEQ_TO_SN(sc), hdr->seq_ctrl); + IEEE80211_SEQ_TO_SN(sc), hdr->seq_ctrl); return -1; } D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx, - SEQ_TO_SN(sc)); + IEEE80211_SEQ_TO_SN(sc)); sh = idx - start; if (sh > 64) { diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h index 96f2025d936e..73bd3ef316c8 100644 --- a/drivers/net/wireless/iwlegacy/common.h +++ b/drivers/net/wireless/iwlegacy/common.h @@ -541,10 +541,6 @@ struct il_frame { struct list_head list; }; -#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) -#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) -#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) - enum { CMD_SYNC = 0, CMD_SIZE_NORMAL = 0, diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c index 6aec2df3bb27..d499a0366fa6 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c @@ -418,7 +418,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, " Tx flags = 0x%08x, agg.state = %d", info->flags, tid_data->agg.state); IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d", - sta_id, tid, SEQ_TO_SN(tid_data->seq_number)); + sta_id, tid, + IEEE80211_SEQ_TO_SN(tid_data->seq_number)); goto drop_unlock_sta; } @@ -569,7 +570,7 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, return 0; } - tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number); + tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); /* There are still packets for this RA / TID in the HW */ if (!test_bit(txq_id, priv->agg_q_alloc)) { @@ -651,7 +652,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, spin_lock_bh(&priv->sta_lock); tid_data = &priv->tid_data[sta_id][tid]; - tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number); + tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); tid_data->agg.txq_id = txq_id; *ssn = tid_data->agg.ssn; @@ -911,7 +912,7 @@ static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status) static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp) { return le32_to_cpup((__le32 *)&tx_resp->status + - tx_resp->frame_count) & MAX_SN; + tx_resp->frame_count) & IEEE80211_MAX_SN; } static void iwl_rx_reply_tx_agg(struct iwl_priv *priv, @@ -1148,7 +1149,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, if (tx_resp->frame_count == 1) { u16 next_reclaimed = 
le16_to_cpu(tx_resp->seq_ctl); - next_reclaimed = SEQ_TO_SN(next_reclaimed + 0x10); + next_reclaimed = IEEE80211_SEQ_TO_SN(next_reclaimed + 0x10); if (is_agg) { /* If this is an aggregation queue, we can rely on the diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h index 8c7bec6b9a0b..00bdc5b00af3 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h @@ -114,9 +114,6 @@ * completely agnostic to these differences. * The transport does provide helper functionnality (i.e. SYNC / ASYNC mode), */ -#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) -#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) -#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) #define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f) #define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8) #define SEQ_TO_INDEX(s) ((s) & 0xff) diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c index 861a7f9f8e7f..52aecf20d0df 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c @@ -686,7 +686,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, spin_lock_bh(&mvmsta->lock); tid_data = &mvmsta->tid_data[tid]; - tid_data->ssn = SEQ_TO_SN(tid_data->seq_number); + tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); tid_data->txq_id = txq_id; *ssn = tid_data->ssn; @@ -779,7 +779,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, switch (tid_data->state) { case IWL_AGG_ON: - tid_data->ssn = SEQ_TO_SN(tid_data->seq_number); + tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); IWL_DEBUG_TX_QUEUES(mvm, "ssn = %d, next_recl = %d\n", diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 6b67ce3f679c..56df249b215e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -641,7 +641,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, next_reclaimed = ssn; } else { /* The next packet to be reclaimed is the one after this one */ - next_reclaimed = SEQ_TO_SN(seq_ctl + 0x10); + next_reclaimed = IEEE80211_SEQ_TO_SN(seq_ctl + 0x10); } IWL_DEBUG_TX_REPLY(mvm, diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 8e9e3212fe78..ad7441dfa6fb 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -1581,7 +1581,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, * Check here that the packets are in the right place on the ring. 
*/ #ifdef CONFIG_IWLWIFI_DEBUG - wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); + wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) && ((wifi_seq & 0xff) != q->write_ptr), "Q: %d WiFi Seq %d tfdNum %d", diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h index f13258a8d995..c3eff32acf6c 100644 --- a/drivers/net/wireless/rtlwifi/wifi.h +++ b/drivers/net/wireless/rtlwifi/wifi.h @@ -2127,9 +2127,6 @@ value to host byte ordering.*/ #define WLAN_FC_GET_TYPE(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) #define WLAN_FC_GET_STYPE(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) #define WLAN_FC_MORE_DATA(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_MOREDATA) -#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) -#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) -#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) #define RT_RF_OFF_LEVL_ASPM BIT(0) /*PCI ASPM */ #define RT_RF_OFF_LEVL_CLK_REQ BIT(1) /*PCI clock request */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 7e24fe0cfbcd..a0c550fb65a6 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -113,6 +113,34 @@ #define IEEE80211_CTL_EXT_SSW_FBACK 0x9000 #define IEEE80211_CTL_EXT_SSW_ACK 0xa000 + +#define IEEE80211_SN_MASK ((IEEE80211_SCTL_SEQ) >> 4) +#define IEEE80211_MAX_SN IEEE80211_SN_MASK +#define IEEE80211_SN_MODULO (IEEE80211_MAX_SN + 1) + +static inline int ieee80211_sn_less(u16 sn1, u16 sn2) +{ + return ((sn1 - sn2) & IEEE80211_SN_MASK) > (IEEE80211_SN_MODULO >> 1); +} + +static inline u16 ieee80211_sn_add(u16 sn1, u16 sn2) +{ + return (sn1 + sn2) & IEEE80211_SN_MASK; +} + +static inline u16 ieee80211_sn_inc(u16 sn) +{ + return ieee80211_sn_add(sn, 1); +} + +static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2) +{ + return (sn1 - sn2) & IEEE80211_SN_MASK; +} + +#define IEEE80211_SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) +#define IEEE80211_SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) + /* miscellaneous IEEE 802.11 constants */ #define IEEE80211_MAX_FRAG_THRESHOLD 2352 #define IEEE80211_MAX_RTS_THRESHOLD 2353 diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index acf006f2d61a..1f940e2b6f27 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -648,24 +648,6 @@ static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) return RX_CONTINUE; } -#define SEQ_MODULO 0x1000 -#define SEQ_MASK 0xfff - -static inline int seq_less(u16 sq1, u16 sq2) -{ - return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1); -} - -static inline u16 seq_inc(u16 sq) -{ - return (sq + 1) & SEQ_MASK; -} - -static inline u16 seq_sub(u16 sq1, u16 sq2) -{ - return (sq1 - sq2) & SEQ_MASK; -} - static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata, struct tid_ampdu_rx *tid_agg_rx, int index, @@ -687,7 +669,7 @@ static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata, __skb_queue_tail(frames, skb); no_frame: - tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); + tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num); } static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata, @@ -699,8 +681,9 @@ static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata lockdep_assert_held(&tid_agg_rx->reorder_lock); - while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) { - index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) % + while 
(ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) { + index = ieee80211_sn_sub(tid_agg_rx->head_seq_num, + tid_agg_rx->ssn) % tid_agg_rx->buf_size; ieee80211_release_reorder_frame(sdata, tid_agg_rx, index, frames); @@ -727,8 +710,8 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata, lockdep_assert_held(&tid_agg_rx->reorder_lock); /* release the buffer until next missing frame */ - index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) % - tid_agg_rx->buf_size; + index = ieee80211_sn_sub(tid_agg_rx->head_seq_num, + tid_agg_rx->ssn) % tid_agg_rx->buf_size; if (!tid_agg_rx->reorder_buf[index] && tid_agg_rx->stored_mpdu_num) { /* @@ -756,19 +739,22 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata, * Increment the head seq# also for the skipped slots. */ tid_agg_rx->head_seq_num = - (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK; + (tid_agg_rx->head_seq_num + + skipped) & IEEE80211_SN_MASK; skipped = 0; } } else while (tid_agg_rx->reorder_buf[index]) { ieee80211_release_reorder_frame(sdata, tid_agg_rx, index, frames); - index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) % + index = ieee80211_sn_sub(tid_agg_rx->head_seq_num, + tid_agg_rx->ssn) % tid_agg_rx->buf_size; } if (tid_agg_rx->stored_mpdu_num) { - j = index = seq_sub(tid_agg_rx->head_seq_num, - tid_agg_rx->ssn) % tid_agg_rx->buf_size; + j = index = ieee80211_sn_sub(tid_agg_rx->head_seq_num, + tid_agg_rx->ssn) % + tid_agg_rx->buf_size; for (; j != (index - 1) % tid_agg_rx->buf_size; j = (j + 1) % tid_agg_rx->buf_size) { @@ -809,7 +795,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata head_seq_num = tid_agg_rx->head_seq_num; /* frame with out of date sequence number */ - if (seq_less(mpdu_seq_num, head_seq_num)) { + if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) { dev_kfree_skb(skb); goto out; } @@ -818,8 +804,9 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata * If frame the sequence number exceeds our buffering window * size release some previous frames to make room for this one. 
*/ - if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) { - head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size)); + if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) { + head_seq_num = ieee80211_sn_inc( + ieee80211_sn_sub(mpdu_seq_num, buf_size)); /* release stored frames up to new head to stack */ ieee80211_release_reorder_frames(sdata, tid_agg_rx, head_seq_num, frames); @@ -827,7 +814,8 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata /* Now the new frame is always in the range of the reordering buffer */ - index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size; + index = ieee80211_sn_sub(mpdu_seq_num, + tid_agg_rx->ssn) % tid_agg_rx->buf_size; /* check if we already stored this frame */ if (tid_agg_rx->reorder_buf[index]) { @@ -843,7 +831,8 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata */ if (mpdu_seq_num == tid_agg_rx->head_seq_num && tid_agg_rx->stored_mpdu_num == 0) { - tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); + tid_agg_rx->head_seq_num = + ieee80211_sn_inc(tid_agg_rx->head_seq_num); ret = false; goto out; } -- cgit v1.2.3-70-g09d2 From b8a31c9a5afff257cc5dd637cda5fef03e12d67b Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Fri, 22 Feb 2013 17:28:49 +0100 Subject: ieee80211: mark 802.11 related structs as being 2-byte aligned Regardless of what header features they use, or if they align the IP header or not, 802.11 packets from all drivers guarantee a 2-byte alignment (and there's a debug WARN_ON in case they don't). Annotate packet structs with __aligned(2) to allow the compiler to use 16-bit load/store operations on platforms with extremely inefficient unaligned access (e.g. MIPS). This reduces code size and improves performance on affected platforms and causes no binary code change on others. 
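As a rough stand-alone illustration of what the annotation buys (a hypothetical struct, not taken from the patch): __packed and __aligned(2) expand to the corresponding GCC attributes, so with __packed alone the compiler must assume byte alignment and may fall back to byte-wise accesses on strict-alignment CPUs such as MIPS, while the added __aligned(2) guarantees a 2-byte start address and lets the __le16 fields be loaded and stored as single halfwords.

    #include <linux/compiler.h>
    #include <linux/types.h>

    /* Hypothetical header annotated like the patched structs: no padding
     * between members, but guaranteed to begin on a 2-byte boundary so the
     * 16-bit fields can use halfword load/store instructions.
     */
    struct example_wifi_hdr {
            __le16 frame_control;
            __le16 duration_id;
            u8 addr1[6];
            u8 addr2[6];
    } __packed __aligned(2);
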
Signed-off-by: Felix Fietkau Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index a0c550fb65a6..6e352c31fee0 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -213,7 +213,7 @@ struct ieee80211_hdr { u8 addr3[6]; __le16 seq_ctrl; u8 addr4[6]; -} __packed; +} __packed __aligned(2); struct ieee80211_hdr_3addr { __le16 frame_control; @@ -222,7 +222,7 @@ struct ieee80211_hdr_3addr { u8 addr2[6]; u8 addr3[6]; __le16 seq_ctrl; -} __packed; +} __packed __aligned(2); struct ieee80211_qos_hdr { __le16 frame_control; @@ -232,7 +232,7 @@ struct ieee80211_qos_hdr { u8 addr3[6]; __le16 seq_ctrl; __le16 qos_ctrl; -} __packed; +} __packed __aligned(2); /** * ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set @@ -609,7 +609,7 @@ struct ieee80211s_hdr { __le32 seqnum; u8 eaddr1[6]; u8 eaddr2[6]; -} __packed; +} __packed __aligned(2); /* Mesh flags */ #define MESH_FLAGS_AE_A4 0x1 @@ -903,7 +903,7 @@ struct ieee80211_mgmt { } u; } __packed action; } u; -} __packed; +} __packed __aligned(2); /* Supported Rates value encodings in 802.11n-2009 7.3.2.2 */ #define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127 @@ -934,20 +934,20 @@ struct ieee80211_rts { __le16 duration; u8 ra[6]; u8 ta[6]; -} __packed; +} __packed __aligned(2); struct ieee80211_cts { __le16 frame_control; __le16 duration; u8 ra[6]; -} __packed; +} __packed __aligned(2); struct ieee80211_pspoll { __le16 frame_control; __le16 aid; u8 bssid[6]; u8 ta[6]; -} __packed; +} __packed __aligned(2); /* TDLS */ -- cgit v1.2.3-70-g09d2 From c8bb93f5f5d478a01db66127844d1d2dd30abec7 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 21 Feb 2013 17:26:44 +0100 Subject: wireless: remove unused VHT MCS defines There's an enum with the same values (but slightly different names except for NOT_SUPPORTED) that is actually used, so remove the defines. Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 6e352c31fee0..35c1f96d9365 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1318,11 +1318,6 @@ struct ieee80211_vht_operation { } __packed; -#define IEEE80211_VHT_MCS_ZERO_TO_SEVEN_SUPPORT 0 -#define IEEE80211_VHT_MCS_ZERO_TO_EIGHT_SUPPORT 1 -#define IEEE80211_VHT_MCS_ZERO_TO_NINE_SUPPORT 2 -#define IEEE80211_VHT_MCS_NOT_SUPPORTED 3 - /* 802.11ac VHT Capabilities */ #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000 #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 0x00000001 -- cgit v1.2.3-70-g09d2 From 55d942f4246c79a8f3f17f92c224e641c5c26125 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 1 Mar 2013 13:07:48 +0100 Subject: mac80211: restrict peer's VHT capabilities to own Implement restricting peer VHT capabilities to the device's own capabilities. This is useful when a single driver supports more than one device and the devices have different capabilities (often they will differ in the number of spatial streams), but in particular is also necessary for VHT capability overrides to work correctly -- otherwise it'd be possible to e.g. advertise, due to overrides, that TX-STBC is not supported, but then still use it to TX to the AP because it supports RX-STBC. 
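The per-stream restriction done below leans on the VHT MCS map layout: each of rx_mcs_map/tx_mcs_map is a little-endian 16-bit field packing eight 2-bit entries, one per spatial stream, and the value 3 (IEEE80211_VHT_MCS_NOT_SUPPORTED) marks a stream as unusable, which is why the loop masks with that constant. A hypothetical helper (not part of the patch) extracting one entry could look like:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* Hypothetical: return the 2-bit MCS support value (0 = MCS 0-7,
     * 1 = MCS 0-8, 2 = MCS 0-9, 3 = not supported) for the 0-based
     * spatial stream index @nss from a VHT MCS map.
     */
    static inline u16 example_vht_mcs_for_nss(__le16 mcs_map, int nss)
    {
            return (le16_to_cpu(mcs_map) >> (nss * 2)) & 0x3;
    }
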
Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 3 +- include/net/mac80211.h | 5 +- net/mac80211/vht.c | 114 +++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 117 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 35c1f96d9365..4cf0c9e4dd99 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1333,10 +1333,11 @@ struct ieee80211_vht_operation { #define IEEE80211_VHT_CAP_RXSTBC_2 0x00000200 #define IEEE80211_VHT_CAP_RXSTBC_3 0x00000300 #define IEEE80211_VHT_CAP_RXSTBC_4 0x00000400 +#define IEEE80211_VHT_CAP_RXSTBC_MASK 0x00000700 #define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800 #define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000 #define IEEE80211_VHT_CAP_BEAMFORMER_ANTENNAS_MAX 0x00006000 -#define IEEE80211_VHT_CAP_SOUNDING_DIMENTION_MAX 0x00030000 +#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MAX 0x00030000 #define IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE 0x00080000 #define IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE 0x00100000 #define IEEE80211_VHT_CAP_VHT_TXOP_PS 0x00200000 diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 421c3ac8c521..cdd7cea1fd4c 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1228,9 +1228,8 @@ enum ieee80211_sta_rx_bandwidth { * @addr: MAC address * @aid: AID we assigned to the station if we're an AP * @supp_rates: Bitmap of supported rates (per band) - * @ht_cap: HT capabilities of this STA; restricted to our own TX capabilities - * @vht_cap: VHT capabilities of this STA; Not restricting any capabilities - * of remote STA. Taking as is. + * @ht_cap: HT capabilities of this STA; restricted to our own capabilities + * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities * @wme: indicates whether the STA supports WME. Only valid during AP-mode. * @drv_priv: data area for driver use, will always be aligned to * sizeof(void *), size is determined in hw information. diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c index cacc1c74556a..171344d4eb7c 100644 --- a/net/mac80211/vht.c +++ b/net/mac80211/vht.c @@ -118,6 +118,8 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, struct sta_info *sta) { struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap; + struct ieee80211_sta_vht_cap own_cap; + u32 cap_info, i; memset(vht_cap, 0, sizeof(*vht_cap)); @@ -133,12 +135,122 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, vht_cap->vht_supported = true; - vht_cap->cap = le32_to_cpu(vht_cap_ie->vht_cap_info); + own_cap = sband->vht_cap; + /* + * If user has specified capability overrides, take care + * of that if the station we're setting up is the AP that + * we advertised a restricted capability set to. Override + * our own capabilities and then use those below. 
+ */ + if (sdata->vif.type == NL80211_IFTYPE_STATION && + !test_sta_flag(sta, WLAN_STA_TDLS_PEER)) + ieee80211_apply_vhtcap_overrides(sdata, &own_cap); + + /* take some capabilities as-is */ + cap_info = le32_to_cpu(vht_cap_ie->vht_cap_info); + vht_cap->cap = cap_info; + vht_cap->cap &= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 | + IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 | + IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | + IEEE80211_VHT_CAP_RXLDPC | + IEEE80211_VHT_CAP_VHT_TXOP_PS | + IEEE80211_VHT_CAP_HTC_VHT | + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK | + IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_UNSOL_MFB | + IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB | + IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN | + IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN; + + /* and some based on our own capabilities */ + switch (own_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { + case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: + vht_cap->cap |= cap_info & + IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ; + break; + case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: + vht_cap->cap |= cap_info & + IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; + break; + default: + /* nothing */ + break; + } + + /* symmetric capabilities */ + vht_cap->cap |= cap_info & own_cap.cap & + (IEEE80211_VHT_CAP_SHORT_GI_80 | + IEEE80211_VHT_CAP_SHORT_GI_160); + + /* remaining ones */ + if (own_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) { + vht_cap->cap |= cap_info & + (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | + IEEE80211_VHT_CAP_BEAMFORMER_ANTENNAS_MAX | + IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MAX); + } + + if (own_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) + vht_cap->cap |= cap_info & + IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE; + + if (own_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) + vht_cap->cap |= cap_info & + IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; + + if (own_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) + vht_cap->cap |= cap_info & + IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE; + + if (own_cap.cap & IEEE80211_VHT_CAP_TXSTBC) + vht_cap->cap |= cap_info & IEEE80211_VHT_CAP_RXSTBC_MASK; + + if (own_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK) + vht_cap->cap |= cap_info & IEEE80211_VHT_CAP_TXSTBC; /* Copy peer MCS info, the driver might need them. 
*/ memcpy(&vht_cap->vht_mcs, &vht_cap_ie->supp_mcs, sizeof(struct ieee80211_vht_mcs_info)); + /* but also restrict MCSes */ + for (i = 0; i < 8; i++) { + u16 own_rx, own_tx, peer_rx, peer_tx; + + own_rx = le16_to_cpu(own_cap.vht_mcs.rx_mcs_map); + own_rx = (own_rx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED; + + own_tx = le16_to_cpu(own_cap.vht_mcs.tx_mcs_map); + own_tx = (own_tx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED; + + peer_rx = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map); + peer_rx = (peer_rx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED; + + peer_tx = le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map); + peer_tx = (peer_tx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED; + + if (peer_tx != IEEE80211_VHT_MCS_NOT_SUPPORTED) { + if (own_rx == IEEE80211_VHT_MCS_NOT_SUPPORTED) + peer_tx = IEEE80211_VHT_MCS_NOT_SUPPORTED; + else if (own_rx < peer_tx) + peer_tx = own_rx; + } + + if (peer_rx != IEEE80211_VHT_MCS_NOT_SUPPORTED) { + if (own_tx == IEEE80211_VHT_MCS_NOT_SUPPORTED) + peer_rx = IEEE80211_VHT_MCS_NOT_SUPPORTED; + else if (own_tx < peer_rx) + peer_rx = own_tx; + } + + vht_cap->vht_mcs.rx_mcs_map &= + ~cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << i * 2); + vht_cap->vht_mcs.rx_mcs_map |= cpu_to_le16(peer_rx << i * 2); + + vht_cap->vht_mcs.tx_mcs_map &= + ~cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << i * 2); + vht_cap->vht_mcs.tx_mcs_map |= cpu_to_le16(peer_tx << i * 2); + } + + /* finally set up the bandwidth */ switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: -- cgit v1.2.3-70-g09d2 From acbba0d0f88e2577b9d92b61b136d13f65831a52 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 6 Mar 2013 01:31:12 +0000 Subject: team: introduce two default team_modeop functions and use them in modes No need to duplicate code for this. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/team/team.c | 19 ++++++++++++++++--- drivers/net/team/team_mode_broadcast.c | 14 ++------------ drivers/net/team/team_mode_roundrobin.c | 14 ++------------ include/linux/if_team.h | 5 ++++- 4 files changed, 24 insertions(+), 28 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 05c5efe84591..ece70a4abbb1 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -73,11 +73,24 @@ static int team_port_set_orig_dev_addr(struct team_port *port) return __set_port_dev_addr(port->dev, port->orig.dev_addr); } -int team_port_set_team_dev_addr(struct team_port *port) +static int team_port_set_team_dev_addr(struct team *team, + struct team_port *port) +{ + return __set_port_dev_addr(port->dev, team->dev->dev_addr); +} + +int team_modeop_port_enter(struct team *team, struct team_port *port) +{ + return team_port_set_team_dev_addr(team, port); +} +EXPORT_SYMBOL(team_modeop_port_enter); + +void team_modeop_port_change_dev_addr(struct team *team, + struct team_port *port) { - return __set_port_dev_addr(port->dev, port->team->dev->dev_addr); + team_port_set_team_dev_addr(team, port); } -EXPORT_SYMBOL(team_port_set_team_dev_addr); +EXPORT_SYMBOL(team_modeop_port_change_dev_addr); static void team_refresh_port_linkup(struct team_port *port) { diff --git a/drivers/net/team/team_mode_broadcast.c b/drivers/net/team/team_mode_broadcast.c index c5db428e73fa..c366cd299c06 100644 --- a/drivers/net/team/team_mode_broadcast.c +++ b/drivers/net/team/team_mode_broadcast.c @@ -46,20 +46,10 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb) return sum_ret; } -static int bc_port_enter(struct team *team, struct team_port *port) -{ - return team_port_set_team_dev_addr(port); -} - -static void bc_port_change_dev_addr(struct team *team, struct team_port *port) -{ - team_port_set_team_dev_addr(port); -} - static const struct team_mode_ops bc_mode_ops = { .transmit = bc_transmit, - .port_enter = bc_port_enter, - .port_change_dev_addr = bc_port_change_dev_addr, + .port_enter = team_modeop_port_enter, + .port_change_dev_addr = team_modeop_port_change_dev_addr, }; static const struct team_mode bc_mode = { diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c index 105135aa8f05..ed63a6bc66ce 100644 --- a/drivers/net/team/team_mode_roundrobin.c +++ b/drivers/net/team/team_mode_roundrobin.c @@ -64,20 +64,10 @@ drop: return false; } -static int rr_port_enter(struct team *team, struct team_port *port) -{ - return team_port_set_team_dev_addr(port); -} - -static void rr_port_change_dev_addr(struct team *team, struct team_port *port) -{ - team_port_set_team_dev_addr(port); -} - static const struct team_mode_ops rr_mode_ops = { .transmit = rr_transmit, - .port_enter = rr_port_enter, - .port_change_dev_addr = rr_port_change_dev_addr, + .port_enter = team_modeop_port_enter, + .port_change_dev_addr = team_modeop_port_change_dev_addr, }; static const struct team_mode rr_mode = { diff --git a/include/linux/if_team.h b/include/linux/if_team.h index cfd21e3d5506..3283def74483 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -112,6 +112,10 @@ struct team_mode_ops { void (*port_disabled)(struct team *team, struct team_port *port); }; +extern int team_modeop_port_enter(struct team *team, struct team_port *port); +extern void team_modeop_port_change_dev_addr(struct team *team, + struct team_port *port); + enum team_option_type { TEAM_OPTION_TYPE_U32, TEAM_OPTION_TYPE_STRING, @@ 
-236,7 +240,6 @@ static inline struct team_port *team_get_port_by_index_rcu(struct team *team, return NULL; } -extern int team_port_set_team_dev_addr(struct team_port *port); extern int team_options_register(struct team *team, const struct team_option *option, size_t option_count); -- cgit v1.2.3-70-g09d2 From 753f993911b32e479b4fab5d228dc07c11d1e7e7 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 6 Mar 2013 01:31:13 +0000 Subject: team: introduce random mode As suggested by Eric Dumazet, allow user to select mode which chooses TX port randomly. Functionality should be more of less similar to round-robin mode with even lower overhead. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/team/Kconfig | 12 ++++++ drivers/net/team/Makefile | 1 + drivers/net/team/team_mode_random.c | 71 +++++++++++++++++++++++++++++++++ drivers/net/team/team_mode_roundrobin.c | 22 +--------- include/linux/if_team.h | 20 ++++++++++ 5 files changed, 105 insertions(+), 21 deletions(-) create mode 100644 drivers/net/team/team_mode_random.c (limited to 'include/linux') diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig index c3011af68e91..c853d84fd99f 100644 --- a/drivers/net/team/Kconfig +++ b/drivers/net/team/Kconfig @@ -37,6 +37,18 @@ config NET_TEAM_MODE_ROUNDROBIN To compile this team mode as a module, choose M here: the module will be called team_mode_roundrobin. +config NET_TEAM_MODE_RANDOM + tristate "Random mode support" + depends on NET_TEAM + ---help--- + Basic mode where port used for transmitting packets is selected + randomly. + + All added ports are setup to have team's device address. + + To compile this team mode as a module, choose M here: the module + will be called team_mode_random. + config NET_TEAM_MODE_ACTIVEBACKUP tristate "Active-backup mode support" depends on NET_TEAM diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile index 975763014e5a..c57e85889751 100644 --- a/drivers/net/team/Makefile +++ b/drivers/net/team/Makefile @@ -5,5 +5,6 @@ obj-$(CONFIG_NET_TEAM) += team.o obj-$(CONFIG_NET_TEAM_MODE_BROADCAST) += team_mode_broadcast.o obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o +obj-$(CONFIG_NET_TEAM_MODE_RANDOM) += team_mode_random.o obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o obj-$(CONFIG_NET_TEAM_MODE_LOADBALANCE) += team_mode_loadbalance.o diff --git a/drivers/net/team/team_mode_random.c b/drivers/net/team/team_mode_random.c new file mode 100644 index 000000000000..9eabfaa22f3e --- /dev/null +++ b/drivers/net/team/team_mode_random.c @@ -0,0 +1,71 @@ +/* + * drivers/net/team/team_mode_random.c - Random mode for team + * Copyright (c) 2013 Jiri Pirko + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +static u32 random_N(unsigned int N) +{ + return reciprocal_divide(random32(), N); +} + +static bool rnd_transmit(struct team *team, struct sk_buff *skb) +{ + struct team_port *port; + int port_index; + + port_index = random_N(team->en_port_count); + port = team_get_port_by_index_rcu(team, port_index); + port = team_get_first_port_txable_rcu(team, port); + if (unlikely(!port)) + goto drop; + if (team_dev_queue_xmit(team, port, skb)) + return false; + return true; + +drop: + dev_kfree_skb_any(skb); + return false; +} + +static const struct team_mode_ops rnd_mode_ops = { + .transmit = rnd_transmit, + .port_enter = team_modeop_port_enter, + .port_change_dev_addr = team_modeop_port_change_dev_addr, +}; + +static const struct team_mode rnd_mode = { + .kind = "random", + .owner = THIS_MODULE, + .ops = &rnd_mode_ops, +}; + +static int __init rnd_init_module(void) +{ + return team_mode_register(&rnd_mode); +} + +static void __exit rnd_cleanup_module(void) +{ + team_mode_unregister(&rnd_mode); +} + +module_init(rnd_init_module); +module_exit(rnd_cleanup_module); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Jiri Pirko "); +MODULE_DESCRIPTION("Random mode for team"); +MODULE_ALIAS("team-mode-random"); diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c index ed63a6bc66ce..d268e4de781b 100644 --- a/drivers/net/team/team_mode_roundrobin.c +++ b/drivers/net/team/team_mode_roundrobin.c @@ -25,26 +25,6 @@ static struct rr_priv *rr_priv(struct team *team) return (struct rr_priv *) &team->mode_priv; } -static struct team_port *__get_first_port_up(struct team *team, - struct team_port *port) -{ - struct team_port *cur; - - if (team_port_txable(port)) - return port; - cur = port; - list_for_each_entry_continue_rcu(cur, &team->port_list, list) - if (team_port_txable(port)) - return cur; - list_for_each_entry_rcu(cur, &team->port_list, list) { - if (cur == port) - break; - if (team_port_txable(port)) - return cur; - } - return NULL; -} - static bool rr_transmit(struct team *team, struct sk_buff *skb) { struct team_port *port; @@ -52,7 +32,7 @@ static bool rr_transmit(struct team *team, struct sk_buff *skb) port_index = rr_priv(team)->sent_packets++ % team->en_port_count; port = team_get_port_by_index_rcu(team, port_index); - port = __get_first_port_up(team, port); + port = team_get_first_port_txable_rcu(team, port); if (unlikely(!port)) goto drop; if (team_dev_queue_xmit(team, port, skb)) diff --git a/include/linux/if_team.h b/include/linux/if_team.h index 3283def74483..4474557904f6 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -240,6 +240,26 @@ static inline struct team_port *team_get_port_by_index_rcu(struct team *team, return NULL; } +static inline struct team_port * +team_get_first_port_txable_rcu(struct team *team, struct team_port *port) +{ + struct team_port *cur; + + if (likely(team_port_txable(port))) + return port; + cur = port; + list_for_each_entry_continue_rcu(cur, &team->port_list, list) + if (team_port_txable(port)) + return cur; + list_for_each_entry_rcu(cur, &team->port_list, list) { + if (cur == port) + break; + if (team_port_txable(port)) + return cur; + } + return NULL; +} + extern int team_options_register(struct team *team, const struct team_option *option, size_t option_count); -- cgit v1.2.3-70-g09d2 From 8524982847ff00b66ffb89314c342c51f4138ee7 Mon Sep 17 00:00:00 2001 From: Hauke Mehrtens Date: Mon, 18 Feb 2013 21:47:45 +0100 Subject: ssb: 
fix unaligned access to mac address The mac address should be aligned to u16 to prevent an unaligned access in drivers/ssb/pci.c where it is casted to __be16. Signed-off-by: Hauke Mehrtens Signed-off-by: John W. Linville --- include/linux/ssb/ssb.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 22958d68ecfe..8b1322296fed 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h @@ -26,9 +26,9 @@ struct ssb_sprom_core_pwr_info { struct ssb_sprom { u8 revision; - u8 il0mac[6]; /* MAC address for 802.11b/g */ - u8 et0mac[6]; /* MAC address for Ethernet */ - u8 et1mac[6]; /* MAC address for 802.11a */ + u8 il0mac[6] __aligned(sizeof(u16)); /* MAC address for 802.11b/g */ + u8 et0mac[6] __aligned(sizeof(u16)); /* MAC address for Ethernet */ + u8 et1mac[6] __aligned(sizeof(u16)); /* MAC address for 802.11a */ u8 et0phyaddr; /* MII address for enet0 */ u8 et1phyaddr; /* MII address for enet1 */ u8 et0mdcport; /* MDIO for enet0 */ -- cgit v1.2.3-70-g09d2 From 090096bf3db1c281ddd034573260045888a68fea Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Wed, 6 Mar 2013 15:39:42 +0000 Subject: net: generic fdb support for drivers without ndo_fdb_ If the driver does not support the ndo_op use the generic handler for it. This should work in the majority of cases. Eventually the fdb_dflt_add call gets translated into a __dev_set_rx_mode() call which should handle hardware support for filtering via the IFF_UNICAST_FLT flag. Namely IFF_UNICAST_FLT indicates if the hardware can do unicast address filtering. If no support is available the device is put into promisc mode. Signed-off-by: Vlad Yasevich Signed-off-by: John Fastabend Signed-off-by: David S. Miller --- include/linux/rtnetlink.h | 9 ++++++ net/core/rtnetlink.c | 81 +++++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 84 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 489dd7bb28ec..f28544b2f9af 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -69,6 +69,15 @@ extern int ndo_dflt_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, struct net_device *dev, int idx); +extern int ndo_dflt_fdb_add(struct ndmsg *ndm, + struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, + u16 flags); +extern int ndo_dflt_fdb_del(struct ndmsg *ndm, + struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr); extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, u16 mode); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index b376410ff259..f95b6fbc29e9 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2048,6 +2048,38 @@ errout: rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); } +/** + * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry + */ +int ndo_dflt_fdb_add(struct ndmsg *ndm, + struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, + u16 flags) +{ + int err = -EINVAL; + + /* If aging addresses are supported device will need to + * implement its own handler for this. 
+ */ + if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return err; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_add_excl(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_add_excl(dev, addr); + + /* Only return duplicate errors if NLM_F_EXCL is set */ + if (err == -EEXIST && !(flags & NLM_F_EXCL)) + err = 0; + + return err; +} +EXPORT_SYMBOL(ndo_dflt_fdb_add); + static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { struct net *net = sock_net(skb->sk); @@ -2100,10 +2132,13 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) } /* Embedded bridge, macvlan, and any other device support */ - if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_add) { - err = dev->netdev_ops->ndo_fdb_add(ndm, tb, - dev, addr, - nlh->nlmsg_flags); + if ((ndm->ndm_flags & NTF_SELF)) { + if (dev->netdev_ops->ndo_fdb_add) + err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr, + nlh->nlmsg_flags); + else + err = ndo_dflt_fdb_add(ndm, tb, dev, addr, + nlh->nlmsg_flags); if (!err) { rtnl_fdb_notify(dev, addr, RTM_NEWNEIGH); @@ -2114,6 +2149,35 @@ out: return err; } +/** + * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry + */ +int ndo_dflt_fdb_del(struct ndmsg *ndm, + struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr) +{ + int err = -EOPNOTSUPP; + + /* If aging addresses are supported device will need to + * implement its own handler for this. + */ + if (ndm->ndm_state & NUD_PERMANENT) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return -EINVAL; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_del(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_del(dev, addr); + else + err = -EINVAL; + + return err; +} +EXPORT_SYMBOL(ndo_dflt_fdb_del); + static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { struct net *net = sock_net(skb->sk); @@ -2171,8 +2235,11 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) } /* Embedded bridge, macvlan, and any other device support */ - if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_del) { - err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr); + if (ndm->ndm_flags & NTF_SELF) { + if (dev->netdev_ops->ndo_fdb_del) + err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr); + else + err = ndo_dflt_fdb_del(ndm, tb, dev, addr); if (!err) { rtnl_fdb_notify(dev, addr, RTM_DELNEIGH); @@ -2257,6 +2324,8 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) if (dev->netdev_ops->ndo_fdb_dump) idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, idx); + else + ndo_dflt_fdb_dump(skb, cb, dev, idx); } rcu_read_unlock(); -- cgit v1.2.3-70-g09d2 From c031e234ee304b507b79f76a7677ea0a7a8890e8 Mon Sep 17 00:00:00 2001 From: Andy King Date: Thu, 7 Mar 2013 05:26:13 +0000 Subject: VSOCK: Split vm_sockets.h into kernel/uapi Split the vSockets header into kernel and UAPI parts. The former gets the bits that used to be in __KERNEL__ guards, while the latter gets everything that is user-visible. Tested by compiling vsock (+transport) and a simple user-mode vSockets application. Reported-by: David Howells Acked-by: Dmitry Torokhov Signed-off-by: Andy King Acked-by: David Howells Signed-off-by: David S. 
Miller --- include/linux/vm_sockets.h | 23 +++++++++++++++++++++++ include/uapi/linux/vm_sockets.h | 23 ++++++++--------------- 2 files changed, 31 insertions(+), 15 deletions(-) create mode 100644 include/linux/vm_sockets.h (limited to 'include/linux') diff --git a/include/linux/vm_sockets.h b/include/linux/vm_sockets.h new file mode 100644 index 000000000000..0805eecba8f7 --- /dev/null +++ b/include/linux/vm_sockets.h @@ -0,0 +1,23 @@ +/* + * VMware vSockets Driver + * + * Copyright (C) 2007-2013 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef _VM_SOCKETS_H +#define _VM_SOCKETS_H + +#include + +int vm_sockets_get_local_cid(void); + +#endif /* _VM_SOCKETS_H */ diff --git a/include/uapi/linux/vm_sockets.h b/include/uapi/linux/vm_sockets.h index df91301847ec..b4ed5d895699 100644 --- a/include/uapi/linux/vm_sockets.h +++ b/include/uapi/linux/vm_sockets.h @@ -13,12 +13,10 @@ * more details. */ -#ifndef _VM_SOCKETS_H_ -#define _VM_SOCKETS_H_ +#ifndef _UAPI_VM_SOCKETS_H +#define _UAPI_VM_SOCKETS_H -#if !defined(__KERNEL__) -#include -#endif +#include /* Option name for STREAM socket buffer size. Use as the option name in * setsockopt(3) or getsockopt(3) to set or get an unsigned long long that @@ -137,14 +135,13 @@ #define VM_SOCKETS_VERSION_MINOR(_v) (((_v) & 0x0000FFFF)) /* Address structure for vSockets. The address family should be set to - * whatever vmci_sock_get_af_value_fd() returns. The structure members should - * all align on their natural boundaries without resorting to compiler packing - * directives. The total size of this structure should be exactly the same as - * that of struct sockaddr. + * AF_VSOCK. The structure members should all align on their natural + * boundaries without resorting to compiler packing directives. The total size + * of this structure should be exactly the same as that of struct sockaddr. */ struct sockaddr_vm { - sa_family_t svm_family; + __kernel_sa_family_t svm_family; unsigned short svm_reserved1; unsigned int svm_port; unsigned int svm_cid; @@ -156,8 +153,4 @@ struct sockaddr_vm { #define IOCTL_VM_SOCKETS_GET_LOCAL_CID _IO(7, 0xb9) -#if defined(__KERNEL__) -int vm_sockets_get_local_cid(void); -#endif - -#endif +#endif /* _UAPI_VM_SOCKETS_H */ -- cgit v1.2.3-70-g09d2 From ec5f061564238892005257c83565a0b58ec79295 Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Thu, 7 Mar 2013 09:28:01 +0000 Subject: net: Kill link between CSUM and SG features. Earlier SG was unset if CSUM was not available for given device to force skb copy to avoid sending inconsistent csum. Commit c9af6db4c11c (net: Fix possible wrong checksum generation) added explicit flag to force copy to fix this issue. Therefore there is no need to link SG and CSUM, following patch kills this link between there two features. This patch is also required following patch in series. Signed-off-by: Pravin B Shelar Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 13 ++++++++++ net/core/dev.c | 63 +++++++++++++++++++++++++---------------------- net/core/skbuff.c | 13 ++++++++++ net/ipv4/af_inet.c | 3 --- net/ipv6/ip6_offload.c | 3 --- 5 files changed, 59 insertions(+), 36 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 896eb4985f97..e1ebeffa6b35 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2683,6 +2683,19 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features) { return __skb_gso_segment(skb, features, true); } +__be16 skb_network_protocol(struct sk_buff *skb); + +static inline bool can_checksum_protocol(netdev_features_t features, + __be16 protocol) +{ + return ((features & NETIF_F_GEN_CSUM) || + ((features & NETIF_F_V4_CSUM) && + protocol == htons(ETH_P_IP)) || + ((features & NETIF_F_V6_CSUM) && + protocol == htons(ETH_P_IPV6)) || + ((features & NETIF_F_FCOE_CRC) && + protocol == htons(ETH_P_FCOE))); +} #ifdef CONFIG_BUG extern void netdev_rx_csum_fault(struct net_device *dev); diff --git a/net/core/dev.c b/net/core/dev.c index 96103894ad69..bb999931729f 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2208,16 +2208,8 @@ out: } EXPORT_SYMBOL(skb_checksum_help); -/** - * skb_mac_gso_segment - mac layer segmentation handler. - * @skb: buffer to segment - * @features: features for the output path (see dev->features) - */ -struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, - netdev_features_t features) +__be16 skb_network_protocol(struct sk_buff *skb) { - struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); - struct packet_offload *ptype; __be16 type = skb->protocol; while (type == htons(ETH_P_8021Q)) { @@ -2225,13 +2217,31 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, struct vlan_hdr *vh; if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN))) - return ERR_PTR(-EINVAL); + return 0; vh = (struct vlan_hdr *)(skb->data + vlan_depth); type = vh->h_vlan_encapsulated_proto; vlan_depth += VLAN_HLEN; } + return type; +} + +/** + * skb_mac_gso_segment - mac layer segmentation handler. + * @skb: buffer to segment + * @features: features for the output path (see dev->features) + */ +struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, + netdev_features_t features) +{ + struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); + struct packet_offload *ptype; + __be16 type = skb_network_protocol(skb); + + if (unlikely(!type)) + return ERR_PTR(-EINVAL); + __skb_pull(skb, skb->mac_len); rcu_read_lock(); @@ -2398,24 +2408,12 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features) return 0; } -static bool can_checksum_protocol(netdev_features_t features, __be16 protocol) -{ - return ((features & NETIF_F_GEN_CSUM) || - ((features & NETIF_F_V4_CSUM) && - protocol == htons(ETH_P_IP)) || - ((features & NETIF_F_V6_CSUM) && - protocol == htons(ETH_P_IPV6)) || - ((features & NETIF_F_FCOE_CRC) && - protocol == htons(ETH_P_FCOE))); -} - static netdev_features_t harmonize_features(struct sk_buff *skb, __be16 protocol, netdev_features_t features) { if (skb->ip_summed != CHECKSUM_NONE && !can_checksum_protocol(features, protocol)) { features &= ~NETIF_F_ALL_CSUM; - features &= ~NETIF_F_SG; } else if (illegal_highdma(skb->dev, skb)) { features &= ~NETIF_F_SG; } @@ -4921,20 +4919,25 @@ static netdev_features_t netdev_fix_features(struct net_device *dev, features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); } - /* Fix illegal SG+CSUM combinations. 
*/ - if ((features & NETIF_F_SG) && - !(features & NETIF_F_ALL_CSUM)) { - netdev_dbg(dev, - "Dropping NETIF_F_SG since no checksum feature.\n"); - features &= ~NETIF_F_SG; - } - /* TSO requires that SG is present as well. */ if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); features &= ~NETIF_F_ALL_TSO; } + if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) && + !(features & NETIF_F_IP_CSUM)) { + netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n"); + features &= ~NETIF_F_TSO; + features &= ~NETIF_F_TSO_ECN; + } + + if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) && + !(features & NETIF_F_IPV6_CSUM)) { + netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n"); + features &= ~NETIF_F_TSO6; + } + /* TSO ECN requires that TSO is present as well. */ if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN) features &= ~NETIF_F_TSO_ECN; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 33245ef54c3b..0a48ae20c903 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2741,12 +2741,19 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) unsigned int tnl_hlen = skb_tnl_header_len(skb); unsigned int headroom; unsigned int len; + __be16 proto; + bool csum; int sg = !!(features & NETIF_F_SG); int nfrags = skb_shinfo(skb)->nr_frags; int err = -ENOMEM; int i = 0; int pos; + proto = skb_network_protocol(skb); + if (unlikely(!proto)) + return ERR_PTR(-EINVAL); + + csum = !!can_checksum_protocol(features, proto); __skb_push(skb, doffset); headroom = skb_headroom(skb); pos = skb_headlen(skb); @@ -2884,6 +2891,12 @@ skip_fraglist: nskb->data_len = len - hsize; nskb->len += nskb->data_len; nskb->truesize += nskb->data_len; + + if (!csum) { + nskb->csum = skb_checksum(nskb, doffset, + nskb->len - doffset, 0); + nskb->ip_summed = CHECKSUM_NONE; + } } while ((offset += len) < skb->len); return segs; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 68f6a94f7661..dc3f677360a5 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1284,9 +1284,6 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int id; unsigned int offset = 0; - if (!(features & NETIF_F_V4_CSUM)) - features &= ~NETIF_F_SG; - if (unlikely(skb_shinfo(skb)->gso_type & ~(SKB_GSO_TCPV4 | SKB_GSO_UDP | diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 8234c1dcdf72..7a0d25a5479c 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -92,9 +92,6 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, u8 *prevhdr; int offset = 0; - if (!(features & NETIF_F_V6_CSUM)) - features &= ~NETIF_F_SG; - if (unlikely(skb_shinfo(skb)->gso_type & ~(SKB_GSO_UDP | SKB_GSO_DODGY | -- cgit v1.2.3-70-g09d2 From aefbd2b3c2a9c657605e4337f9919d6c6273e8e6 Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Thu, 7 Mar 2013 13:21:46 +0000 Subject: tunneling: Capture inner mac header during encapsulation. This patch adds inner mac header. This will be used in next patch to find tunner header length. Header len is required to copy tunnel header to each gso segment. This patch does not change any functionality. Signed-off-by: Pravin B Shelar Acked-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 34 ++++++++++++++++++++++++++++++++++ net/core/skbuff.c | 2 ++ 2 files changed, 36 insertions(+) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 821c7f45d2a7..d7f96ff68f77 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -387,6 +387,7 @@ typedef unsigned char *sk_buff_data_t; * @vlan_tci: vlan tag control information * @inner_transport_header: Inner transport layer header (encapsulation) * @inner_network_header: Network layer header (encapsulation) + * @inner_mac_header: Link layer header (encapsulation) * @transport_header: Transport layer header * @network_header: Network layer header * @mac_header: Link layer header @@ -505,6 +506,7 @@ struct sk_buff { sk_buff_data_t inner_transport_header; sk_buff_data_t inner_network_header; + sk_buff_data_t inner_mac_header; sk_buff_data_t transport_header; sk_buff_data_t network_header; sk_buff_data_t mac_header; @@ -1466,6 +1468,7 @@ static inline void skb_reserve(struct sk_buff *skb, int len) static inline void skb_reset_inner_headers(struct sk_buff *skb) { + skb->inner_mac_header = skb->mac_header; skb->inner_network_header = skb->network_header; skb->inner_transport_header = skb->transport_header; } @@ -1511,6 +1514,22 @@ static inline void skb_set_inner_network_header(struct sk_buff *skb, skb->inner_network_header += offset; } +static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb) +{ + return skb->head + skb->inner_mac_header; +} + +static inline void skb_reset_inner_mac_header(struct sk_buff *skb) +{ + skb->inner_mac_header = skb->data - skb->head; +} + +static inline void skb_set_inner_mac_header(struct sk_buff *skb, + const int offset) +{ + skb_reset_inner_mac_header(skb); + skb->inner_mac_header += offset; +} static inline bool skb_transport_header_was_set(const struct sk_buff *skb) { return skb->transport_header != ~0U; @@ -1604,6 +1623,21 @@ static inline void skb_set_inner_network_header(struct sk_buff *skb, skb->inner_network_header = skb->data + offset; } +static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb) +{ + return skb->inner_mac_header; +} + +static inline void skb_reset_inner_mac_header(struct sk_buff *skb) +{ + skb->inner_mac_header = skb->data; +} + +static inline void skb_set_inner_mac_header(struct sk_buff *skb, + const int offset) +{ + skb->inner_mac_header = skb->data + offset; +} static inline bool skb_transport_header_was_set(const struct sk_buff *skb) { return skb->transport_header != NULL; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 0278c7f787bf..31c6737d3189 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -673,6 +673,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) new->mac_header = old->mac_header; new->inner_transport_header = old->inner_transport_header; new->inner_network_header = old->inner_network_header; + new->inner_mac_header = old->inner_mac_header; skb_dst_copy(new, old); new->rxhash = old->rxhash; new->ooo_okay = old->ooo_okay; @@ -876,6 +877,7 @@ static void skb_headers_offset_update(struct sk_buff *skb, int off) skb->mac_header += off; skb->inner_transport_header += off; skb->inner_network_header += off; + skb->inner_mac_header += off; } static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) -- cgit v1.2.3-70-g09d2 From 731362674580cb0c696cd1b1a03d8461a10cf90a Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Thu, 7 Mar 2013 13:21:51 +0000 Subject: tunneling: Add generic Tunnel 
segmentation. Adds generic tunneling offloading support for IPv4-UDP based tunnels. GSO type is added to request this offload for a skb. netdev feature NETIF_F_UDP_TUNNEL is added for hardware offloaded udp-tunnel support. Currently no device supports this feature, software offload is used. This can be used by tunneling protocols like VXLAN. CC: Jesse Gross Signed-off-by: Pravin B Shelar Acked-by: Stephen Hemminger Signed-off-by: David S. Miller --- include/linux/netdev_features.h | 7 +-- include/linux/skbuff.h | 2 + net/core/ethtool.c | 1 + net/ipv4/af_inet.c | 6 ++- net/ipv4/tcp.c | 1 + net/ipv4/udp.c | 115 +++++++++++++++++++++++++++++++--------- net/ipv6/ip6_offload.c | 1 + net/ipv6/udp_offload.c | 8 ++- 8 files changed, 111 insertions(+), 30 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 3dd39340430e..f5e797c0c2a4 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -42,9 +42,9 @@ enum { NETIF_F_TSO6_BIT, /* ... TCPv6 segmentation */ NETIF_F_FSO_BIT, /* ... FCoE segmentation */ NETIF_F_GSO_GRE_BIT, /* ... GRE with TSO */ - /**/NETIF_F_GSO_LAST, /* [can't be last bit, see GSO_MASK] */ - NETIF_F_GSO_RESERVED2 /* ... free (fill GSO_MASK to 8 bits) */ - = NETIF_F_GSO_LAST, + NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */ + /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ + NETIF_F_GSO_UDP_TUNNEL_BIT, NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */ @@ -103,6 +103,7 @@ enum { #define NETIF_F_RXFCS __NETIF_F(RXFCS) #define NETIF_F_RXALL __NETIF_F(RXALL) #define NETIF_F_GRE_GSO __NETIF_F(GSO_GRE) +#define NETIF_F_UDP_TUNNEL __NETIF_F(UDP_TUNNEL) /* Features valid for ethtool to change */ /* = all defined minus driver/device-class-related */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index d7f96ff68f77..eb2106fe3bb4 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -316,6 +316,8 @@ enum { SKB_GSO_FCOE = 1 << 5, SKB_GSO_GRE = 1 << 6, + + SKB_GSO_UDP_TUNNEL = 1 << 7, }; #if BITS_PER_LONG > 32 diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 3e9b2c3e30f0..adc1351e6873 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -78,6 +78,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] [NETIF_F_TSO6_BIT] = "tx-tcp6-segmentation", [NETIF_F_FSO_BIT] = "tx-fcoe-segmentation", [NETIF_F_GSO_GRE_BIT] = "tx-gre-segmentation", + [NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation", [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc", [NETIF_F_SCTP_CSUM_BIT] = "tx-checksum-sctp", diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index dc3f677360a5..9e5882caf8a7 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1283,6 +1283,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int ihl; int id; unsigned int offset = 0; + bool tunnel; if (unlikely(skb_shinfo(skb)->gso_type & ~(SKB_GSO_TCPV4 | @@ -1290,6 +1291,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, SKB_GSO_DODGY | SKB_GSO_TCP_ECN | SKB_GSO_GRE | + SKB_GSO_UDP_TUNNEL | 0))) goto out; @@ -1304,6 +1306,8 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, if (unlikely(!pskb_may_pull(skb, ihl))) goto out; + tunnel = !!skb->encapsulation; + __skb_pull(skb, ihl); skb_reset_transport_header(skb); iph = ip_hdr(skb); @@ -1323,7 +1327,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, skb = segs; do { iph = ip_hdr(skb); - if (proto == 
IPPROTO_UDP) { + if (!tunnel && proto == IPPROTO_UDP) { iph->id = htons(id); iph->frag_off = htons(offset >> 3); if (skb->next != NULL) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 47e854fcae24..8d14573ade77 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3044,6 +3044,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, SKB_GSO_TCP_ECN | SKB_GSO_TCPV6 | SKB_GSO_GRE | + SKB_GSO_UDP_TUNNEL | 0) || !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))) goto out; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 265c42cf963c..41760e043bf5 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2272,31 +2272,88 @@ void __init udp_init(void) int udp4_ufo_send_check(struct sk_buff *skb) { - const struct iphdr *iph; - struct udphdr *uh; - - if (!pskb_may_pull(skb, sizeof(*uh))) + if (!pskb_may_pull(skb, sizeof(struct udphdr))) return -EINVAL; - iph = ip_hdr(skb); - uh = udp_hdr(skb); + if (likely(!skb->encapsulation)) { + const struct iphdr *iph; + struct udphdr *uh; - uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len, - IPPROTO_UDP, 0); - skb->csum_start = skb_transport_header(skb) - skb->head; - skb->csum_offset = offsetof(struct udphdr, check); - skb->ip_summed = CHECKSUM_PARTIAL; + iph = ip_hdr(skb); + uh = udp_hdr(skb); + + uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len, + IPPROTO_UDP, 0); + skb->csum_start = skb_transport_header(skb) - skb->head; + skb->csum_offset = offsetof(struct udphdr, check); + skb->ip_summed = CHECKSUM_PARTIAL; + } return 0; } +static struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, + netdev_features_t features) +{ + struct sk_buff *segs = ERR_PTR(-EINVAL); + int mac_len = skb->mac_len; + int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); + int outer_hlen; + netdev_features_t enc_features; + + if (unlikely(!pskb_may_pull(skb, tnl_hlen))) + goto out; + + skb->encapsulation = 0; + __skb_pull(skb, tnl_hlen); + skb_reset_mac_header(skb); + skb_set_network_header(skb, skb_inner_network_offset(skb)); + skb->mac_len = skb_inner_network_offset(skb); + + /* segment inner packet. */ + enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); + segs = skb_mac_gso_segment(skb, enc_features); + if (!segs || IS_ERR(segs)) + goto out; + + outer_hlen = skb_tnl_header_len(skb); + skb = segs; + do { + struct udphdr *uh; + int udp_offset = outer_hlen - tnl_hlen; + + skb->mac_len = mac_len; + + skb_push(skb, outer_hlen); + skb_reset_mac_header(skb); + skb_set_network_header(skb, mac_len); + skb_set_transport_header(skb, udp_offset); + uh = udp_hdr(skb); + uh->len = htons(skb->len - udp_offset); + + /* csum segment if tunnel sets skb with csum. 
*/ + if (unlikely(uh->check)) { + struct iphdr *iph = ip_hdr(skb); + + uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + skb->len - udp_offset, + IPPROTO_UDP, 0); + uh->check = csum_fold(skb_checksum(skb, udp_offset, + skb->len - udp_offset, 0)); + if (uh->check == 0) + uh->check = CSUM_MANGLED_0; + + } + skb->ip_summed = CHECKSUM_NONE; + } while ((skb = skb->next)); +out: + return segs; +} + struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); unsigned int mss; - int offset; - __wsum csum; - mss = skb_shinfo(skb)->gso_size; if (unlikely(skb->len <= mss)) goto out; @@ -2306,6 +2363,7 @@ struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, int type = skb_shinfo(skb)->gso_type; if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY | + SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE) || !(type & (SKB_GSO_UDP)))) goto out; @@ -2316,20 +2374,27 @@ struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, goto out; } - /* Do software UFO. Complete and fill in the UDP checksum as HW cannot - * do checksum of UDP packets sent as multiple IP fragments. - */ - offset = skb_checksum_start_offset(skb); - csum = skb_checksum(skb, offset, skb->len - offset, 0); - offset += skb->csum_offset; - *(__sum16 *)(skb->data + offset) = csum_fold(csum); - skb->ip_summed = CHECKSUM_NONE; - /* Fragment the skb. IP headers of the fragments are updated in * inet_gso_segment() */ - segs = skb_segment(skb, features); + if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) + segs = skb_udp_tunnel_segment(skb, features); + else { + int offset; + __wsum csum; + + /* Do software UFO. Complete and fill in the UDP checksum as + * HW cannot do checksum of UDP packets sent as multiple + * IP fragments. + */ + offset = skb_checksum_start_offset(skb); + csum = skb_checksum(skb, offset, skb->len - offset, 0); + offset += skb->csum_offset; + *(__sum16 *)(skb->data + offset) = csum_fold(csum); + skb->ip_summed = CHECKSUM_NONE; + + segs = skb_segment(skb, features); + } out: return segs; } - diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 7a0d25a5479c..71b766ee821d 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -97,6 +97,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, SKB_GSO_DODGY | SKB_GSO_TCP_ECN | SKB_GSO_GRE | + SKB_GSO_UDP_TUNNEL | SKB_GSO_TCPV6 | 0))) goto out; diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index cf05cf073c51..3bb3a891a424 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -21,6 +21,10 @@ static int udp6_ufo_send_check(struct sk_buff *skb) const struct ipv6hdr *ipv6h; struct udphdr *uh; + /* UDP Tunnel offload on ipv6 is not yet supported. */ + if (skb->encapsulation) + return -EINVAL; + if (!pskb_may_pull(skb, sizeof(*uh))) return -EINVAL; @@ -56,7 +60,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, /* Packet is from an untrusted source, reset gso_segs. */ int type = skb_shinfo(skb)->gso_type; - if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY | + if (unlikely(type & ~(SKB_GSO_UDP | + SKB_GSO_DODGY | + SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE) || !(type & (SKB_GSO_UDP)))) goto out; -- cgit v1.2.3-70-g09d2 From e61667af2f77d481411f2ccd307fed2247d785a8 Mon Sep 17 00:00:00 2001 From: Christoph Paasch Date: Sun, 10 Mar 2013 05:18:39 +0000 Subject: tcp: Remove unused tw_cookie_values from tcp_timewait_sock tw_cookie_values is never used in the TCP-stack. 
It was added by 435cf559f (TCPCT part 1d: define TCP cookie option, extend existing struct's), but already at that time it was not used at all, nor mentioned in the commit-message. Signed-off-by: Christoph Paasch Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/tcp.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/tcp.h b/include/linux/tcp.h index f28408c07dc2..515c3746b675 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -361,10 +361,6 @@ struct tcp_timewait_sock { #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *tw_md5_key; #endif - /* Few sockets in timewait have cookies; in that case, then this - * object holds a reference to them (tw_cookie_values->kref). - */ - struct tcp_cookie_values *tw_cookie_values; }; static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk) -- cgit v1.2.3-70-g09d2 From 26fd76cab2e61cedc5c25f7151fb31b57ddc53c7 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Fri, 22 Feb 2013 10:53:25 +0100 Subject: NFC: llcp: Implement socket options Some LLCP services (e.g. the validation ones) require some control over the LLCP link parameters like the receive window (RW) or the MIU extension (MIUX). This can only be done through socket options. Signed-off-by: Samuel Ortiz --- include/linux/socket.h | 1 + include/uapi/linux/nfc.h | 4 ++ net/nfc/llcp/llcp.h | 3 ++ net/nfc/llcp/sock.c | 119 ++++++++++++++++++++++++++++++++++++++++++++++- 4 files changed, 125 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/socket.h b/include/linux/socket.h index 2b9f74b0ffea..428c37a1f95c 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -298,6 +298,7 @@ struct ucred { #define SOL_IUCV 277 #define SOL_CAIF 278 #define SOL_ALG 279 +#define SOL_NFC 280 /* IPX options */ #define IPX_TYPE 1 diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h index 7969f46f1bb3..855630fe731d 100644 --- a/include/uapi/linux/nfc.h +++ b/include/uapi/linux/nfc.h @@ -220,4 +220,8 @@ struct sockaddr_nfc_llcp { #define NFC_LLCP_DIRECTION_RX 0x00 #define NFC_LLCP_DIRECTION_TX 0x01 +/* socket option names */ +#define NFC_LLCP_RW 0 +#define NFC_LLCP_MIUX 1 + #endif /*__LINUX_NFC_H */ diff --git a/net/nfc/llcp/llcp.h b/net/nfc/llcp/llcp.h index 32cec81939e6..5f117adac2e5 100644 --- a/net/nfc/llcp/llcp.h +++ b/net/nfc/llcp/llcp.h @@ -104,6 +104,9 @@ struct nfc_llcp_sock { u8 dsap; char *service_name; size_t service_name_len; + u8 rw; + u16 miux; + /* Remote link parameters */ u8 remote_rw; diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c index cc564992ba95..9357a756f7a9 100644 --- a/net/nfc/llcp/sock.c +++ b/net/nfc/llcp/sock.c @@ -223,6 +223,121 @@ error: return ret; } +static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname, + char __user *optval, unsigned int optlen) +{ + struct sock *sk = sock->sk; + struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); + u32 opt; + int err = 0; + + pr_debug("%p optname %d\n", sk, optname); + + if (level != SOL_NFC) + return -ENOPROTOOPT; + + lock_sock(sk); + + switch (optname) { + case NFC_LLCP_RW: + if (sk->sk_state == LLCP_CONNECTED || + sk->sk_state == LLCP_BOUND || + sk->sk_state == LLCP_LISTEN) { + err = -EINVAL; + break; + } + + if (get_user(opt, (u32 __user *) optval)) { + err = -EFAULT; + break; + } + + if (opt > LLCP_MAX_RW) { + err = -EINVAL; + break; + } + + llcp_sock->rw = (u8) opt; + + break; + + case NFC_LLCP_MIUX: + if (sk->sk_state == LLCP_CONNECTED || + sk->sk_state == LLCP_BOUND || + 
sk->sk_state == LLCP_LISTEN) { + err = -EINVAL; + break; + } + + if (get_user(opt, (u32 __user *) optval)) { + err = -EFAULT; + break; + } + + if (opt > LLCP_MAX_MIUX) { + err = -EINVAL; + break; + } + + llcp_sock->miux = (u16) opt; + + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + + return err; +} + +static int nfc_llcp_getsockopt(struct socket *sock, int level, int optname, + char __user *optval, int __user *optlen) +{ + struct sock *sk = sock->sk; + struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); + int len, err = 0; + + pr_debug("%p optname %d\n", sk, optname); + + if (level != SOL_NFC) + return -ENOPROTOOPT; + + if (get_user(len, optlen)) + return -EFAULT; + + len = min_t(u32, len, sizeof(u32)); + + lock_sock(sk); + + switch (optname) { + case NFC_LLCP_RW: + if (put_user(llcp_sock->rw, (u32 __user *) optval)) + err = -EFAULT; + + break; + + case NFC_LLCP_MIUX: + if (put_user(llcp_sock->miux, (u32 __user *) optval)) + err = -EFAULT; + + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + + if (put_user(len, optlen)) + return -EFAULT; + + return err; +} + void nfc_llcp_accept_unlink(struct sock *sk) { struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); @@ -735,8 +850,8 @@ static const struct proto_ops llcp_sock_ops = { .ioctl = sock_no_ioctl, .listen = llcp_sock_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, + .setsockopt = nfc_llcp_setsockopt, + .getsockopt = nfc_llcp_getsockopt, .sendmsg = llcp_sock_sendmsg, .recvmsg = llcp_sock_recvmsg, .mmap = sock_no_mmap, -- cgit v1.2.3-70-g09d2 From b818d1a7f72575eef17e00dc4085512c9cc8897d Mon Sep 17 00:00:00 2001 From: Hector Palacios Date: Sun, 10 Mar 2013 22:50:02 +0000 Subject: phy/micrel: Add support for KSZ8031 Micrel PHY KSZ8031 is similar to KSZ8021 and also requires the special initialization of "Operation Mode Strap Override" in reg 0x16 introduced in 212ea99 (phy/micrel: Implement support for KSZ8021). Signed-off-by: Hector Palacios Reviewed-by: Marek Vasut Signed-off-by: David S. 
Miller --- drivers/net/phy/micrel.c | 14 ++++++++++++++ include/linux/micrel_phy.h | 1 + 2 files changed, 15 insertions(+) (limited to 'include/linux') diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index abf7b6153d00..018af1852fe1 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -191,6 +191,19 @@ static struct phy_driver ksphy_driver[] = { .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, .driver = { .owner = THIS_MODULE,}, +}, { + .phy_id = PHY_ID_KSZ8031, + .phy_id_mask = 0x00ffffff, + .name = "Micrel KSZ8031", + .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, + .config_init = ksz8021_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .ack_interrupt = kszphy_ack_interrupt, + .config_intr = kszphy_config_intr, + .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_KSZ8041, .phy_id_mask = 0x00fffff0, @@ -325,6 +338,7 @@ static struct mdio_device_id __maybe_unused micrel_tbl[] = { { PHY_ID_KSZ8001, 0x00ffffff }, { PHY_ID_KS8737, 0x00fffff0 }, { PHY_ID_KSZ8021, 0x00ffffff }, + { PHY_ID_KSZ8031, 0x00ffffff }, { PHY_ID_KSZ8041, 0x00fffff0 }, { PHY_ID_KSZ8051, 0x00fffff0 }, { PHY_ID_KSZ8061, 0x00fffff0 }, diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h index 9dbb41a4e250..8752dbbc6135 100644 --- a/include/linux/micrel_phy.h +++ b/include/linux/micrel_phy.h @@ -19,6 +19,7 @@ #define PHY_ID_KSZ9021 0x00221610 #define PHY_ID_KS8737 0x00221720 #define PHY_ID_KSZ8021 0x00221555 +#define PHY_ID_KSZ8031 0x00221556 #define PHY_ID_KSZ8041 0x00221510 #define PHY_ID_KSZ8051 0x00221550 /* same id: ks8001 Rev. A/B, and ks8721 Rev 3. */ -- cgit v1.2.3-70-g09d2 From 6ba8a3b19e764b6a65e4030ab0999be50c291e6c Mon Sep 17 00:00:00 2001 From: Nandita Dukkipati Date: Mon, 11 Mar 2013 10:00:43 +0000 Subject: tcp: Tail loss probe (TLP) This patch series implement the Tail loss probe (TLP) algorithm described in http://tools.ietf.org/html/draft-dukkipati-tcpm-tcp-loss-probe-01. The first patch implements the basic algorithm. TLP's goal is to reduce tail latency of short transactions. It achieves this by converting retransmission timeouts (RTOs) occuring due to tail losses (losses at end of transactions) into fast recovery. TLP transmits one packet in two round-trips when a connection is in Open state and isn't receiving any ACKs. The transmitted packet, aka loss probe, can be either new or a retransmission. When there is tail loss, the ACK from a loss probe triggers FACK/early-retransmit based fast recovery, thus avoiding a costly RTO. In the absence of loss, there is no change in the connection state. PTO stands for probe timeout. It is a timer event indicating that an ACK is overdue and triggers a loss probe packet. The PTO value is set to max(2*SRTT, 10ms) and is adjusted to account for delayed ACK timer when there is only one oustanding packet. TLP Algorithm On transmission of new data in Open state: -> packets_out > 1: schedule PTO in max(2*SRTT, 10ms). -> packets_out == 1: schedule PTO in max(2*RTT, 1.5*RTT + 200ms) -> PTO = min(PTO, RTO) Conditions for scheduling PTO: -> Connection is in Open state. -> Connection is either cwnd limited or no new data to send. -> Number of probes per tail loss episode is limited to one. -> Connection is SACK enabled. When PTO fires: new_segment_exists: -> transmit new segment. -> packets_out++. cwnd remains same. no_new_packet: -> retransmit the last segment. 
Its ACK triggers FACK or early retransmit based recovery. ACK path: -> rearm RTO at start of ACK processing. -> reschedule PTO if need be. In addition, the patch includes a small variation to the Early Retransmit (ER) algorithm, such that ER and TLP together can in principle recover any N-degree of tail loss through fast recovery. TLP is controlled by the same sysctl as ER, tcp_early_retrans sysctl. tcp_early_retrans==0; disables TLP and ER. ==1; enables RFC5827 ER. ==2; delayed ER. ==3; TLP and delayed ER. [DEFAULT] ==4; TLP only. The TLP patch series have been extensively tested on Google Web servers. It is most effective for short Web trasactions, where it reduced RTOs by 15% and improved HTTP response time (average by 6%, 99th percentile by 10%). The transmitted probes account for <0.5% of the overall transmissions. Signed-off-by: Nandita Dukkipati Acked-by: Neal Cardwell Acked-by: Yuchung Cheng Signed-off-by: David S. Miller --- Documentation/networking/ip-sysctl.txt | 8 ++- include/linux/tcp.h | 1 - include/net/inet_connection_sock.h | 5 +- include/net/tcp.h | 6 +- include/uapi/linux/snmp.h | 1 + net/ipv4/inet_diag.c | 4 +- net/ipv4/proc.c | 1 + net/ipv4/sysctl_net_ipv4.c | 4 +- net/ipv4/tcp_input.c | 24 ++++--- net/ipv4/tcp_ipv4.c | 4 +- net/ipv4/tcp_output.c | 128 +++++++++++++++++++++++++++++++-- net/ipv4/tcp_timer.c | 13 ++-- 12 files changed, 171 insertions(+), 28 deletions(-) (limited to 'include/linux') diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index dc2dc87d2557..1cae6c383e1b 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -190,7 +190,9 @@ tcp_early_retrans - INTEGER Enable Early Retransmit (ER), per RFC 5827. ER lowers the threshold for triggering fast retransmit when the amount of outstanding data is small and when no previously unsent data can be transmitted (such - that limited transmit could be used). + that limited transmit could be used). Also controls the use of + Tail loss probe (TLP) that converts RTOs occuring due to tail + losses into fast recovery (draft-dukkipati-tcpm-tcp-loss-probe-01). Possible values: 0 disables ER 1 enables ER @@ -198,7 +200,9 @@ tcp_early_retrans - INTEGER by a fourth of RTT. This mitigates connection falsely recovers when network has a small degree of reordering (less than 3 packets). - Default: 2 + 3 enables delayed ER and TLP. + 4 enables TLP only. + Default: 3 tcp_ecn - INTEGER Control use of Explicit Congestion Notification (ECN) by TCP. 
diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 515c3746b675..01860d74555c 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -201,7 +201,6 @@ struct tcp_sock { unused : 1; u8 repair_queue; u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */ - early_retrans_delayed:1, /* Delayed ER timer installed */ syn_data:1, /* SYN includes data */ syn_fastopen:1, /* SYN includes Fast Open option */ syn_data_acked:1;/* data in SYN is acked by SYN-ACK */ diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 183292722f6e..de2c78529afa 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -133,6 +133,8 @@ struct inet_connection_sock { #define ICSK_TIME_RETRANS 1 /* Retransmit timer */ #define ICSK_TIME_DACK 2 /* Delayed ack timer */ #define ICSK_TIME_PROBE0 3 /* Zero window probe timer */ +#define ICSK_TIME_EARLY_RETRANS 4 /* Early retransmit timer */ +#define ICSK_TIME_LOSS_PROBE 5 /* Tail loss probe timer */ static inline struct inet_connection_sock *inet_csk(const struct sock *sk) { @@ -222,7 +224,8 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what, when = max_when; } - if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) { + if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 || + what == ICSK_TIME_EARLY_RETRANS || what == ICSK_TIME_LOSS_PROBE) { icsk->icsk_pending = what; icsk->icsk_timeout = jiffies + when; sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); diff --git a/include/net/tcp.h b/include/net/tcp.h index a2baa5e4ba31..ab9f947b118b 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -543,6 +543,8 @@ extern bool tcp_syn_flood_action(struct sock *sk, extern void tcp_push_one(struct sock *, unsigned int mss_now); extern void tcp_send_ack(struct sock *sk); extern void tcp_send_delayed_ack(struct sock *sk); +extern void tcp_send_loss_probe(struct sock *sk); +extern bool tcp_schedule_loss_probe(struct sock *sk); /* tcp_input.c */ extern void tcp_cwnd_application_limited(struct sock *sk); @@ -873,8 +875,8 @@ static inline void tcp_enable_fack(struct tcp_sock *tp) static inline void tcp_enable_early_retrans(struct tcp_sock *tp) { tp->do_early_retrans = sysctl_tcp_early_retrans && - !sysctl_tcp_thin_dupack && sysctl_tcp_reordering == 3; - tp->early_retrans_delayed = 0; + sysctl_tcp_early_retrans < 4 && !sysctl_tcp_thin_dupack && + sysctl_tcp_reordering == 3; } static inline void tcp_disable_early_retrans(struct tcp_sock *tp) diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index b49eab89c9fd..290bed6b085f 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -202,6 +202,7 @@ enum LINUX_MIB_TCPFORWARDRETRANS, /* TCPForwardRetrans */ LINUX_MIB_TCPSLOWSTARTRETRANS, /* TCPSlowStartRetrans */ LINUX_MIB_TCPTIMEOUTS, /* TCPTimeouts */ + LINUX_MIB_TCPLOSSPROBES, /* TCPLossProbes */ LINUX_MIB_TCPRENORECOVERYFAIL, /* TCPRenoRecoveryFail */ LINUX_MIB_TCPSACKRECOVERYFAIL, /* TCPSackRecoveryFail */ LINUX_MIB_TCPSCHEDULERFAILED, /* TCPSchedulerFailed */ diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 7afa2c3c788f..8620408af574 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -158,7 +158,9 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, #define EXPIRES_IN_MS(tmo) DIV_ROUND_UP((tmo - jiffies) * 1000, HZ) - if (icsk->icsk_pending == ICSK_TIME_RETRANS) { + if (icsk->icsk_pending == ICSK_TIME_RETRANS || + icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || 
+ icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { r->idiag_timer = 1; r->idiag_retrans = icsk->icsk_retransmits; r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout); diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 32030a24e776..4c35911d935f 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -224,6 +224,7 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPForwardRetrans", LINUX_MIB_TCPFORWARDRETRANS), SNMP_MIB_ITEM("TCPSlowStartRetrans", LINUX_MIB_TCPSLOWSTARTRETRANS), SNMP_MIB_ITEM("TCPTimeouts", LINUX_MIB_TCPTIMEOUTS), + SNMP_MIB_ITEM("TCPLossProbes", LINUX_MIB_TCPLOSSPROBES), SNMP_MIB_ITEM("TCPRenoRecoveryFail", LINUX_MIB_TCPRENORECOVERYFAIL), SNMP_MIB_ITEM("TCPSackRecoveryFail", LINUX_MIB_TCPSACKRECOVERYFAIL), SNMP_MIB_ITEM("TCPSchedulerFailed", LINUX_MIB_TCPSCHEDULERFAILED), diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 960fd29d9b8e..cca4550f4082 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -28,7 +28,7 @@ static int zero; static int one = 1; -static int two = 2; +static int four = 4; static int tcp_retr1_max = 255; static int ip_local_port_range_min[] = { 1, 1 }; static int ip_local_port_range_max[] = { 65535, 65535 }; @@ -760,7 +760,7 @@ static struct ctl_table ipv4_table[] = { .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, - .extra2 = &two, + .extra2 = &four, }, { .procname = "udp_mem", diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 0d9bdacce99f..b794f89ac1f2 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -98,7 +98,7 @@ int sysctl_tcp_frto_response __read_mostly; int sysctl_tcp_thin_dupack __read_mostly; int sysctl_tcp_moderate_rcvbuf __read_mostly = 1; -int sysctl_tcp_early_retrans __read_mostly = 2; +int sysctl_tcp_early_retrans __read_mostly = 3; #define FLAG_DATA 0x01 /* Incoming frame contained data. */ #define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */ @@ -2150,15 +2150,16 @@ static bool tcp_pause_early_retransmit(struct sock *sk, int flag) * max(RTT/4, 2msec) unless ack has ECE mark, no RTT samples * available, or RTO is scheduled to fire first. */ - if (sysctl_tcp_early_retrans < 2 || (flag & FLAG_ECE) || !tp->srtt) + if (sysctl_tcp_early_retrans < 2 || sysctl_tcp_early_retrans > 3 || + (flag & FLAG_ECE) || !tp->srtt) return false; delay = max_t(unsigned long, (tp->srtt >> 5), msecs_to_jiffies(2)); if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay))) return false; - inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, delay, TCP_RTO_MAX); - tp->early_retrans_delayed = 1; + inet_csk_reset_xmit_timer(sk, ICSK_TIME_EARLY_RETRANS, delay, + TCP_RTO_MAX); return true; } @@ -2321,7 +2322,7 @@ static bool tcp_time_to_recover(struct sock *sk, int flag) * interval if appropriate. 
*/ if (tp->do_early_retrans && !tp->retrans_out && tp->sacked_out && - (tp->packets_out == (tp->sacked_out + 1) && tp->packets_out < 4) && + (tp->packets_out >= (tp->sacked_out + 1) && tp->packets_out < 4) && !tcp_may_send_now(sk)) return !tcp_pause_early_retransmit(sk, flag); @@ -3081,6 +3082,7 @@ static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) */ void tcp_rearm_rto(struct sock *sk) { + const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); /* If the retrans timer is currently being used by Fast Open @@ -3094,12 +3096,13 @@ void tcp_rearm_rto(struct sock *sk) } else { u32 rto = inet_csk(sk)->icsk_rto; /* Offset the time elapsed after installing regular RTO */ - if (tp->early_retrans_delayed) { + if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || + icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { struct sk_buff *skb = tcp_write_queue_head(sk); const u32 rto_time_stamp = TCP_SKB_CB(skb)->when + rto; s32 delta = (s32)(rto_time_stamp - tcp_time_stamp); /* delta may not be positive if the socket is locked - * when the delayed ER timer fires and is rescheduled. + * when the retrans timer fires and is rescheduled. */ if (delta > 0) rto = delta; @@ -3107,7 +3110,6 @@ void tcp_rearm_rto(struct sock *sk) inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, TCP_RTO_MAX); } - tp->early_retrans_delayed = 0; } /* This function is called when the delayed ER timer fires. TCP enters @@ -3601,7 +3603,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) if (after(ack, tp->snd_nxt)) goto invalid_ack; - if (tp->early_retrans_delayed) + if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || + icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) tcp_rearm_rto(sk); if (after(ack, prior_snd_una)) @@ -3678,6 +3681,9 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) if (dst) dst_confirm(dst); } + + if (icsk->icsk_pending == ICSK_TIME_RETRANS) + tcp_schedule_loss_probe(sk); return 1; no_queue: diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 8cdee120a50c..b7ab868c8284 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2703,7 +2703,9 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len) __u16 srcp = ntohs(inet->inet_sport); int rx_queue; - if (icsk->icsk_pending == ICSK_TIME_RETRANS) { + if (icsk->icsk_pending == ICSK_TIME_RETRANS || + icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || + icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { timer_active = 1; timer_expires = icsk->icsk_timeout; } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index e2b4461074da..beb63dbc85f5 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -74,6 +74,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, /* Account for new data that has been sent to the network. 
*/ static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb) { + struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); unsigned int prior_packets = tp->packets_out; @@ -85,7 +86,8 @@ static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb) tp->frto_counter = 3; tp->packets_out += tcp_skb_pcount(skb); - if (!prior_packets || tp->early_retrans_delayed) + if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || + icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) tcp_rearm_rto(sk); } @@ -1959,6 +1961,9 @@ static int tcp_mtu_probe(struct sock *sk) * snd_up-64k-mss .. snd_up cannot be large. However, taking into * account rare use of URG, this is not a big flaw. * + * Send at most one packet when push_one > 0. Temporarily ignore + * cwnd limit to force at most one packet out when push_one == 2. + * Returns true, if no segments are in flight and we have queued segments, * but cannot send anything now because of SWS or another problem. */ @@ -1994,8 +1999,13 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, goto repair; /* Skip network transmission */ cwnd_quota = tcp_cwnd_test(tp, skb); - if (!cwnd_quota) - break; + if (!cwnd_quota) { + if (push_one == 2) + /* Force out a loss probe pkt. */ + cwnd_quota = 1; + else + break; + } if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) break; @@ -2049,10 +2059,120 @@ repair: if (likely(sent_pkts)) { if (tcp_in_cwnd_reduction(sk)) tp->prr_out += sent_pkts; + + /* Send one loss probe per tail loss episode. */ + if (push_one != 2) + tcp_schedule_loss_probe(sk); tcp_cwnd_validate(sk); return false; } - return !tp->packets_out && tcp_send_head(sk); + return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk)); +} + +bool tcp_schedule_loss_probe(struct sock *sk) +{ + struct inet_connection_sock *icsk = inet_csk(sk); + struct tcp_sock *tp = tcp_sk(sk); + u32 timeout, tlp_time_stamp, rto_time_stamp; + u32 rtt = tp->srtt >> 3; + + if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS)) + return false; + /* No consecutive loss probes. */ + if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) { + tcp_rearm_rto(sk); + return false; + } + /* Don't do any loss probe on a Fast Open connection before 3WHS + * finishes. + */ + if (sk->sk_state == TCP_SYN_RECV) + return false; + + /* TLP is only scheduled when next timer event is RTO. */ + if (icsk->icsk_pending != ICSK_TIME_RETRANS) + return false; + + /* Schedule a loss probe in 2*RTT for SACK capable connections + * in Open state, that are either limited by cwnd or application. + */ + if (sysctl_tcp_early_retrans < 3 || !rtt || !tp->packets_out || + !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open) + return false; + + if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) && + tcp_send_head(sk)) + return false; + + /* Probe timeout is at least 1.5*rtt + TCP_DELACK_MAX to account + * for delayed ack when there's one outstanding packet. + */ + timeout = rtt << 1; + if (tp->packets_out == 1) + timeout = max_t(u32, timeout, + (rtt + (rtt >> 1) + TCP_DELACK_MAX)); + timeout = max_t(u32, timeout, msecs_to_jiffies(10)); + + /* If RTO is shorter, just schedule TLP in its place. 
*/ + tlp_time_stamp = tcp_time_stamp + timeout; + rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout; + if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) { + s32 delta = rto_time_stamp - tcp_time_stamp; + if (delta > 0) + timeout = delta; + } + + inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, + TCP_RTO_MAX); + return true; +} + +/* When probe timeout (PTO) fires, send a new segment if one exists, else + * retransmit the last segment. + */ +void tcp_send_loss_probe(struct sock *sk) +{ + struct sk_buff *skb; + int pcount; + int mss = tcp_current_mss(sk); + int err = -1; + + if (tcp_send_head(sk) != NULL) { + err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC); + goto rearm_timer; + } + + /* Retransmit last segment. */ + skb = tcp_write_queue_tail(sk); + if (WARN_ON(!skb)) + goto rearm_timer; + + pcount = tcp_skb_pcount(skb); + if (WARN_ON(!pcount)) + goto rearm_timer; + + if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) { + if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss))) + goto rearm_timer; + skb = tcp_write_queue_tail(sk); + } + + if (WARN_ON(!skb || !tcp_skb_pcount(skb))) + goto rearm_timer; + + /* Probe with zero data doesn't trigger fast recovery. */ + if (skb->len > 0) + err = __tcp_retransmit_skb(sk, skb); + +rearm_timer: + inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, + inet_csk(sk)->icsk_rto, + TCP_RTO_MAX); + + if (likely(!err)) + NET_INC_STATS_BH(sock_net(sk), + LINUX_MIB_TCPLOSSPROBES); + return; } /* Push out any pending frames which were held back due to diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index b78aac30c498..ecd61d54147f 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -342,10 +342,6 @@ void tcp_retransmit_timer(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); - if (tp->early_retrans_delayed) { - tcp_resume_early_retransmit(sk); - return; - } if (tp->fastopen_rsk) { WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && sk->sk_state != TCP_FIN_WAIT1); @@ -495,13 +491,20 @@ void tcp_write_timer_handler(struct sock *sk) } event = icsk->icsk_pending; - icsk->icsk_pending = 0; switch (event) { + case ICSK_TIME_EARLY_RETRANS: + tcp_resume_early_retransmit(sk); + break; + case ICSK_TIME_LOSS_PROBE: + tcp_send_loss_probe(sk); + break; case ICSK_TIME_RETRANS: + icsk->icsk_pending = 0; tcp_retransmit_timer(sk); break; case ICSK_TIME_PROBE0: + icsk->icsk_pending = 0; tcp_probe_timer(sk); break; } -- cgit v1.2.3-70-g09d2 From 9b717a8d245075ffb8e95a2dfb4ee97ce4747457 Mon Sep 17 00:00:00 2001 From: Nandita Dukkipati Date: Mon, 11 Mar 2013 10:00:44 +0000 Subject: tcp: TLP loss detection. This is the second of the TLP patch series; it augments the basic TLP algorithm with a loss detection scheme. This patch implements a mechanism for loss detection when a Tail loss probe retransmission plugs a hole thereby masking packet loss from the sender. The loss detection algorithm relies on counting TLP dupacks as outlined in Sec. 3 of: http://tools.ietf.org/html/draft-dukkipati-tcpm-tcp-loss-probe-01 The basic idea is: Sender keeps track of TLP "episode" upon retransmission of a TLP packet. An episode ends when the sender receives an ACK above the SND.NXT (tracked by tlp_high_seq) at the time of the episode. We want to make sure that before the episode ends the sender receives a "TLP dupack", indicating that the TLP retransmission was unnecessary, so there was no loss/hole that needed plugging. 
If the sender gets no TLP dupack before the end of the episode, then it reduces ssthresh and the congestion window, because the TLP packet arriving at the receiver probably plugged a hole. Signed-off-by: Nandita Dukkipati Acked-by: Neal Cardwell Signed-off-by: David S. Miller --- include/linux/tcp.h | 1 + include/uapi/linux/snmp.h | 1 + net/ipv4/proc.c | 1 + net/ipv4/tcp_input.c | 39 +++++++++++++++++++++++++++++++++++++++ net/ipv4/tcp_minisocks.c | 1 + net/ipv4/tcp_output.c | 9 +++++++++ net/ipv4/tcp_timer.c | 2 ++ 7 files changed, 54 insertions(+) (limited to 'include/linux') diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 01860d74555c..763c108ee03d 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -204,6 +204,7 @@ struct tcp_sock { syn_data:1, /* SYN includes data */ syn_fastopen:1, /* SYN includes Fast Open option */ syn_data_acked:1;/* data in SYN is acked by SYN-ACK */ + u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */ /* RTT measurement */ u32 srtt; /* smoothed round trip time << 3 */ diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index 290bed6b085f..e00013a1debc 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -203,6 +203,7 @@ enum LINUX_MIB_TCPSLOWSTARTRETRANS, /* TCPSlowStartRetrans */ LINUX_MIB_TCPTIMEOUTS, /* TCPTimeouts */ LINUX_MIB_TCPLOSSPROBES, /* TCPLossProbes */ + LINUX_MIB_TCPLOSSPROBERECOVERY, /* TCPLossProbeRecovery */ LINUX_MIB_TCPRENORECOVERYFAIL, /* TCPRenoRecoveryFail */ LINUX_MIB_TCPSACKRECOVERYFAIL, /* TCPSackRecoveryFail */ LINUX_MIB_TCPSCHEDULERFAILED, /* TCPSchedulerFailed */ diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 4c35911d935f..b6f2ea174898 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -225,6 +225,7 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPSlowStartRetrans", LINUX_MIB_TCPSLOWSTARTRETRANS), SNMP_MIB_ITEM("TCPTimeouts", LINUX_MIB_TCPTIMEOUTS), SNMP_MIB_ITEM("TCPLossProbes", LINUX_MIB_TCPLOSSPROBES), + SNMP_MIB_ITEM("TCPLossProbeRecovery", LINUX_MIB_TCPLOSSPROBERECOVERY), SNMP_MIB_ITEM("TCPRenoRecoveryFail", LINUX_MIB_TCPRENORECOVERYFAIL), SNMP_MIB_ITEM("TCPSackRecoveryFail", LINUX_MIB_TCPSACKRECOVERYFAIL), SNMP_MIB_ITEM("TCPSchedulerFailed", LINUX_MIB_TCPSCHEDULERFAILED), diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index b794f89ac1f2..836d74dd0187 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2682,6 +2682,7 @@ static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh) struct tcp_sock *tp = tcp_sk(sk); tp->high_seq = tp->snd_nxt; + tp->tlp_high_seq = 0; tp->snd_cwnd_cnt = 0; tp->prior_cwnd = tp->snd_cwnd; tp->prr_delivered = 0; @@ -3569,6 +3570,38 @@ static void tcp_send_challenge_ack(struct sock *sk) } } +/* This routine deals with acks during a TLP episode. + * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe. + */ +static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) +{ + struct tcp_sock *tp = tcp_sk(sk); + bool is_tlp_dupack = (ack == tp->tlp_high_seq) && + !(flag & (FLAG_SND_UNA_ADVANCED | + FLAG_NOT_DUP | FLAG_DATA_SACKED)); + + /* Mark the end of TLP episode on receiving TLP dupack or when + * ack is after tlp_high_seq. + */ + if (is_tlp_dupack) { + tp->tlp_high_seq = 0; + return; + } + + if (after(ack, tp->tlp_high_seq)) { + tp->tlp_high_seq = 0; + /* Don't reduce cwnd if DSACK arrives for TLP retrans. 
*/ + if (!(flag & FLAG_DSACKING_ACK)) { + tcp_init_cwnd_reduction(sk, true); + tcp_set_ca_state(sk, TCP_CA_CWR); + tcp_end_cwnd_reduction(sk); + tcp_set_ca_state(sk, TCP_CA_Open); + NET_INC_STATS_BH(sock_net(sk), + LINUX_MIB_TCPLOSSPROBERECOVERY); + } + } +} + /* This routine deals with incoming acks, but not outgoing ones. */ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) { @@ -3676,6 +3709,9 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) tcp_cong_avoid(sk, ack, prior_in_flight); } + if (tp->tlp_high_seq) + tcp_process_tlp_ack(sk, ack, flag); + if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) { struct dst_entry *dst = __sk_dst_get(sk); if (dst) @@ -3697,6 +3733,9 @@ no_queue: */ if (tcp_send_head(sk)) tcp_ack_probe(sk); + + if (tp->tlp_high_seq) + tcp_process_tlp_ack(sk, ack, flag); return 1; invalid_ack: diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index b83a49cc3816..4bdb09fca401 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -440,6 +440,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, newtp->fackets_out = 0; newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH; tcp_enable_early_retrans(newtp); + newtp->tlp_high_seq = 0; /* So many TCP implementations out there (incorrectly) count the * initial SYN frame in their delayed-ACK and congestion control diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index beb63dbc85f5..8e7742f0b5d2 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2132,6 +2132,7 @@ bool tcp_schedule_loss_probe(struct sock *sk) */ void tcp_send_loss_probe(struct sock *sk) { + struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; int pcount; int mss = tcp_current_mss(sk); @@ -2142,6 +2143,10 @@ void tcp_send_loss_probe(struct sock *sk) goto rearm_timer; } + /* At most one outstanding TLP retransmission. */ + if (tp->tlp_high_seq) + goto rearm_timer; + /* Retransmit last segment. */ skb = tcp_write_queue_tail(sk); if (WARN_ON(!skb)) @@ -2164,6 +2169,10 @@ void tcp_send_loss_probe(struct sock *sk) if (skb->len > 0) err = __tcp_retransmit_skb(sk, skb); + /* Record snd_nxt for loss detection. */ + if (likely(!err)) + tp->tlp_high_seq = tp->snd_nxt; + rearm_timer: inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto, diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index ecd61d54147f..eeccf795e917 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -356,6 +356,8 @@ void tcp_retransmit_timer(struct sock *sk) WARN_ON(tcp_write_queue_empty(sk)); + tp->tlp_high_seq = 0; + if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) && !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) { /* Receiver dastardly shrinks window. Our retransmits -- cgit v1.2.3-70-g09d2 From 42e836eb4527fb635cb799a701fe4c9fe741c03a Mon Sep 17 00:00:00 2001 From: Michael Stapelberg Date: Mon, 11 Mar 2013 13:56:44 +0000 Subject: phy: add set_wol/get_wol functions This allows ethernet drivers (such as the mv643xx_eth) to support Wake on LAN on platforms where PHY registers have to be configured for Wake on LAN (e.g. the Marvell Kirkwood based qnap TS-119P II). Signed-off-by: Michael Stapelberg Signed-off-by: David S. 
Miller --- drivers/net/phy/phy.c | 16 ++++++++++++++++ include/linux/phy.h | 10 ++++++++++ 2 files changed, 26 insertions(+) (limited to 'include/linux') diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index ef9ea9248223..298b4c201733 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -1188,3 +1188,19 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data) return 0; } EXPORT_SYMBOL(phy_ethtool_set_eee); + +int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) +{ + if (phydev->drv->set_wol) + return phydev->drv->set_wol(phydev, wol); + + return -EOPNOTSUPP; +} +EXPORT_SYMBOL(phy_ethtool_set_wol); + +void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) +{ + if (phydev->drv->get_wol) + phydev->drv->get_wol(phydev, wol); +} +EXPORT_SYMBOL(phy_ethtool_get_wol); diff --git a/include/linux/phy.h b/include/linux/phy.h index 33999adbf8c8..9e11039dd7a3 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -455,6 +455,14 @@ struct phy_driver { */ void (*txtstamp)(struct phy_device *dev, struct sk_buff *skb, int type); + /* Some devices (e.g. qnap TS-119P II) require PHY register changes to + * enable Wake on LAN, so set_wol is provided to be called in the + * ethernet driver's set_wol function. */ + int (*set_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol); + + /* See set_wol, but for checking whether Wake on LAN is enabled. */ + void (*get_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol); + struct device_driver driver; }; #define to_phy_driver(d) container_of(d, struct phy_driver, driver) @@ -560,6 +568,8 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable); int phy_get_eee_err(struct phy_device *phydev); int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data); int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data); +int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol); +void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol); int __init mdio_bus_init(void); void mdio_bus_exit(void); -- cgit v1.2.3-70-g09d2 From e86ac13b031cf71d8f40ff513e627aac80e6b765 Mon Sep 17 00:00:00 2001 From: Mugunthan V N Date: Mon, 11 Mar 2013 23:16:35 +0000 Subject: drivers: net: ethernet: cpsw: change cpts_active_slave to active_slave Change cpts_active_slave to active_slave so that the same DT property can be used to ethtool and SIOCGMIIPHY. CC: Richard Cochran Signed-off-by: Mugunthan V N Signed-off-by: David S. 
Miller --- Documentation/devicetree/bindings/net/cpsw.txt | 7 ++++--- arch/arm/boot/dts/am33xx.dtsi | 2 +- drivers/net/ethernet/ti/cpsw.c | 10 +++++----- include/linux/platform_data/cpsw.h | 2 +- 4 files changed, 11 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt index 8e49c4200928..4f2ca6b4a182 100644 --- a/Documentation/devicetree/bindings/net/cpsw.txt +++ b/Documentation/devicetree/bindings/net/cpsw.txt @@ -15,7 +15,8 @@ Required properties: - mac_control : Specifies Default MAC control register content for the specific platform - slaves : Specifies number for slaves -- cpts_active_slave : Specifies the slave to use for time stamping +- active_slave : Specifies the slave to use for time stamping, + ethtool and SIOCGMIIPHY - cpts_clock_mult : Numerator to convert input clock ticks into nanoseconds - cpts_clock_shift : Denominator to convert input clock ticks into nanoseconds @@ -52,7 +53,7 @@ Examples: rx_descs = <64>; mac_control = <0x20>; slaves = <2>; - cpts_active_slave = <0>; + active_slave = <0>; cpts_clock_mult = <0x80000000>; cpts_clock_shift = <29>; cpsw_emac0: slave@0 { @@ -78,7 +79,7 @@ Examples: rx_descs = <64>; mac_control = <0x20>; slaves = <2>; - cpts_active_slave = <0>; + active_slave = <0>; cpts_clock_mult = <0x80000000>; cpts_clock_shift = <29>; cpsw_emac0: slave@0 { diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi index 0957645b73af..91fe4f148f80 100644 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@ -349,7 +349,7 @@ rx_descs = <64>; mac_control = <0x20>; slaves = <2>; - cpts_active_slave = <0>; + active_slave = <0>; cpts_clock_mult = <0x80000000>; cpts_clock_shift = <29>; reg = <0x4a100000 0x800 diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 01ffbc486982..98aa17a9516a 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -942,7 +942,7 @@ static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags) static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) { - struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave]; + struct cpsw_slave *slave = &priv->slaves[priv->data.active_slave]; u32 ts_en, seq_id; if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) { @@ -971,7 +971,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv) if (priv->data.dual_emac) slave = &priv->slaves[priv->emac_port]; else - slave = &priv->slaves[priv->data.cpts_active_slave]; + slave = &priv->slaves[priv->data.active_slave]; ctrl = slave_read(slave, CPSW2_CONTROL); ctrl &= ~CTRL_ALL_TS_MASK; @@ -1282,12 +1282,12 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, } data->slaves = prop; - if (of_property_read_u32(node, "cpts_active_slave", &prop)) { - pr_err("Missing cpts_active_slave property in the DT.\n"); + if (of_property_read_u32(node, "active_slave", &prop)) { + pr_err("Missing active_slave property in the DT.\n"); ret = -EINVAL; goto error_ret; } - data->cpts_active_slave = prop; + data->active_slave = prop; if (of_property_read_u32(node, "cpts_clock_mult", &prop)) { pr_err("Missing cpts_clock_mult property in the DT.\n"); diff --git a/include/linux/platform_data/cpsw.h b/include/linux/platform_data/cpsw.h index 798fb80b024b..bb3cd58d71e3 100644 --- a/include/linux/platform_data/cpsw.h +++ b/include/linux/platform_data/cpsw.h @@ -30,7 +30,7 @@ struct cpsw_platform_data { u32 channels; /* number of cpdma channels 
(symmetric) */ u32 slaves; /* number of slave cpgmac ports */ struct cpsw_slave_data *slave_data; - u32 cpts_active_slave; /* time stamping slave */ + u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */ u32 cpts_clock_mult; /* convert input clock ticks to nanoseconds */ u32 cpts_clock_shift; /* convert input clock ticks to nanoseconds */ u32 ale_entries; /* ale table size */ -- cgit v1.2.3-70-g09d2 From 8a7fbfab4be39b8690543f3d29b26860d2f6c576 Mon Sep 17 00:00:00 2001 From: "nikolay@redhat.com" Date: Tue, 12 Mar 2013 02:49:01 +0000 Subject: netxen: write IP address to firmware when using bonding This patch allows LRO aggregation on bonded devices that contain an NX3031 device. It also adds a for_each_netdev_in_bond_rcu(bond, slave) macro which executes for each slave that has bond as master. V3: After testing and discussing this with Rajesh, I decided to keep the vlan ip cache and just rename it to ip_cache since it will store bond ip addresses too. A new master flag has been added to the ip cache to denote that the address has been added because of a master device. I've taken care of the enslave/release cases by checking for various combinations of events and flags (e.g. netxen has a master, it's a bond master and it's not marked as a slave means it is being enslaved and is dev_open()ed in bond_enslave). I've changed netxen_free_ip_list() to have a "master" parameter which causes all IP addresses marked as master to be deleted (used when a netxen is being released). I've made the patch use the new upper device API as well. The following cases were tested: - bond -> netxen - vlan -> netxen - vlan -> bond -> netxen V2: Remove local ip caching, retrieve addresses dynamically and restore them if necessary. Note: Tested with NX3031 adapter. Tested-by: Rajesh Borundia Signed-off-by: Andy Gospodarek Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/netxen/netxen_nic.h | 5 +- .../net/ethernet/qlogic/netxen/netxen_nic_main.c | 220 ++++++++++++++------- include/linux/netdevice.h | 8 + 3 files changed, 155 insertions(+), 78 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h index eb3dfdbb642b..322a36b76727 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h @@ -955,9 +955,10 @@ typedef struct nx_mac_list_s { uint8_t mac_addr[ETH_ALEN+2]; } nx_mac_list_t; -struct nx_vlan_ip_list { +struct nx_ip_list { struct list_head list; __be32 ip_addr; + bool master; }; /* @@ -1605,7 +1606,7 @@ struct netxen_adapter { struct net_device *netdev; struct pci_dev *pdev; struct list_head mac_list; - struct list_head vlan_ip_list; + struct list_head ip_list; spinlock_t tx_clean_lock; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 501f49207da5..7867aebc05f2 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -90,7 +90,7 @@ static irqreturn_t netxen_intr(int irq, void *data); static irqreturn_t netxen_msi_intr(int irq, void *data); static irqreturn_t netxen_msix_intr(int irq, void *data); -static void netxen_free_vlan_ip_list(struct netxen_adapter *); +static void netxen_free_ip_list(struct netxen_adapter *, bool); static void netxen_restore_indev_addr(struct net_device *dev, unsigned long); static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); @@ -1450,7 +1450,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) spin_lock_init(&adapter->tx_clean_lock); INIT_LIST_HEAD(&adapter->mac_list); - INIT_LIST_HEAD(&adapter->vlan_ip_list); + INIT_LIST_HEAD(&adapter->ip_list); err = netxen_setup_pci_map(adapter); if (err) @@ -1585,7 +1585,7 @@ static void netxen_nic_remove(struct pci_dev *pdev) cancel_work_sync(&adapter->tx_timeout_task); - netxen_free_vlan_ip_list(adapter); + netxen_free_ip_list(adapter, false); netxen_nic_detach(adapter); nx_decr_dev_ref_cnt(adapter); @@ -3137,62 +3137,77 @@ netxen_destip_supported(struct netxen_adapter *adapter) } static void -netxen_free_vlan_ip_list(struct netxen_adapter *adapter) +netxen_free_ip_list(struct netxen_adapter *adapter, bool master) { - struct nx_vlan_ip_list *cur; - struct list_head *head = &adapter->vlan_ip_list; + struct nx_ip_list *cur, *tmp_cur; - while (!list_empty(head)) { - cur = list_entry(head->next, struct nx_vlan_ip_list, list); - netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN); - list_del(&cur->list); - kfree(cur); + list_for_each_entry_safe(cur, tmp_cur, &adapter->ip_list, list) { + if (master) { + if (cur->master) { + netxen_config_ipaddr(adapter, cur->ip_addr, + NX_IP_DOWN); + list_del(&cur->list); + kfree(cur); + } + } else { + netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN); + list_del(&cur->list); + kfree(cur); + } } - } -static void -netxen_list_config_vlan_ip(struct netxen_adapter *adapter, + +static bool +netxen_list_config_ip(struct netxen_adapter *adapter, struct in_ifaddr *ifa, unsigned long event) { struct net_device *dev; - struct nx_vlan_ip_list *cur, *tmp_cur; + struct nx_ip_list *cur, *tmp_cur; struct list_head *head; + bool ret = false; dev = ifa->ifa_dev ? 
ifa->ifa_dev->dev : NULL; if (dev == NULL) - return; - - if (!is_vlan_dev(dev)) - return; + goto out; switch (event) { case NX_IP_UP: - list_for_each(head, &adapter->vlan_ip_list) { - cur = list_entry(head, struct nx_vlan_ip_list, list); + list_for_each(head, &adapter->ip_list) { + cur = list_entry(head, struct nx_ip_list, list); if (cur->ip_addr == ifa->ifa_address) - return; + goto out; } - cur = kzalloc(sizeof(struct nx_vlan_ip_list), GFP_ATOMIC); + cur = kzalloc(sizeof(struct nx_ip_list), GFP_ATOMIC); if (cur == NULL) - return; - + goto out; + if (dev->priv_flags & IFF_802_1Q_VLAN) + dev = vlan_dev_real_dev(dev); + cur->master = !!netif_is_bond_master(dev); cur->ip_addr = ifa->ifa_address; - list_add_tail(&cur->list, &adapter->vlan_ip_list); + list_add_tail(&cur->list, &adapter->ip_list); + netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP); + ret = true; break; case NX_IP_DOWN: list_for_each_entry_safe(cur, tmp_cur, - &adapter->vlan_ip_list, list) { + &adapter->ip_list, list) { if (cur->ip_addr == ifa->ifa_address) { list_del(&cur->list); kfree(cur); + netxen_config_ipaddr(adapter, ifa->ifa_address, + NX_IP_DOWN); + ret = true; break; } } } +out: + return ret; } + static void netxen_config_indev_addr(struct netxen_adapter *adapter, struct net_device *dev, unsigned long event) @@ -3209,14 +3224,10 @@ netxen_config_indev_addr(struct netxen_adapter *adapter, for_ifa(indev) { switch (event) { case NETDEV_UP: - netxen_config_ipaddr(adapter, - ifa->ifa_address, NX_IP_UP); - netxen_list_config_vlan_ip(adapter, ifa, NX_IP_UP); + netxen_list_config_ip(adapter, ifa, NX_IP_UP); break; case NETDEV_DOWN: - netxen_config_ipaddr(adapter, - ifa->ifa_address, NX_IP_DOWN); - netxen_list_config_vlan_ip(adapter, ifa, NX_IP_DOWN); + netxen_list_config_ip(adapter, ifa, NX_IP_DOWN); break; default: break; @@ -3231,23 +3242,78 @@ netxen_restore_indev_addr(struct net_device *netdev, unsigned long event) { struct netxen_adapter *adapter = netdev_priv(netdev); - struct nx_vlan_ip_list *pos, *tmp_pos; + struct nx_ip_list *pos, *tmp_pos; unsigned long ip_event; ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN; netxen_config_indev_addr(adapter, netdev, event); - list_for_each_entry_safe(pos, tmp_pos, &adapter->vlan_ip_list, list) { + list_for_each_entry_safe(pos, tmp_pos, &adapter->ip_list, list) { netxen_config_ipaddr(adapter, pos->ip_addr, ip_event); } } +static inline bool +netxen_config_checkdev(struct net_device *dev) +{ + struct netxen_adapter *adapter; + + if (!is_netxen_netdev(dev)) + return false; + adapter = netdev_priv(dev); + if (!adapter) + return false; + if (!netxen_destip_supported(adapter)) + return false; + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + return false; + + return true; +} + +/** + * netxen_config_master - configure addresses based on master + * @dev: netxen device + * @event: netdev event + */ +static void netxen_config_master(struct net_device *dev, unsigned long event) +{ + struct net_device *master, *slave; + struct netxen_adapter *adapter = netdev_priv(dev); + + rcu_read_lock(); + master = netdev_master_upper_dev_get_rcu(dev); + /* + * This is the case where the netxen nic is being + * enslaved and is dev_open()ed in bond_enslave() + * Now we should program the bond's (and its vlans') + * addresses in the netxen NIC. 
+ */ + if (master && netif_is_bond_master(master) && + !netif_is_bond_slave(dev)) { + netxen_config_indev_addr(adapter, master, event); + for_each_netdev_rcu(&init_net, slave) + if (slave->priv_flags & IFF_802_1Q_VLAN && + vlan_dev_real_dev(slave) == master) + netxen_config_indev_addr(adapter, slave, event); + } + rcu_read_unlock(); + /* + * This is the case where the netxen nic is being + * released and is dev_close()ed in bond_release() + * just before IFF_BONDING is stripped. + */ + if (!master && dev->priv_flags & IFF_BONDING) + netxen_free_ip_list(adapter, true); +} + static int netxen_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct netxen_adapter *adapter; struct net_device *dev = (struct net_device *)ptr; struct net_device *orig_dev = dev; + struct net_device *slave; recheck: if (dev == NULL) @@ -3257,19 +3323,28 @@ recheck: dev = vlan_dev_real_dev(dev); goto recheck; } - - if (!is_netxen_netdev(dev)) - goto done; - - adapter = netdev_priv(dev); - - if (!adapter) - goto done; - - if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) - goto done; - - netxen_config_indev_addr(adapter, orig_dev, event); + if (event == NETDEV_UP || event == NETDEV_DOWN) { + /* If this is a bonding device, look for netxen-based slaves*/ + if (netif_is_bond_master(dev)) { + rcu_read_lock(); + for_each_netdev_in_bond_rcu(dev, slave) { + if (!netxen_config_checkdev(slave)) + continue; + adapter = netdev_priv(slave); + netxen_config_indev_addr(adapter, + orig_dev, event); + } + rcu_read_unlock(); + } else { + if (!netxen_config_checkdev(dev)) + goto done; + adapter = netdev_priv(dev); + /* Act only if the actual netxen is the target */ + if (orig_dev == dev) + netxen_config_master(dev, event); + netxen_config_indev_addr(adapter, orig_dev, event); + } + } done: return NOTIFY_DONE; } @@ -3279,12 +3354,12 @@ netxen_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr) { struct netxen_adapter *adapter; - struct net_device *dev; - + struct net_device *dev, *slave; struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; + unsigned long ip_event; dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL; - + ip_event = (event == NETDEV_UP) ? 
NX_IP_UP : NX_IP_DOWN; recheck: if (dev == NULL) goto done; @@ -3293,31 +3368,24 @@ recheck: dev = vlan_dev_real_dev(dev); goto recheck; } - - if (!is_netxen_netdev(dev)) - goto done; - - adapter = netdev_priv(dev); - - if (!adapter || !netxen_destip_supported(adapter)) - goto done; - - if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) - goto done; - - switch (event) { - case NETDEV_UP: - netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP); - netxen_list_config_vlan_ip(adapter, ifa, NX_IP_UP); - break; - case NETDEV_DOWN: - netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_DOWN); - netxen_list_config_vlan_ip(adapter, ifa, NX_IP_DOWN); - break; - default: - break; + if (event == NETDEV_UP || event == NETDEV_DOWN) { + /* If this is a bonding device, look for netxen-based slaves*/ + if (netif_is_bond_master(dev)) { + rcu_read_lock(); + for_each_netdev_in_bond_rcu(dev, slave) { + if (!netxen_config_checkdev(slave)) + continue; + adapter = netdev_priv(slave); + netxen_list_config_ip(adapter, ifa, ip_event); + } + rcu_read_unlock(); + } else { + if (!netxen_config_checkdev(dev)) + goto done; + adapter = netdev_priv(dev); + netxen_list_config_ip(adapter, ifa, ip_event); + } } - done: return NOTIFY_DONE; } @@ -3334,7 +3402,7 @@ static void netxen_restore_indev_addr(struct net_device *dev, unsigned long event) { } static void -netxen_free_vlan_ip_list(struct netxen_adapter *adapter) +netxen_free_ip_list(struct netxen_adapter *adapter, bool master) { } #endif diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index e1ebeffa6b35..9fc1ab0c8914 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1617,6 +1617,9 @@ extern seqcount_t devnet_rename_seq; /* Device rename seq */ list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) #define for_each_netdev_continue_rcu(net, d) \ list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) +#define for_each_netdev_in_bond_rcu(bond, slave) \ + for_each_netdev_rcu(&init_net, slave) \ + if (netdev_master_upper_dev_get_rcu(slave) == bond) #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) static inline struct net_device *next_net_device(struct net_device *dev) @@ -2774,6 +2777,11 @@ static inline void netif_set_gso_max_size(struct net_device *dev, dev->gso_max_size = size; } +static inline bool netif_is_bond_master(struct net_device *dev) +{ + return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; +} + static inline bool netif_is_bond_slave(struct net_device *dev) { return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; -- cgit v1.2.3-70-g09d2 From 764444f5a324ad5a272773f078192819084388ce Mon Sep 17 00:00:00 2001 From: Fernando Luis Vazquez Cao Date: Wed, 13 Mar 2013 16:57:25 +0000 Subject: net: clean leftover of COMPAT_NET_DEV_OPS removal COMPAT_NET_DEV_OPS was removed a while back and with it the definition of netdev_resync_ops() went away. Let's finish the clean-up. Signed-off-by: Fernando Luis Vazquez Cao Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 9fc1ab0c8914..56e3e0665272 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1692,7 +1692,6 @@ extern int netdev_refcnt_read(const struct net_device *dev); extern void free_netdev(struct net_device *dev); extern void synchronize_net(void); extern int init_dummy_netdev(struct net_device *dev); -extern void netdev_resync_ops(struct net_device *dev); extern struct net_device *dev_get_by_index(struct net *net, int ifindex); extern struct net_device *__dev_get_by_index(struct net *net, int ifindex); -- cgit v1.2.3-70-g09d2 From a362db3d6c8a952cbde510b1fa35d0ee001b347e Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Sat, 16 Mar 2013 04:47:55 +0000 Subject: net: fix some typos in netif features Cc: Pravin B Shelar Cc: "David S. Miller" Signed-off-by: Cong Wang Acked-by: Pravin B Shelar Signed-off-by: David S. Miller --- include/linux/netdev_features.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index f5e797c0c2a4..d6ee2d008ee4 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -102,8 +102,8 @@ enum { #define NETIF_F_VLAN_CHALLENGED __NETIF_F(VLAN_CHALLENGED) #define NETIF_F_RXFCS __NETIF_F(RXFCS) #define NETIF_F_RXALL __NETIF_F(RXALL) -#define NETIF_F_GRE_GSO __NETIF_F(GSO_GRE) -#define NETIF_F_UDP_TUNNEL __NETIF_F(UDP_TUNNEL) +#define NETIF_F_GSO_GRE __NETIF_F(GSO_GRE) +#define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL) /* Features valid for ethtool to change */ /* = all defined minus driver/device-class-related */ -- cgit v1.2.3-70-g09d2 From 1a2c6181c4a1922021b4d7df373bba612c3e5f04 Mon Sep 17 00:00:00 2001 From: Christoph Paasch Date: Sun, 17 Mar 2013 08:23:34 +0000 Subject: tcp: Remove TCPCT TCPCT uses option-number 253, reserved for experimental use and should not be used in production environments. Further, TCPCT does not fully implement RFC 6013. As a nice side-effect, removing TCPCT increases TCP's performance for very short flows: Doing an apache-benchmark with -c 100 -n 100000, sending HTTP-requests for files of 1KB size. before this patch: average (among 7 runs) of 20845.5 Requests/Second after: average (among 7 runs) of 21403.6 Requests/Second Signed-off-by: Christoph Paasch Signed-off-by: David S. Miller --- Documentation/networking/ip-sysctl.txt | 8 - drivers/infiniband/hw/cxgb4/cm.c | 2 +- include/linux/tcp.h | 10 -- include/net/request_sock.h | 8 +- include/net/tcp.h | 89 +---------- include/uapi/linux/tcp.h | 26 ---- net/dccp/ipv4.c | 5 +- net/dccp/ipv6.c | 5 +- net/ipv4/inet_connection_sock.c | 2 +- net/ipv4/syncookies.c | 3 +- net/ipv4/sysctl_net_ipv4.c | 7 - net/ipv4/tcp.c | 267 --------------------------------- net/ipv4/tcp_input.c | 69 +-------- net/ipv4/tcp_ipv4.c | 60 +------- net/ipv4/tcp_minisocks.c | 40 +---- net/ipv4/tcp_output.c | 219 +-------------------------- net/ipv6/syncookies.c | 3 +- net/ipv6/tcp_ipv6.c | 56 +------ 18 files changed, 38 insertions(+), 841 deletions(-) (limited to 'include/linux') diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 18a24c405ac0..17953e2bc3e9 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -175,14 +175,6 @@ tcp_congestion_control - STRING is inherited. 
[see setsockopt(listenfd, SOL_TCP, TCP_CONGESTION, "name" ...) ] -tcp_cookie_size - INTEGER - Default size of TCP Cookie Transactions (TCPCT) option, that may be - overridden on a per socket basis by the TCPCT socket option. - Values greater than the maximum (16) are interpreted as the maximum. - Values greater than zero and less than the minimum (8) are interpreted - as the minimum. Odd values are interpreted as the next even value. - Default: 0 (off). - tcp_dsack - BOOLEAN Allows TCP to send "duplicate" SACKs. diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 8dcc84fd9d30..54fd31fcc332 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -2915,7 +2915,7 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos) */ memset(&tmp_opt, 0, sizeof(tmp_opt)); tcp_clear_options(&tmp_opt); - tcp_parse_options(skb, &tmp_opt, NULL, 0, NULL); + tcp_parse_options(skb, &tmp_opt, 0, NULL); req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req)); memset(req, 0, sizeof(*req)); diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 763c108ee03d..ed6a7456eecd 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -90,9 +90,6 @@ struct tcp_options_received { sack_ok : 4, /* SACK seen on SYN packet */ snd_wscale : 4, /* Window scaling received from sender */ rcv_wscale : 4; /* Window scaling to send to receiver */ - u8 cookie_plus:6, /* bytes in authenticator/cookie option */ - cookie_out_never:1, - cookie_in_always:1; u8 num_sacks; /* Number of SACK blocks */ u16 user_mss; /* mss requested by user in ioctl */ u16 mss_clamp; /* Maximal mss, negotiated at connection setup */ @@ -102,7 +99,6 @@ static inline void tcp_clear_options(struct tcp_options_received *rx_opt) { rx_opt->tstamp_ok = rx_opt->sack_ok = 0; rx_opt->wscale_ok = rx_opt->snd_wscale = 0; - rx_opt->cookie_plus = 0; } /* This is the max number of SACKS that we'll generate and process. It's safe @@ -320,12 +316,6 @@ struct tcp_sock { struct tcp_md5sig_info __rcu *md5sig_info; #endif - /* When the cookie options are generated and exchanged, then this - * object holds a reference to them (cookie_values->kref). Also - * contains related tcp_cookie_transactions fields. - */ - struct tcp_cookie_values *cookie_values; - /* TCP fastopen related information */ struct tcp_fastopen_request *fastopen_req; /* fastopen_rsk points to request_sock that resulted in this big diff --git a/include/net/request_sock.h b/include/net/request_sock.h index a51dbd17c2de..9069e65c1c56 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h @@ -27,19 +27,13 @@ struct sk_buff; struct dst_entry; struct proto; -/* empty to "strongly type" an otherwise void parameter. 
- */ -struct request_values { -}; - struct request_sock_ops { int family; int obj_size; struct kmem_cache *slab; char *slab_name; int (*rtx_syn_ack)(struct sock *sk, - struct request_sock *req, - struct request_values *rvp); + struct request_sock *req); void (*send_ack)(struct sock *sk, struct sk_buff *skb, struct request_sock *req); void (*send_reset)(struct sock *sk, diff --git a/include/net/tcp.h b/include/net/tcp.h index ab9f947b118b..7f2f17198d75 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -179,7 +179,6 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo); #define TCPOPT_SACK 5 /* SACK Block */ #define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */ #define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */ -#define TCPOPT_COOKIE 253 /* Cookie extension (experimental) */ #define TCPOPT_EXP 254 /* Experimental */ /* Magic number to be after the option value for sharing TCP * experimental options. See draft-ietf-tcpm-experimental-options-00.txt @@ -454,7 +453,7 @@ extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req); extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int flags, int *addr_len); extern void tcp_parse_options(const struct sk_buff *skb, - struct tcp_options_received *opt_rx, const u8 **hvpp, + struct tcp_options_received *opt_rx, int estab, struct tcp_fastopen_cookie *foc); extern const u8 *tcp_parse_md5sig_option(const struct tcphdr *th); @@ -476,7 +475,6 @@ extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, extern int tcp_connect(struct sock *sk); extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, struct request_sock *req, - struct request_values *rvp, struct tcp_fastopen_cookie *foc); extern int tcp_disconnect(struct sock *sk, int flags); @@ -1589,91 +1587,6 @@ struct tcp_request_sock_ops { #endif }; -/* Using SHA1 for now, define some constants. - */ -#define COOKIE_DIGEST_WORDS (SHA_DIGEST_WORDS) -#define COOKIE_MESSAGE_WORDS (SHA_MESSAGE_BYTES / 4) -#define COOKIE_WORKSPACE_WORDS (COOKIE_DIGEST_WORDS + COOKIE_MESSAGE_WORDS) - -extern int tcp_cookie_generator(u32 *bakery); - -/** - * struct tcp_cookie_values - each socket needs extra space for the - * cookies, together with (optional) space for any SYN data. - * - * A tcp_sock contains a pointer to the current value, and this is - * cloned to the tcp_timewait_sock. - * - * @cookie_pair: variable data from the option exchange. - * - * @cookie_desired: user specified tcpct_cookie_desired. Zero - * indicates default (sysctl_tcp_cookie_size). - * After cookie sent, remembers size of cookie. - * Range 0, TCP_COOKIE_MIN to TCP_COOKIE_MAX. - * - * @s_data_desired: user specified tcpct_s_data_desired. When the - * constant payload is specified (@s_data_constant), - * holds its length instead. - * Range 0 to TCP_MSS_DESIRED. - * - * @s_data_payload: constant data that is to be included in the - * payload of SYN or SYNACK segments when the - * cookie option is present. - */ -struct tcp_cookie_values { - struct kref kref; - u8 cookie_pair[TCP_COOKIE_PAIR_SIZE]; - u8 cookie_pair_size; - u8 cookie_desired; - u16 s_data_desired:11, - s_data_constant:1, - s_data_in:1, - s_data_out:1, - s_data_unused:2; - u8 s_data_payload[0]; -}; - -static inline void tcp_cookie_values_release(struct kref *kref) -{ - kfree(container_of(kref, struct tcp_cookie_values, kref)); -} - -/* The length of constant payload data. 
Note that s_data_desired is - * overloaded, depending on s_data_constant: either the length of constant - * data (returned here) or the limit on variable data. - */ -static inline int tcp_s_data_size(const struct tcp_sock *tp) -{ - return (tp->cookie_values != NULL && tp->cookie_values->s_data_constant) - ? tp->cookie_values->s_data_desired - : 0; -} - -/** - * struct tcp_extend_values - tcp_ipv?.c to tcp_output.c workspace. - * - * As tcp_request_sock has already been extended in other places, the - * only remaining method is to pass stack values along as function - * parameters. These parameters are not needed after sending SYNACK. - * - * @cookie_bakery: cryptographic secret and message workspace. - * - * @cookie_plus: bytes in authenticator/cookie option, copied from - * struct tcp_options_received (above). - */ -struct tcp_extend_values { - struct request_values rv; - u32 cookie_bakery[COOKIE_WORKSPACE_WORDS]; - u8 cookie_plus:6, - cookie_out_never:1, - cookie_in_always:1; -}; - -static inline struct tcp_extend_values *tcp_xv(struct request_values *rvp) -{ - return (struct tcp_extend_values *)rvp; -} - extern void tcp_v4_init(void); extern void tcp_init(void); diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index 6b1ead0b0c9d..8d776ebc4829 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -102,7 +102,6 @@ enum { #define TCP_QUICKACK 12 /* Block/reenable quick acks */ #define TCP_CONGESTION 13 /* Congestion control algorithm */ #define TCP_MD5SIG 14 /* TCP MD5 Signature (RFC2385) */ -#define TCP_COOKIE_TRANSACTIONS 15 /* TCP Cookie Transactions */ #define TCP_THIN_LINEAR_TIMEOUTS 16 /* Use linear timeouts for thin streams*/ #define TCP_THIN_DUPACK 17 /* Fast retrans. after 1 dupack */ #define TCP_USER_TIMEOUT 18 /* How long for loss retry before timeout */ @@ -199,29 +198,4 @@ struct tcp_md5sig { __u8 tcpm_key[TCP_MD5SIG_MAXKEYLEN]; /* key (binary) */ }; -/* for TCP_COOKIE_TRANSACTIONS (TCPCT) socket option */ -#define TCP_COOKIE_MIN 8 /* 64-bits */ -#define TCP_COOKIE_MAX 16 /* 128-bits */ -#define TCP_COOKIE_PAIR_SIZE (2*TCP_COOKIE_MAX) - -/* Flags for both getsockopt and setsockopt */ -#define TCP_COOKIE_IN_ALWAYS (1 << 0) /* Discard SYN without cookie */ -#define TCP_COOKIE_OUT_NEVER (1 << 1) /* Prohibit outgoing cookies, - * supercedes everything. */ - -/* Flags for getsockopt */ -#define TCP_S_DATA_IN (1 << 2) /* Was data received? */ -#define TCP_S_DATA_OUT (1 << 3) /* Was data sent? 
*/ - -/* TCP_COOKIE_TRANSACTIONS data */ -struct tcp_cookie_transactions { - __u16 tcpct_flags; /* see above */ - __u8 __tcpct_pad1; /* zero */ - __u8 tcpct_cookie_desired; /* bytes */ - __u16 tcpct_s_data_desired; /* bytes of variable data */ - __u16 tcpct_used; /* bytes in value */ - __u8 tcpct_value[TCP_MSS_DEFAULT]; -}; - - #endif /* _UAPI_LINUX_TCP_H */ diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 4f9f5eb478f1..ebc54fef85a5 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -500,8 +500,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk, return &rt->dst; } -static int dccp_v4_send_response(struct sock *sk, struct request_sock *req, - struct request_values *rv_unused) +static int dccp_v4_send_response(struct sock *sk, struct request_sock *req) { int err = -1; struct sk_buff *skb; @@ -658,7 +657,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) dreq->dreq_gss = dreq->dreq_iss; dreq->dreq_service = service; - if (dccp_v4_send_response(sk, req, NULL)) + if (dccp_v4_send_response(sk, req)) goto drop_and_free; inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 6e05981f271e..9c61f9c02fdb 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -213,8 +213,7 @@ out: } -static int dccp_v6_send_response(struct sock *sk, struct request_sock *req, - struct request_values *rv_unused) +static int dccp_v6_send_response(struct sock *sk, struct request_sock *req) { struct inet6_request_sock *ireq6 = inet6_rsk(req); struct ipv6_pinfo *np = inet6_sk(sk); @@ -428,7 +427,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) dreq->dreq_gss = dreq->dreq_iss; dreq->dreq_service = service; - if (dccp_v6_send_response(sk, req, NULL)) + if (dccp_v6_send_response(sk, req)) goto drop_and_free; inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 786d97aee751..6acb541c9091 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -559,7 +559,7 @@ static inline void syn_ack_recalc(struct request_sock *req, const int thresh, int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req) { - int err = req->rsk_ops->rtx_syn_ack(parent, req, NULL); + int err = req->rsk_ops->rtx_syn_ack(parent, req); if (!err) req->num_retrans++; diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index ef54377fb11c..7f4a5cb8f8d0 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -267,7 +267,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, struct ip_options *opt) { struct tcp_options_received tcp_opt; - const u8 *hash_location; struct inet_request_sock *ireq; struct tcp_request_sock *treq; struct tcp_sock *tp = tcp_sk(sk); @@ -294,7 +293,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, /* check for timestamp cookie support */ memset(&tcp_opt, 0, sizeof(tcp_opt)); - tcp_parse_options(skb, &tcp_opt, &hash_location, 0, NULL); + tcp_parse_options(skb, &tcp_opt, 0, NULL); if (!cookie_check_timestamp(&tcp_opt, sock_net(sk), &ecn_ok)) goto out; diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index cca4550f4082..cb45062c8be0 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -732,13 +732,6 @@ static struct ctl_table ipv4_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, - { - .procname = "tcp_cookie_size", - .data = &sysctl_tcp_cookie_size, - .maxlen = sizeof(int), 
- .mode = 0644, - .proc_handler = proc_dointvec - }, { .procname = "tcp_thin_linear_timeouts", .data = &sysctl_tcp_thin_linear_timeouts, diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 8d14573ade77..17a6810af5c8 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -409,15 +409,6 @@ void tcp_init_sock(struct sock *sk) icsk->icsk_sync_mss = tcp_sync_mss; - /* TCP Cookie Transactions */ - if (sysctl_tcp_cookie_size > 0) { - /* Default, cookies without s_data_payload. */ - tp->cookie_values = - kzalloc(sizeof(*tp->cookie_values), - sk->sk_allocation); - if (tp->cookie_values != NULL) - kref_init(&tp->cookie_values->kref); - } /* Presumed zeroed, in order of appearance: * cookie_in_always, cookie_out_never, * s_data_constant, s_data_in, s_data_out @@ -2397,92 +2388,6 @@ static int do_tcp_setsockopt(struct sock *sk, int level, release_sock(sk); return err; } - case TCP_COOKIE_TRANSACTIONS: { - struct tcp_cookie_transactions ctd; - struct tcp_cookie_values *cvp = NULL; - - if (sizeof(ctd) > optlen) - return -EINVAL; - if (copy_from_user(&ctd, optval, sizeof(ctd))) - return -EFAULT; - - if (ctd.tcpct_used > sizeof(ctd.tcpct_value) || - ctd.tcpct_s_data_desired > TCP_MSS_DESIRED) - return -EINVAL; - - if (ctd.tcpct_cookie_desired == 0) { - /* default to global value */ - } else if ((0x1 & ctd.tcpct_cookie_desired) || - ctd.tcpct_cookie_desired > TCP_COOKIE_MAX || - ctd.tcpct_cookie_desired < TCP_COOKIE_MIN) { - return -EINVAL; - } - - if (TCP_COOKIE_OUT_NEVER & ctd.tcpct_flags) { - /* Supercedes all other values */ - lock_sock(sk); - if (tp->cookie_values != NULL) { - kref_put(&tp->cookie_values->kref, - tcp_cookie_values_release); - tp->cookie_values = NULL; - } - tp->rx_opt.cookie_in_always = 0; /* false */ - tp->rx_opt.cookie_out_never = 1; /* true */ - release_sock(sk); - return err; - } - - /* Allocate ancillary memory before locking. - */ - if (ctd.tcpct_used > 0 || - (tp->cookie_values == NULL && - (sysctl_tcp_cookie_size > 0 || - ctd.tcpct_cookie_desired > 0 || - ctd.tcpct_s_data_desired > 0))) { - cvp = kzalloc(sizeof(*cvp) + ctd.tcpct_used, - GFP_KERNEL); - if (cvp == NULL) - return -ENOMEM; - - kref_init(&cvp->kref); - } - lock_sock(sk); - tp->rx_opt.cookie_in_always = - (TCP_COOKIE_IN_ALWAYS & ctd.tcpct_flags); - tp->rx_opt.cookie_out_never = 0; /* false */ - - if (tp->cookie_values != NULL) { - if (cvp != NULL) { - /* Changed values are recorded by a changed - * pointer, ensuring the cookie will differ, - * without separately hashing each value later. - */ - kref_put(&tp->cookie_values->kref, - tcp_cookie_values_release); - } else { - cvp = tp->cookie_values; - } - } - - if (cvp != NULL) { - cvp->cookie_desired = ctd.tcpct_cookie_desired; - - if (ctd.tcpct_used > 0) { - memcpy(cvp->s_data_payload, ctd.tcpct_value, - ctd.tcpct_used); - cvp->s_data_desired = ctd.tcpct_used; - cvp->s_data_constant = 1; /* true */ - } else { - /* No constant payload data. */ - cvp->s_data_desired = ctd.tcpct_s_data_desired; - cvp->s_data_constant = 0; /* false */ - } - - tp->cookie_values = cvp; - } - release_sock(sk); - return err; - } default: /* fallthru */ break; @@ -2902,41 +2807,6 @@ static int do_tcp_getsockopt(struct sock *sk, int level, return -EFAULT; return 0; - case TCP_COOKIE_TRANSACTIONS: { - struct tcp_cookie_transactions ctd; - struct tcp_cookie_values *cvp = tp->cookie_values; - - if (get_user(len, optlen)) - return -EFAULT; - if (len < sizeof(ctd)) - return -EINVAL; - - memset(&ctd, 0, sizeof(ctd)); - ctd.tcpct_flags = (tp->rx_opt.cookie_in_always ? 
- TCP_COOKIE_IN_ALWAYS : 0) - | (tp->rx_opt.cookie_out_never ? - TCP_COOKIE_OUT_NEVER : 0); - - if (cvp != NULL) { - ctd.tcpct_flags |= (cvp->s_data_in ? - TCP_S_DATA_IN : 0) - | (cvp->s_data_out ? - TCP_S_DATA_OUT : 0); - - ctd.tcpct_cookie_desired = cvp->cookie_desired; - ctd.tcpct_s_data_desired = cvp->s_data_desired; - - memcpy(&ctd.tcpct_value[0], &cvp->cookie_pair[0], - cvp->cookie_pair_size); - ctd.tcpct_used = cvp->cookie_pair_size; - } - - if (put_user(sizeof(ctd), optlen)) - return -EFAULT; - if (copy_to_user(optval, &ctd, sizeof(ctd))) - return -EFAULT; - return 0; - } case TCP_THIN_LINEAR_TIMEOUTS: val = tp->thin_lto; break; @@ -3409,134 +3279,6 @@ EXPORT_SYMBOL(tcp_md5_hash_key); #endif -/* Each Responder maintains up to two secret values concurrently for - * efficient secret rollover. Each secret value has 4 states: - * - * Generating. (tcp_secret_generating != tcp_secret_primary) - * Generates new Responder-Cookies, but not yet used for primary - * verification. This is a short-term state, typically lasting only - * one round trip time (RTT). - * - * Primary. (tcp_secret_generating == tcp_secret_primary) - * Used both for generation and primary verification. - * - * Retiring. (tcp_secret_retiring != tcp_secret_secondary) - * Used for verification, until the first failure that can be - * verified by the newer Generating secret. At that time, this - * cookie's state is changed to Secondary, and the Generating - * cookie's state is changed to Primary. This is a short-term state, - * typically lasting only one round trip time (RTT). - * - * Secondary. (tcp_secret_retiring == tcp_secret_secondary) - * Used for secondary verification, after primary verification - * failures. This state lasts no more than twice the Maximum Segment - * Lifetime (2MSL). Then, the secret is discarded. - */ -struct tcp_cookie_secret { - /* The secret is divided into two parts. The digest part is the - * equivalent of previously hashing a secret and saving the state, - * and serves as an initialization vector (IV). The message part - * serves as the trailing secret. - */ - u32 secrets[COOKIE_WORKSPACE_WORDS]; - unsigned long expires; -}; - -#define TCP_SECRET_1MSL (HZ * TCP_PAWS_MSL) -#define TCP_SECRET_2MSL (HZ * TCP_PAWS_MSL * 2) -#define TCP_SECRET_LIFE (HZ * 600) - -static struct tcp_cookie_secret tcp_secret_one; -static struct tcp_cookie_secret tcp_secret_two; - -/* Essentially a circular list, without dynamic allocation. */ -static struct tcp_cookie_secret *tcp_secret_generating; -static struct tcp_cookie_secret *tcp_secret_primary; -static struct tcp_cookie_secret *tcp_secret_retiring; -static struct tcp_cookie_secret *tcp_secret_secondary; - -static DEFINE_SPINLOCK(tcp_secret_locker); - -/* Select a pseudo-random word in the cookie workspace. - */ -static inline u32 tcp_cookie_work(const u32 *ws, const int n) -{ - return ws[COOKIE_DIGEST_WORDS + ((COOKIE_MESSAGE_WORDS-1) & ws[n])]; -} - -/* Fill bakery[COOKIE_WORKSPACE_WORDS] with generator, updating as needed. - * Called in softirq context. - * Returns: 0 for success. 
- */ -int tcp_cookie_generator(u32 *bakery) -{ - unsigned long jiffy = jiffies; - - if (unlikely(time_after_eq(jiffy, tcp_secret_generating->expires))) { - spin_lock_bh(&tcp_secret_locker); - if (!time_after_eq(jiffy, tcp_secret_generating->expires)) { - /* refreshed by another */ - memcpy(bakery, - &tcp_secret_generating->secrets[0], - COOKIE_WORKSPACE_WORDS); - } else { - /* still needs refreshing */ - get_random_bytes(bakery, COOKIE_WORKSPACE_WORDS); - - /* The first time, paranoia assumes that the - * randomization function isn't as strong. But, - * this secret initialization is delayed until - * the last possible moment (packet arrival). - * Although that time is observable, it is - * unpredictably variable. Mash in the most - * volatile clock bits available, and expire the - * secret extra quickly. - */ - if (unlikely(tcp_secret_primary->expires == - tcp_secret_secondary->expires)) { - struct timespec tv; - - getnstimeofday(&tv); - bakery[COOKIE_DIGEST_WORDS+0] ^= - (u32)tv.tv_nsec; - - tcp_secret_secondary->expires = jiffy - + TCP_SECRET_1MSL - + (0x0f & tcp_cookie_work(bakery, 0)); - } else { - tcp_secret_secondary->expires = jiffy - + TCP_SECRET_LIFE - + (0xff & tcp_cookie_work(bakery, 1)); - tcp_secret_primary->expires = jiffy - + TCP_SECRET_2MSL - + (0x1f & tcp_cookie_work(bakery, 2)); - } - memcpy(&tcp_secret_secondary->secrets[0], - bakery, COOKIE_WORKSPACE_WORDS); - - rcu_assign_pointer(tcp_secret_generating, - tcp_secret_secondary); - rcu_assign_pointer(tcp_secret_retiring, - tcp_secret_primary); - /* - * Neither call_rcu() nor synchronize_rcu() needed. - * Retiring data is not freed. It is replaced after - * further (locked) pointer updates, and a quiet time - * (minimum 1MSL, maximum LIFE - 2MSL). - */ - } - spin_unlock_bh(&tcp_secret_locker); - } else { - rcu_read_lock_bh(); - memcpy(bakery, - &rcu_dereference(tcp_secret_generating)->secrets[0], - COOKIE_WORKSPACE_WORDS); - rcu_read_unlock_bh(); - } - return 0; -} -EXPORT_SYMBOL(tcp_cookie_generator); - void tcp_done(struct sock *sk) { struct request_sock *req = tcp_sk(sk)->fastopen_rsk; @@ -3591,7 +3333,6 @@ void __init tcp_init(void) unsigned long limit; int max_rshare, max_wshare, cnt; unsigned int i; - unsigned long jiffy = jiffies; BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb)); @@ -3667,13 +3408,5 @@ void __init tcp_init(void) tcp_register_congestion_control(&tcp_reno); - memset(&tcp_secret_one.secrets[0], 0, sizeof(tcp_secret_one.secrets)); - memset(&tcp_secret_two.secrets[0], 0, sizeof(tcp_secret_two.secrets)); - tcp_secret_one.expires = jiffy; /* past due */ - tcp_secret_two.expires = jiffy; /* past due */ - tcp_secret_generating = &tcp_secret_one; - tcp_secret_primary = &tcp_secret_one; - tcp_secret_retiring = &tcp_secret_two; - tcp_secret_secondary = &tcp_secret_two; tcp_tasklet_init(); } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 836d74dd0187..19f0149fb6a2 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3760,8 +3760,8 @@ old_ack: * But, this can also be called on packets in the established flow when * the fast version below fails. 
*/ -void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *opt_rx, - const u8 **hvpp, int estab, +void tcp_parse_options(const struct sk_buff *skb, + struct tcp_options_received *opt_rx, int estab, struct tcp_fastopen_cookie *foc) { const unsigned char *ptr; @@ -3845,31 +3845,6 @@ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *o */ break; #endif - case TCPOPT_COOKIE: - /* This option is variable length. - */ - switch (opsize) { - case TCPOLEN_COOKIE_BASE: - /* not yet implemented */ - break; - case TCPOLEN_COOKIE_PAIR: - /* not yet implemented */ - break; - case TCPOLEN_COOKIE_MIN+0: - case TCPOLEN_COOKIE_MIN+2: - case TCPOLEN_COOKIE_MIN+4: - case TCPOLEN_COOKIE_MIN+6: - case TCPOLEN_COOKIE_MAX: - /* 16-bit multiple */ - opt_rx->cookie_plus = opsize; - *hvpp = ptr; - break; - default: - /* ignore option */ - break; - } - break; - case TCPOPT_EXP: /* Fast Open option shares code 254 using a * 16 bits magic number. It's valid only in @@ -3915,8 +3890,7 @@ static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr * If it is wrong it falls back on tcp_parse_options(). */ static bool tcp_fast_parse_options(const struct sk_buff *skb, - const struct tcphdr *th, - struct tcp_sock *tp, const u8 **hvpp) + const struct tcphdr *th, struct tcp_sock *tp) { /* In the spirit of fast parsing, compare doff directly to constant * values. Because equality is used, short doff can be ignored here. @@ -3930,7 +3904,7 @@ static bool tcp_fast_parse_options(const struct sk_buff *skb, return true; } - tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL); + tcp_parse_options(skb, &tp->rx_opt, 1, NULL); if (tp->rx_opt.saw_tstamp) tp->rx_opt.rcv_tsecr -= tp->tsoffset; @@ -5311,12 +5285,10 @@ out: static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th, int syn_inerr) { - const u8 *hash_location; struct tcp_sock *tp = tcp_sk(sk); /* RFC1323: H1. Apply PAWS check first. */ - if (tcp_fast_parse_options(skb, th, tp, &hash_location) && - tp->rx_opt.saw_tstamp && + if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && tcp_paws_discard(sk, skb)) { if (!th->rst) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); @@ -5670,12 +5642,11 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, if (mss == tp->rx_opt.user_mss) { struct tcp_options_received opt; - const u8 *hash_location; /* Get original SYNACK MSS value if user MSS sets mss_clamp */ tcp_clear_options(&opt); opt.user_mss = opt.mss_clamp = 0; - tcp_parse_options(synack, &opt, &hash_location, 0, NULL); + tcp_parse_options(synack, &opt, 0, NULL); mss = opt.mss_clamp; } @@ -5706,14 +5677,12 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th, unsigned int len) { - const u8 *hash_location; struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); - struct tcp_cookie_values *cvp = tp->cookie_values; struct tcp_fastopen_cookie foc = { .len = -1 }; int saved_clamp = tp->rx_opt.mss_clamp; - tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, &foc); + tcp_parse_options(skb, &tp->rx_opt, 0, &foc); if (tp->rx_opt.saw_tstamp) tp->rx_opt.rcv_tsecr -= tp->tsoffset; @@ -5810,30 +5779,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, * is initialized. 
*/ tp->copied_seq = tp->rcv_nxt; - if (cvp != NULL && - cvp->cookie_pair_size > 0 && - tp->rx_opt.cookie_plus > 0) { - int cookie_size = tp->rx_opt.cookie_plus - - TCPOLEN_COOKIE_BASE; - int cookie_pair_size = cookie_size - + cvp->cookie_desired; - - /* A cookie extension option was sent and returned. - * Note that each incoming SYNACK replaces the - * Responder cookie. The initial exchange is most - * fragile, as protection against spoofing relies - * entirely upon the sequence and timestamp (above). - * This replacement strategy allows the correct pair to - * pass through, while any others will be filtered via - * Responder verification later. - */ - if (sizeof(cvp->cookie_pair) >= cookie_pair_size) { - memcpy(&cvp->cookie_pair[cvp->cookie_desired], - hash_location, cookie_size); - cvp->cookie_pair_size = cookie_pair_size; - } - } - smp_mb(); tcp_finish_connect(sk, skb); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index b7ab868c8284..b27c758ca23f 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -838,7 +838,6 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, */ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, struct request_sock *req, - struct request_values *rvp, u16 queue_mapping, bool nocache) { @@ -851,7 +850,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL) return -1; - skb = tcp_make_synack(sk, dst, req, rvp, NULL); + skb = tcp_make_synack(sk, dst, req, NULL); if (skb) { __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr); @@ -868,10 +867,9 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, return err; } -static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req, - struct request_values *rvp) +static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req) { - int res = tcp_v4_send_synack(sk, NULL, req, rvp, 0, false); + int res = tcp_v4_send_synack(sk, NULL, req, 0, false); if (!res) TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); @@ -1371,8 +1369,7 @@ static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb, static int tcp_v4_conn_req_fastopen(struct sock *sk, struct sk_buff *skb, struct sk_buff *skb_synack, - struct request_sock *req, - struct request_values *rvp) + struct request_sock *req) { struct tcp_sock *tp = tcp_sk(sk); struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; @@ -1467,9 +1464,7 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk, int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) { - struct tcp_extend_values tmp_ext; struct tcp_options_received tmp_opt; - const u8 *hash_location; struct request_sock *req; struct inet_request_sock *ireq; struct tcp_sock *tp = tcp_sk(sk); @@ -1519,42 +1514,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) tcp_clear_options(&tmp_opt); tmp_opt.mss_clamp = TCP_MSS_DEFAULT; tmp_opt.user_mss = tp->rx_opt.user_mss; - tcp_parse_options(skb, &tmp_opt, &hash_location, 0, - want_cookie ? 
NULL : &foc); - - if (tmp_opt.cookie_plus > 0 && - tmp_opt.saw_tstamp && - !tp->rx_opt.cookie_out_never && - (sysctl_tcp_cookie_size > 0 || - (tp->cookie_values != NULL && - tp->cookie_values->cookie_desired > 0))) { - u8 *c; - u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS]; - int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE; - - if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0) - goto drop_and_release; - - /* Secret recipe starts with IP addresses */ - *mess++ ^= (__force u32)daddr; - *mess++ ^= (__force u32)saddr; - - /* plus variable length Initiator Cookie */ - c = (u8 *)mess; - while (l-- > 0) - *c++ ^= *hash_location++; - - want_cookie = false; /* not our kind of cookie */ - tmp_ext.cookie_out_never = 0; /* false */ - tmp_ext.cookie_plus = tmp_opt.cookie_plus; - } else if (!tp->rx_opt.cookie_in_always) { - /* redundant indications, but ensure initialization. */ - tmp_ext.cookie_out_never = 1; /* true */ - tmp_ext.cookie_plus = 0; - } else { - goto drop_and_release; - } - tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always; + tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc); if (want_cookie && !tmp_opt.saw_tstamp) tcp_clear_options(&tmp_opt); @@ -1636,7 +1596,6 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) * of tcp_v4_send_synack()->tcp_select_initial_window(). */ skb_synack = tcp_make_synack(sk, dst, req, - (struct request_values *)&tmp_ext, fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL); if (skb_synack) { @@ -1660,8 +1619,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) if (fastopen_cookie_present(&foc) && foc.len != 0) NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL); - } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req, - (struct request_values *)&tmp_ext)) + } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req)) goto drop_and_free; return 0; @@ -2241,12 +2199,6 @@ void tcp_v4_destroy_sock(struct sock *sk) if (inet_csk(sk)->icsk_bind_hash) inet_put_port(sk); - /* TCP Cookie Transactions */ - if (tp->cookie_values != NULL) { - kref_put(&tp->cookie_values->kref, - tcp_cookie_values_release); - tp->cookie_values = NULL; - } BUG_ON(tp->fastopen_rsk != NULL); /* If socket is aborted during connect operation */ diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 4bdb09fca401..8f0234f8bb95 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -93,13 +93,12 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, const struct tcphdr *th) { struct tcp_options_received tmp_opt; - const u8 *hash_location; struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); bool paws_reject = false; tmp_opt.saw_tstamp = 0; if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) { - tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL); + tcp_parse_options(skb, &tmp_opt, 0, NULL); if (tmp_opt.saw_tstamp) { tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset; @@ -388,32 +387,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct tcp_request_sock *treq = tcp_rsk(req); struct inet_connection_sock *newicsk = inet_csk(newsk); struct tcp_sock *newtp = tcp_sk(newsk); - struct tcp_sock *oldtp = tcp_sk(sk); - struct tcp_cookie_values *oldcvp = oldtp->cookie_values; - - /* TCP Cookie Transactions require space for the cookie pair, - * as it differs for each connection. There is no need to - * copy any s_data_payload stored at the original socket. - * Failure will prevent resuming the connection. 
- * - * Presumed copied, in order of appearance: - * cookie_in_always, cookie_out_never - */ - if (oldcvp != NULL) { - struct tcp_cookie_values *newcvp = - kzalloc(sizeof(*newtp->cookie_values), - GFP_ATOMIC); - - if (newcvp != NULL) { - kref_init(&newcvp->kref); - newcvp->cookie_desired = - oldcvp->cookie_desired; - newtp->cookie_values = newcvp; - } else { - /* Not Yet Implemented */ - newtp->cookie_values = NULL; - } - } /* Now setup tcp_sock */ newtp->pred_flags = 0; @@ -422,8 +395,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, newtp->rcv_nxt = treq->rcv_isn + 1; newtp->snd_sml = newtp->snd_una = - newtp->snd_nxt = newtp->snd_up = - treq->snt_isn + 1 + tcp_s_data_size(oldtp); + newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1; tcp_prequeue_init(newtp); INIT_LIST_HEAD(&newtp->tsq_node); @@ -460,8 +432,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, tcp_set_ca_state(newsk, TCP_CA_Open); tcp_init_xmit_timers(newsk); skb_queue_head_init(&newtp->out_of_order_queue); - newtp->write_seq = newtp->pushed_seq = - treq->snt_isn + 1 + tcp_s_data_size(oldtp); + newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1; newtp->rx_opt.saw_tstamp = 0; @@ -538,7 +509,6 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, bool fastopen) { struct tcp_options_received tmp_opt; - const u8 *hash_location; struct sock *child; const struct tcphdr *th = tcp_hdr(skb); __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK); @@ -548,7 +518,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, tmp_opt.saw_tstamp = 0; if (th->doff > (sizeof(struct tcphdr)>>2)) { - tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL); + tcp_parse_options(skb, &tmp_opt, 0, NULL); if (tmp_opt.saw_tstamp) { tmp_opt.ts_recent = req->ts_recent; @@ -648,7 +618,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, */ if ((flg & TCP_FLAG_ACK) && !fastopen && (TCP_SKB_CB(skb)->ack_seq != - tcp_rsk(req)->snt_isn + 1 + tcp_s_data_size(tcp_sk(sk)))) + tcp_rsk(req)->snt_isn + 1)) return sk; /* Also, it would be not so bad idea to check rcv_tsecr, which diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 8e7742f0b5d2..ac5871ebe086 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -65,9 +65,6 @@ int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS; /* By default, RFC2861 behavior. */ int sysctl_tcp_slow_start_after_idle __read_mostly = 1; -int sysctl_tcp_cookie_size __read_mostly = 0; /* TCP_COOKIE_MAX */ -EXPORT_SYMBOL_GPL(sysctl_tcp_cookie_size); - static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, int push_one, gfp_t gfp); @@ -386,7 +383,6 @@ static inline bool tcp_urg_mode(const struct tcp_sock *tp) #define OPTION_TS (1 << 1) #define OPTION_MD5 (1 << 2) #define OPTION_WSCALE (1 << 3) -#define OPTION_COOKIE_EXTENSION (1 << 4) #define OPTION_FAST_OPEN_COOKIE (1 << 8) struct tcp_out_options { @@ -400,36 +396,6 @@ struct tcp_out_options { struct tcp_fastopen_cookie *fastopen_cookie; /* Fast open cookie */ }; -/* The sysctl int routines are generic, so check consistency here. 
- */ -static u8 tcp_cookie_size_check(u8 desired) -{ - int cookie_size; - - if (desired > 0) - /* previously specified */ - return desired; - - cookie_size = ACCESS_ONCE(sysctl_tcp_cookie_size); - if (cookie_size <= 0) - /* no default specified */ - return 0; - - if (cookie_size <= TCP_COOKIE_MIN) - /* value too small, specify minimum */ - return TCP_COOKIE_MIN; - - if (cookie_size >= TCP_COOKIE_MAX) - /* value too large, specify maximum */ - return TCP_COOKIE_MAX; - - if (cookie_size & 1) - /* 8-bit multiple, illegal, fix it */ - cookie_size++; - - return (u8)cookie_size; -} - /* Write previously computed TCP options to the packet. * * Beware: Something in the Internet is very sensitive to the ordering of @@ -448,27 +414,9 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp, { u16 options = opts->options; /* mungable copy */ - /* Having both authentication and cookies for security is redundant, - * and there's certainly not enough room. Instead, the cookie-less - * extension variant is proposed. - * - * Consider the pessimal case with authentication. The options - * could look like: - * COOKIE|MD5(20) + MSS(4) + SACK|TS(12) + WSCALE(4) == 40 - */ if (unlikely(OPTION_MD5 & options)) { - if (unlikely(OPTION_COOKIE_EXTENSION & options)) { - *ptr++ = htonl((TCPOPT_COOKIE << 24) | - (TCPOLEN_COOKIE_BASE << 16) | - (TCPOPT_MD5SIG << 8) | - TCPOLEN_MD5SIG); - } else { - *ptr++ = htonl((TCPOPT_NOP << 24) | - (TCPOPT_NOP << 16) | - (TCPOPT_MD5SIG << 8) | - TCPOLEN_MD5SIG); - } - options &= ~OPTION_COOKIE_EXTENSION; + *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | + (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); /* overload cookie hash location */ opts->hash_location = (__u8 *)ptr; ptr += 4; @@ -497,44 +445,6 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp, *ptr++ = htonl(opts->tsecr); } - /* Specification requires after timestamp, so do it now. - * - * Consider the pessimal case without authentication. The options - * could look like: - * MSS(4) + SACK|TS(12) + COOKIE(20) + WSCALE(4) == 40 - */ - if (unlikely(OPTION_COOKIE_EXTENSION & options)) { - __u8 *cookie_copy = opts->hash_location; - u8 cookie_size = opts->hash_size; - - /* 8-bit multiple handled in tcp_cookie_size_check() above, - * and elsewhere. - */ - if (0x2 & cookie_size) { - __u8 *p = (__u8 *)ptr; - - /* 16-bit multiple */ - *p++ = TCPOPT_COOKIE; - *p++ = TCPOLEN_COOKIE_BASE + cookie_size; - *p++ = *cookie_copy++; - *p++ = *cookie_copy++; - ptr++; - cookie_size -= 2; - } else { - /* 32-bit multiple */ - *ptr++ = htonl(((TCPOPT_NOP << 24) | - (TCPOPT_NOP << 16) | - (TCPOPT_COOKIE << 8) | - TCPOLEN_COOKIE_BASE) + - cookie_size); - } - - if (cookie_size > 0) { - memcpy(ptr, cookie_copy, cookie_size); - ptr += (cookie_size / 4); - } - } - if (unlikely(OPTION_SACK_ADVERTISE & options)) { *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | @@ -593,11 +503,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb, struct tcp_md5sig_key **md5) { struct tcp_sock *tp = tcp_sk(sk); - struct tcp_cookie_values *cvp = tp->cookie_values; unsigned int remaining = MAX_TCP_OPTION_SPACE; - u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ? - tcp_cookie_size_check(cvp->cookie_desired) : - 0; struct tcp_fastopen_request *fastopen = tp->fastopen_req; #ifdef CONFIG_TCP_MD5SIG @@ -649,52 +555,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb, tp->syn_fastopen = 1; } } - /* Note that timestamps are required by the specification. 
- * - * Odd numbers of bytes are prohibited by the specification, ensuring - * that the cookie is 16-bit aligned, and the resulting cookie pair is - * 32-bit aligned. - */ - if (*md5 == NULL && - (OPTION_TS & opts->options) && - cookie_size > 0) { - int need = TCPOLEN_COOKIE_BASE + cookie_size; - - if (0x2 & need) { - /* 32-bit multiple */ - need += 2; /* NOPs */ - - if (need > remaining) { - /* try shrinking cookie to fit */ - cookie_size -= 2; - need -= 4; - } - } - while (need > remaining && TCP_COOKIE_MIN <= cookie_size) { - cookie_size -= 4; - need -= 4; - } - if (TCP_COOKIE_MIN <= cookie_size) { - opts->options |= OPTION_COOKIE_EXTENSION; - opts->hash_location = (__u8 *)&cvp->cookie_pair[0]; - opts->hash_size = cookie_size; - - /* Remember for future incarnations. */ - cvp->cookie_desired = cookie_size; - - if (cvp->cookie_desired != cvp->cookie_pair_size) { - /* Currently use random bytes as a nonce, - * assuming these are completely unpredictable - * by hostile users of the same system. - */ - get_random_bytes(&cvp->cookie_pair[0], - cookie_size); - cvp->cookie_pair_size = cookie_size; - } - remaining -= need; - } - } return MAX_TCP_OPTION_SPACE - remaining; } @@ -704,14 +565,10 @@ static unsigned int tcp_synack_options(struct sock *sk, unsigned int mss, struct sk_buff *skb, struct tcp_out_options *opts, struct tcp_md5sig_key **md5, - struct tcp_extend_values *xvp, struct tcp_fastopen_cookie *foc) { struct inet_request_sock *ireq = inet_rsk(req); unsigned int remaining = MAX_TCP_OPTION_SPACE; - u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ? - xvp->cookie_plus : - 0; #ifdef CONFIG_TCP_MD5SIG *md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req); @@ -759,28 +616,7 @@ static unsigned int tcp_synack_options(struct sock *sk, remaining -= need; } } - /* Similar rationale to tcp_syn_options() applies here, too. - * If the options fit, the same options should fit now! - */ - if (*md5 == NULL && - ireq->tstamp_ok && - cookie_plus > TCPOLEN_COOKIE_BASE) { - int need = cookie_plus; /* has TCPOLEN_COOKIE_BASE */ - - if (0x2 & need) { - /* 32-bit multiple */ - need += 2; /* NOPs */ - } - if (need <= remaining) { - opts->options |= OPTION_COOKIE_EXTENSION; - opts->hash_size = cookie_plus - TCPOLEN_COOKIE_BASE; - remaining -= need; - } else { - /* There's no error return, so flag it. */ - xvp->cookie_out_never = 1; /* true */ - opts->hash_size = 0; - } - } + return MAX_TCP_OPTION_SPACE - remaining; } @@ -2802,32 +2638,24 @@ int tcp_send_synack(struct sock *sk) * sk: listener socket * dst: dst entry attached to the SYNACK * req: request_sock pointer - * rvp: request_values pointer * * Allocate one skb and build a SYNACK packet. * @dst is consumed : Caller should not use it again. 
*/ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, struct request_sock *req, - struct request_values *rvp, struct tcp_fastopen_cookie *foc) { struct tcp_out_options opts; - struct tcp_extend_values *xvp = tcp_xv(rvp); struct inet_request_sock *ireq = inet_rsk(req); struct tcp_sock *tp = tcp_sk(sk); - const struct tcp_cookie_values *cvp = tp->cookie_values; struct tcphdr *th; struct sk_buff *skb; struct tcp_md5sig_key *md5; int tcp_header_size; int mss; - int s_data_desired = 0; - if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired) - s_data_desired = cvp->s_data_desired; - skb = alloc_skb(MAX_TCP_HEADER + 15 + s_data_desired, - sk_gfp_atomic(sk, GFP_ATOMIC)); + skb = alloc_skb(MAX_TCP_HEADER + 15, sk_gfp_atomic(sk, GFP_ATOMIC)); if (unlikely(!skb)) { dst_release(dst); return NULL; @@ -2869,9 +2697,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, else #endif TCP_SKB_CB(skb)->when = tcp_time_stamp; - tcp_header_size = tcp_synack_options(sk, req, mss, - skb, &opts, &md5, xvp, foc) - + sizeof(*th); + tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5, + foc) + sizeof(*th); skb_push(skb, tcp_header_size); skb_reset_transport_header(skb); @@ -2889,40 +2716,6 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn, TCPHDR_SYN | TCPHDR_ACK); - if (OPTION_COOKIE_EXTENSION & opts.options) { - if (s_data_desired) { - u8 *buf = skb_put(skb, s_data_desired); - - /* copy data directly from the listening socket. */ - memcpy(buf, cvp->s_data_payload, s_data_desired); - TCP_SKB_CB(skb)->end_seq += s_data_desired; - } - - if (opts.hash_size > 0) { - __u32 workspace[SHA_WORKSPACE_WORDS]; - u32 *mess = &xvp->cookie_bakery[COOKIE_DIGEST_WORDS]; - u32 *tail = &mess[COOKIE_MESSAGE_WORDS-1]; - - /* Secret recipe depends on the Timestamp, (future) - * Sequence and Acknowledgment Numbers, Initiator - * Cookie, and others handled by IP variant caller. - */ - *tail-- ^= opts.tsval; - *tail-- ^= tcp_rsk(req)->rcv_isn + 1; - *tail-- ^= TCP_SKB_CB(skb)->seq + 1; - - /* recommended */ - *tail-- ^= (((__force u32)th->dest << 16) | (__force u32)th->source); - *tail-- ^= (u32)(unsigned long)cvp; /* per sockopt */ - - sha_transform((__u32 *)&xvp->cookie_bakery[0], - (char *)mess, - &workspace[0]); - opts.hash_location = - (__u8 *)&xvp->cookie_bakery[0]; - } - } - th->seq = htonl(TCP_SKB_CB(skb)->seq); /* XXX data is queued and acked as is. 
No buffer/window check */ th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt); diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 8a0848b60b35..d5dda20bd717 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -149,7 +149,6 @@ static inline int cookie_check(const struct sk_buff *skb, __u32 cookie) struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) { struct tcp_options_received tcp_opt; - const u8 *hash_location; struct inet_request_sock *ireq; struct inet6_request_sock *ireq6; struct tcp_request_sock *treq; @@ -177,7 +176,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) /* check for timestamp cookie support */ memset(&tcp_opt, 0, sizeof(tcp_opt)); - tcp_parse_options(skb, &tcp_opt, &hash_location, 0, NULL); + tcp_parse_options(skb, &tcp_opt, 0, NULL); if (!cookie_check_timestamp(&tcp_opt, sock_net(sk), &ecn_ok)) goto out; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 9b6460055df5..0a97add2ab74 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -454,7 +454,6 @@ out: static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst, struct flowi6 *fl6, struct request_sock *req, - struct request_values *rvp, u16 queue_mapping) { struct inet6_request_sock *treq = inet6_rsk(req); @@ -466,7 +465,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst, if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL) goto done; - skb = tcp_make_synack(sk, dst, req, rvp, NULL); + skb = tcp_make_synack(sk, dst, req, NULL); if (skb) { __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr); @@ -481,13 +480,12 @@ done: return err; } -static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req, - struct request_values *rvp) +static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req) { struct flowi6 fl6; int res; - res = tcp_v6_send_synack(sk, NULL, &fl6, req, rvp, 0); + res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0); if (!res) TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); return res; @@ -940,9 +938,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) */ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) { - struct tcp_extend_values tmp_ext; struct tcp_options_received tmp_opt; - const u8 *hash_location; struct request_sock *req; struct inet6_request_sock *treq; struct ipv6_pinfo *np = inet6_sk(sk); @@ -980,50 +976,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) tcp_clear_options(&tmp_opt); tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); tmp_opt.user_mss = tp->rx_opt.user_mss; - tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL); - - if (tmp_opt.cookie_plus > 0 && - tmp_opt.saw_tstamp && - !tp->rx_opt.cookie_out_never && - (sysctl_tcp_cookie_size > 0 || - (tp->cookie_values != NULL && - tp->cookie_values->cookie_desired > 0))) { - u8 *c; - u32 *d; - u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS]; - int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE; - - if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0) - goto drop_and_free; - - /* Secret recipe starts with IP addresses */ - d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0]; - *mess++ ^= *d++; - *mess++ ^= *d++; - *mess++ ^= *d++; - *mess++ ^= *d++; - d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0]; - *mess++ ^= *d++; - *mess++ ^= *d++; - *mess++ ^= *d++; - *mess++ ^= *d++; - - /* plus variable length Initiator Cookie */ - c = (u8 *)mess; - while (l-- > 0) - *c++ ^= *hash_location++; - 
- want_cookie = false; /* not our kind of cookie */ - tmp_ext.cookie_out_never = 0; /* false */ - tmp_ext.cookie_plus = tmp_opt.cookie_plus; - } else if (!tp->rx_opt.cookie_in_always) { - /* redundant indications, but ensure initialization. */ - tmp_ext.cookie_out_never = 1; /* true */ - tmp_ext.cookie_plus = 0; - } else { - goto drop_and_free; - } - tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always; + tcp_parse_options(skb, &tmp_opt, 0, NULL); if (want_cookie && !tmp_opt.saw_tstamp) tcp_clear_options(&tmp_opt); @@ -1101,7 +1054,6 @@ have_isn: goto drop_and_release; if (tcp_v6_send_synack(sk, dst, &fl6, req, - (struct request_values *)&tmp_ext, skb_get_queue_mapping(skb)) || want_cookie) goto drop_and_free; -- cgit v1.2.3-70-g09d2 From f77668dc25b27270fe589031b22c432c3462b1d8 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 19 Mar 2013 06:39:30 +0000 Subject: net: flow_dissector: add __skb_get_poff to get a start offset to payload __skb_get_poff() returns the offset to the payload as far as it could be dissected. The main user is currently BPF, so that we can dynamically truncate packets without needing to push actual payload to the user space and instead can analyze headers only. Suggested-by: Eric Dumazet Signed-off-by: Daniel Borkmann Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/skbuff.h | 2 ++ net/core/flow_dissector.c | 57 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index b66ecc6ef102..497412165b1c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2840,6 +2840,8 @@ static inline void skb_checksum_none_assert(const struct sk_buff *skb) bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); +u32 __skb_get_poff(const struct sk_buff *skb); + /** * skb_head_is_locked - Determine if the skb->head is locked down * @skb: skb to check diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index f4be293bab9e..00ee068efc1c 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -5,6 +5,10 @@ #include #include #include +#include +#include +#include +#include #include #include #include @@ -228,6 +232,59 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb, } EXPORT_SYMBOL(__skb_tx_hash); +/* __skb_get_poff() returns the offset to the payload as far as it could + * be dissected. The main user is currently BPF, so that we can dynamically + * truncate packets without needing to push actual payload to the user + * space and can analyze headers only, instead. + */ +u32 __skb_get_poff(const struct sk_buff *skb) +{ + struct flow_keys keys; + u32 poff = 0; + + if (!skb_flow_dissect(skb, &keys)) + return 0; + + poff += keys.thoff; + switch (keys.ip_proto) { + case IPPROTO_TCP: { + const struct tcphdr *tcph; + struct tcphdr _tcph; + + tcph = skb_header_pointer(skb, poff, sizeof(_tcph), &_tcph); + if (!tcph) + return poff; + + poff += max_t(u32, sizeof(struct tcphdr), tcph->doff * 4); + break; + } + case IPPROTO_UDP: + case IPPROTO_UDPLITE: + poff += sizeof(struct udphdr); + break; + /* For the rest, we do not really care about header + * extensions at this point for now. 
+ */ + case IPPROTO_ICMP: + poff += sizeof(struct icmphdr); + break; + case IPPROTO_ICMPV6: + poff += sizeof(struct icmp6hdr); + break; + case IPPROTO_IGMP: + poff += sizeof(struct igmphdr); + break; + case IPPROTO_DCCP: + poff += sizeof(struct dccp_hdr); + break; + case IPPROTO_SCTP: + poff += sizeof(struct sctphdr); + break; + } + + return poff; +} + static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index) { if (unlikely(queue_index >= dev->real_num_tx_queues)) { -- cgit v1.2.3-70-g09d2 From 3e5289d5e3f98b7b5b8cac32e9e5a7004c067436 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 19 Mar 2013 06:39:31 +0000 Subject: filter: add ANC_PAY_OFFSET instruction for loading payload start offset It is very useful to do dynamic truncation of packets. In particular, we're interested to push the necessary header bytes to the user space and cut off user payload that should probably not be transferred for some reasons (e.g. privacy, speed, or others). With the ancillary extension PAY_OFFSET, we can load it into the accumulator, and return it. E.g. in bpfc syntax ... ld #poff ; { 0x20, 0, 0, 0xfffff034 }, ret a ; { 0x16, 0, 0, 0x00000000 }, ... as a filter will accomplish this without having to do a big hackery in a BPF filter itself. Follow-up JIT implementations are welcome. Thanks to Eric Dumazet for suggesting and discussing this during the Netfilter Workshop in Copenhagen. Suggested-by: Eric Dumazet Signed-off-by: Daniel Borkmann Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/filter.h | 1 + include/uapi/linux/filter.h | 3 ++- net/core/filter.c | 5 +++++ 3 files changed, 8 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/filter.h b/include/linux/filter.h index c45eabc135e1..d2059cb4e465 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -126,6 +126,7 @@ enum { BPF_S_ANC_SECCOMP_LD_W, BPF_S_ANC_VLAN_TAG, BPF_S_ANC_VLAN_TAG_PRESENT, + BPF_S_ANC_PAY_OFFSET, }; #endif /* __LINUX_FILTER_H__ */ diff --git a/include/uapi/linux/filter.h b/include/uapi/linux/filter.h index 9cfde6941099..8eb9ccaa5b48 100644 --- a/include/uapi/linux/filter.h +++ b/include/uapi/linux/filter.h @@ -129,7 +129,8 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. 
*/ #define SKF_AD_ALU_XOR_X 40 #define SKF_AD_VLAN_TAG 44 #define SKF_AD_VLAN_TAG_PRESENT 48 -#define SKF_AD_MAX 52 +#define SKF_AD_PAY_OFFSET 52 +#define SKF_AD_MAX 56 #define SKF_NET_OFF (-0x100000) #define SKF_LL_OFF (-0x200000) diff --git a/net/core/filter.c b/net/core/filter.c index 2e20b55a7830..dad2a178f9f8 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -348,6 +348,9 @@ load_b: case BPF_S_ANC_VLAN_TAG_PRESENT: A = !!vlan_tx_tag_present(skb); continue; + case BPF_S_ANC_PAY_OFFSET: + A = __skb_get_poff(skb); + continue; case BPF_S_ANC_NLATTR: { struct nlattr *nla; @@ -612,6 +615,7 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen) ANCILLARY(ALU_XOR_X); ANCILLARY(VLAN_TAG); ANCILLARY(VLAN_TAG_PRESENT); + ANCILLARY(PAY_OFFSET); } /* ancillary operation unknown or unsupported */ @@ -814,6 +818,7 @@ static void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to) [BPF_S_ANC_SECCOMP_LD_W] = BPF_LD|BPF_B|BPF_ABS, [BPF_S_ANC_VLAN_TAG] = BPF_LD|BPF_B|BPF_ABS, [BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS, + [BPF_S_ANC_PAY_OFFSET] = BPF_LD|BPF_B|BPF_ABS, [BPF_S_LD_W_LEN] = BPF_LD|BPF_W|BPF_LEN, [BPF_S_LD_W_IND] = BPF_LD|BPF_W|BPF_IND, [BPF_S_LD_H_IND] = BPF_LD|BPF_H|BPF_IND, -- cgit v1.2.3-70-g09d2 From 2b5faa4c553f90ee2dde1d976b220b1ca9741ef0 Mon Sep 17 00:00:00 2001 From: Jesper Derehag Date: Tue, 19 Mar 2013 20:50:05 +0000 Subject: connector: Added coredumping event to the process connector Process connector can now also detect coredumping events. The main aim of the patch is to get notified at the start of coredumping, instead of having to wait for it to finish and then being notified through the EXIT event. Could be used for instance by process-managers that want to get notified as soon as possible about process failures, and not necessarily being notified after the coredump, which could be in the order of minutes depending on the size of the coredump, piping and so on. Signed-off-by: Jesper Derehag Signed-off-by: David S.
Miller --- drivers/connector/cn_proc.c | 25 +++++++++++++++++++++++++ include/linux/cn_proc.h | 4 ++++ include/uapi/linux/cn_proc.h | 10 +++++++++- kernel/signal.c | 2 ++ 4 files changed, 40 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c index 1110478dd0fd..08ae128cce9b 100644 --- a/drivers/connector/cn_proc.c +++ b/drivers/connector/cn_proc.c @@ -232,6 +232,31 @@ void proc_comm_connector(struct task_struct *task) cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); } +void proc_coredump_connector(struct task_struct *task) +{ + struct cn_msg *msg; + struct proc_event *ev; + __u8 buffer[CN_PROC_MSG_SIZE]; + struct timespec ts; + + if (atomic_read(&proc_event_num_listeners) < 1) + return; + + msg = (struct cn_msg *)buffer; + ev = (struct proc_event *)msg->data; + get_seq(&msg->seq, &ev->cpu); + ktime_get_ts(&ts); /* get high res monotonic timestamp */ + put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); + ev->what = PROC_EVENT_COREDUMP; + ev->event_data.coredump.process_pid = task->pid; + ev->event_data.coredump.process_tgid = task->tgid; + + memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); + msg->ack = 0; /* not used */ + msg->len = sizeof(*ev); + cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); +} + void proc_exit_connector(struct task_struct *task) { struct cn_msg *msg; diff --git a/include/linux/cn_proc.h b/include/linux/cn_proc.h index 2c1bc1ea04ee..1d5b02a96c46 100644 --- a/include/linux/cn_proc.h +++ b/include/linux/cn_proc.h @@ -26,6 +26,7 @@ void proc_id_connector(struct task_struct *task, int which_id); void proc_sid_connector(struct task_struct *task); void proc_ptrace_connector(struct task_struct *task, int which_id); void proc_comm_connector(struct task_struct *task); +void proc_coredump_connector(struct task_struct *task); void proc_exit_connector(struct task_struct *task); #else static inline void proc_fork_connector(struct task_struct *task) @@ -48,6 +49,9 @@ static inline void proc_ptrace_connector(struct task_struct *task, int ptrace_id) {} +static inline void proc_coredump_connector(struct task_struct *task) +{} + static inline void proc_exit_connector(struct task_struct *task) {} #endif /* CONFIG_PROC_EVENTS */ diff --git a/include/uapi/linux/cn_proc.h b/include/uapi/linux/cn_proc.h index 0d7b49973bb3..f6c271035bbd 100644 --- a/include/uapi/linux/cn_proc.h +++ b/include/uapi/linux/cn_proc.h @@ -56,7 +56,9 @@ struct proc_event { PROC_EVENT_PTRACE = 0x00000100, PROC_EVENT_COMM = 0x00000200, /* "next" should be 0x00000400 */ - /* "last" is the last process event: exit */ + /* "last" is the last process event: exit, + * while "next to last" is coredumping event */ + PROC_EVENT_COREDUMP = 0x40000000, PROC_EVENT_EXIT = 0x80000000 } what; __u32 cpu; @@ -110,11 +112,17 @@ struct proc_event { char comm[16]; } comm; + struct coredump_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; + } coredump; + struct exit_proc_event { __kernel_pid_t process_pid; __kernel_pid_t process_tgid; __u32 exit_code, exit_signal; } exit; + } event_data; }; diff --git a/kernel/signal.c b/kernel/signal.c index dd72567767d9..497330ec2ae9 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -32,6 +32,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include @@ -2350,6 +2351,7 @@ relock: if (sig_kernel_coredump(signr)) { if (print_fatal_signals) print_fatal_signal(info->si_signo); + proc_coredump_connector(current); /* * If it was able to dump core, this kills all * other 
threads in the group and synchronizes with -- cgit v1.2.3-70-g09d2 From 9b44190dc114c1720b34975b5bfc65aece112ced Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Wed, 20 Mar 2013 13:32:58 +0000 Subject: tcp: refactor F-RTO The patch series refactors the F-RTO feature (RFC4138/5682). This is to simplify the loss recovery processing. Existing F-RTO was developed during the experimental stage (RFC4138) and has many experimental features. It takes a separate code path from the traditional timeout processing by overloading CA_Disorder instead of using CA_Loss state. This complicates CA_Disorder state handling because it's also used for handling dubious ACKs and undos. While the algorithm in the RFC does not change the congestion control, the implementation intercepts congestion control in various places (e.g., frto_cwnd in tcp_ack()). The new code implements the newer F-RTO RFC5682 using the CA_Loss processing path. F-RTO becomes a small extension in the timeout processing and interfaces with congestion control and Eifel undo modules. It lets congestion control (module) determine how many to send independently. F-RTO only chooses what to send in order to detect spurious retransmission. If the timeout is found spurious it invokes existing Eifel undo algorithms like DSACK or TCP timestamp based detection. The first patch removes all F-RTO code except sysctl_tcp_frto, which is left for the new implementation. Since CA_EVENT_FRTO is removed, TCP westwood now computes ssthresh on the regular timeout CA_EVENT_LOSS event. Signed-off-by: Yuchung Cheng Acked-by: Neal Cardwell Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- Documentation/networking/ip-sysctl.txt | 17 -- include/linux/tcp.h | 6 +- include/net/tcp.h | 4 - net/ipv4/sysctl_net_ipv4.c | 7 - net/ipv4/tcp_input.c | 375 +-------------------------- net/ipv4/tcp_minisocks.c | 3 - net/ipv4/tcp_output.c | 11 +- net/ipv4/tcp_timer.c | 6 +- net/ipv4/tcp_westwood.c | 2 +- 9 files changed, 10 insertions(+), 421 deletions(-) (limited to 'include/linux') diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 17953e2bc3e9..8a977a0aaede 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -239,23 +239,6 @@ tcp_frto - INTEGER interacts badly with the packet counting of the SACK enabled TCP flow. -tcp_frto_response - INTEGER - When F-RTO has detected that a TCP retransmission timeout was - spurious (i.e, the timeout would have been avoided had TCP set a - longer retransmission timeout), TCP has several options what to do - next. Possible values are: - 0 Rate halving based; a smooth and conservative response, - results in halved cwnd and ssthresh after one RTT - 1 Very conservative response; not recommended because even - though being valid, it interacts poorly with the rest of - Linux TCP, halves cwnd and ssthresh immediately - 2 Aggressive response; undoes congestion control measures - that are now known to be unnecessary (ignoring the - possibility of a lost retransmission that would require - TCP to be more cautious), cwnd and ssthresh are restored - to the values prior timeout - Default: 0 (rate halving based) - tcp_keepalive_time - INTEGER How often TCP sends out keepalive messages when keepalive is enabled. Default: 2hours.
diff --git a/include/linux/tcp.h b/include/linux/tcp.h index ed6a7456eecd..f5f203b36379 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -187,14 +187,12 @@ struct tcp_sock { u32 window_clamp; /* Maximal window to advertise */ u32 rcv_ssthresh; /* Current window clamp */ - u32 frto_highmark; /* snd_nxt when RTO occurred */ u16 advmss; /* Advertised MSS */ - u8 frto_counter; /* Number of new acks after RTO */ + u8 unused; u8 nonagle : 4,/* Disable Nagle algorithm? */ thin_lto : 1,/* Use linear timeouts for thin streams */ thin_dupack : 1,/* Fast retransmit on first dupack */ - repair : 1, - unused : 1; + repair : 1; u8 repair_queue; u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */ syn_data:1, /* SYN includes data */ diff --git a/include/net/tcp.h b/include/net/tcp.h index 7f2f17198d75..d1dcb596230e 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -272,7 +272,6 @@ extern int sysctl_tcp_app_win; extern int sysctl_tcp_adv_win_scale; extern int sysctl_tcp_tw_reuse; extern int sysctl_tcp_frto; -extern int sysctl_tcp_frto_response; extern int sysctl_tcp_low_latency; extern int sysctl_tcp_dma_copybreak; extern int sysctl_tcp_nometrics_save; @@ -424,8 +423,6 @@ extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb, bool fastopen); extern int tcp_child_process(struct sock *parent, struct sock *child, struct sk_buff *skb); -extern bool tcp_use_frto(struct sock *sk); -extern void tcp_enter_frto(struct sock *sk); extern void tcp_enter_loss(struct sock *sk, int how); extern void tcp_clear_retrans(struct tcp_sock *tp); extern void tcp_update_metrics(struct sock *sk); @@ -756,7 +753,6 @@ enum tcp_ca_event { CA_EVENT_TX_START, /* first transmit when no packets in flight */ CA_EVENT_CWND_RESTART, /* congestion window restart */ CA_EVENT_COMPLETE_CWR, /* end of congestion recovery */ - CA_EVENT_FRTO, /* fast recovery timeout */ CA_EVENT_LOSS, /* loss timeout */ CA_EVENT_FAST_ACK, /* in sequence ack */ CA_EVENT_SLOW_ACK, /* other ack */ diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index cb45062c8be0..fa2f63fc453b 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -591,13 +591,6 @@ static struct ctl_table ipv4_table[] = { .mode = 0644, .proc_handler = proc_dointvec }, - { - .procname = "tcp_frto_response", - .data = &sysctl_tcp_frto_response, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec - }, { .procname = "tcp_low_latency", .data = &sysctl_tcp_low_latency, diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 19f0149fb6a2..231c79fe91f3 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -93,7 +93,6 @@ int sysctl_tcp_stdurg __read_mostly; int sysctl_tcp_rfc1337 __read_mostly; int sysctl_tcp_max_orphans __read_mostly = NR_FILE; int sysctl_tcp_frto __read_mostly = 2; -int sysctl_tcp_frto_response __read_mostly; int sysctl_tcp_thin_dupack __read_mostly; @@ -108,17 +107,14 @@ int sysctl_tcp_early_retrans __read_mostly = 3; #define FLAG_DATA_SACKED 0x20 /* New SACK. 
*/ #define FLAG_ECE 0x40 /* ECE in this ACK */ #define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/ -#define FLAG_ONLY_ORIG_SACKED 0x200 /* SACKs only non-rexmit sent before RTO */ #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ -#define FLAG_NONHEAD_RETRANS_ACKED 0x1000 /* Non-head rexmitted data was ACKed */ #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) #define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE) #define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED) -#define FLAG_ANY_PROGRESS (FLAG_FORWARD_PROGRESS|FLAG_SND_UNA_ADVANCED) #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH) #define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH)) @@ -1159,10 +1155,6 @@ static u8 tcp_sacktag_one(struct sock *sk, tcp_highest_sack_seq(tp))) state->reord = min(fack_count, state->reord); - - /* SACK enhanced F-RTO (RFC4138; Appendix B) */ - if (!after(end_seq, tp->frto_highmark)) - state->flag |= FLAG_ONLY_ORIG_SACKED; } if (sacked & TCPCB_LOST) { @@ -1555,7 +1547,6 @@ static int tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, u32 prior_snd_una) { - const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); const unsigned char *ptr = (skb_transport_header(ack_skb) + TCP_SKB_CB(ack_skb)->sacked); @@ -1728,12 +1719,6 @@ walk: start_seq, end_seq, dup_sack); advance_sp: - /* SACK enhanced FRTO (RFC4138, Appendix B): Clearing correct - * due to in-order walk - */ - if (after(end_seq, tp->frto_highmark)) - state.flag &= ~FLAG_ONLY_ORIG_SACKED; - i++; } @@ -1750,8 +1735,7 @@ advance_sp: tcp_verify_left_out(tp); if ((state.reord < tp->fackets_out) && - ((icsk->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker) && - (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark))) + ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker)) tcp_update_reordering(sk, tp->fackets_out - state.reord, 0); out: @@ -1825,197 +1809,6 @@ static inline void tcp_reset_reno_sack(struct tcp_sock *tp) tp->sacked_out = 0; } -static int tcp_is_sackfrto(const struct tcp_sock *tp) -{ - return (sysctl_tcp_frto == 0x2) && !tcp_is_reno(tp); -} - -/* F-RTO can only be used if TCP has never retransmitted anything other than - * head (SACK enhanced variant from Appendix B of RFC4138 is more robust here) - */ -bool tcp_use_frto(struct sock *sk) -{ - const struct tcp_sock *tp = tcp_sk(sk); - const struct inet_connection_sock *icsk = inet_csk(sk); - struct sk_buff *skb; - - if (!sysctl_tcp_frto) - return false; - - /* MTU probe and F-RTO won't really play nicely along currently */ - if (icsk->icsk_mtup.probe_size) - return false; - - if (tcp_is_sackfrto(tp)) - return true; - - /* Avoid expensive walking of rexmit queue if possible */ - if (tp->retrans_out > 1) - return false; - - skb = tcp_write_queue_head(sk); - if (tcp_skb_is_last(sk, skb)) - return true; - skb = tcp_write_queue_next(sk, skb); /* Skips head */ - tcp_for_write_queue_from(skb, sk) { - if (skb == tcp_send_head(sk)) - break; - if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) - return false; - /* Short-circuit when first non-SACKed skb has been checked */ - if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) - break; - } - return true; -} - -/* RTO occurred, but do not yet enter Loss state. 
Instead, defer RTO - * recovery a bit and use heuristics in tcp_process_frto() to detect if - * the RTO was spurious. Only clear SACKED_RETRANS of the head here to - * keep retrans_out counting accurate (with SACK F-RTO, other than head - * may still have that bit set); TCPCB_LOST and remaining SACKED_RETRANS - * bits are handled if the Loss state is really to be entered (in - * tcp_enter_frto_loss). - * - * Do like tcp_enter_loss() would; when RTO expires the second time it - * does: - * "Reduce ssthresh if it has not yet been made inside this window." - */ -void tcp_enter_frto(struct sock *sk) -{ - const struct inet_connection_sock *icsk = inet_csk(sk); - struct tcp_sock *tp = tcp_sk(sk); - struct sk_buff *skb; - - if ((!tp->frto_counter && icsk->icsk_ca_state <= TCP_CA_Disorder) || - tp->snd_una == tp->high_seq || - ((icsk->icsk_ca_state == TCP_CA_Loss || tp->frto_counter) && - !icsk->icsk_retransmits)) { - tp->prior_ssthresh = tcp_current_ssthresh(sk); - /* Our state is too optimistic in ssthresh() call because cwnd - * is not reduced until tcp_enter_frto_loss() when previous F-RTO - * recovery has not yet completed. Pattern would be this: RTO, - * Cumulative ACK, RTO (2xRTO for the same segment does not end - * up here twice). - * RFC4138 should be more specific on what to do, even though - * RTO is quite unlikely to occur after the first Cumulative ACK - * due to back-off and complexity of triggering events ... - */ - if (tp->frto_counter) { - u32 stored_cwnd; - stored_cwnd = tp->snd_cwnd; - tp->snd_cwnd = 2; - tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); - tp->snd_cwnd = stored_cwnd; - } else { - tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); - } - /* ... in theory, cong.control module could do "any tricks" in - * ssthresh(), which means that ca_state, lost bits and lost_out - * counter would have to be faked before the call occurs. We - * consider that too expensive, unlikely and hacky, so modules - * using these in ssthresh() must deal these incompatibility - * issues if they receives CA_EVENT_FRTO and frto_counter != 0 - */ - tcp_ca_event(sk, CA_EVENT_FRTO); - } - - tp->undo_marker = tp->snd_una; - tp->undo_retrans = 0; - - skb = tcp_write_queue_head(sk); - if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) - tp->undo_marker = 0; - if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { - TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; - tp->retrans_out -= tcp_skb_pcount(skb); - } - tcp_verify_left_out(tp); - - /* Too bad if TCP was application limited */ - tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1); - - /* Earlier loss recovery underway (see RFC4138; Appendix B). - * The last condition is necessary at least in tp->frto_counter case. - */ - if (tcp_is_sackfrto(tp) && (tp->frto_counter || - ((1 << icsk->icsk_ca_state) & (TCPF_CA_Recovery|TCPF_CA_Loss))) && - after(tp->high_seq, tp->snd_una)) { - tp->frto_highmark = tp->high_seq; - } else { - tp->frto_highmark = tp->snd_nxt; - } - tcp_set_ca_state(sk, TCP_CA_Disorder); - tp->high_seq = tp->snd_nxt; - tp->frto_counter = 1; -} - -/* Enter Loss state after F-RTO was applied. Dupack arrived after RTO, - * which indicates that we should follow the traditional RTO recovery, - * i.e. mark everything lost and do go-back-N retransmission. 
- */ -static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag) -{ - struct tcp_sock *tp = tcp_sk(sk); - struct sk_buff *skb; - - tp->lost_out = 0; - tp->retrans_out = 0; - if (tcp_is_reno(tp)) - tcp_reset_reno_sack(tp); - - tcp_for_write_queue(skb, sk) { - if (skb == tcp_send_head(sk)) - break; - - TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; - /* - * Count the retransmission made on RTO correctly (only when - * waiting for the first ACK and did not get it)... - */ - if ((tp->frto_counter == 1) && !(flag & FLAG_DATA_ACKED)) { - /* For some reason this R-bit might get cleared? */ - if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) - tp->retrans_out += tcp_skb_pcount(skb); - /* ...enter this if branch just for the first segment */ - flag |= FLAG_DATA_ACKED; - } else { - if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) - tp->undo_marker = 0; - TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; - } - - /* Marking forward transmissions that were made after RTO lost - * can cause unnecessary retransmissions in some scenarios, - * SACK blocks will mitigate that in some but not in all cases. - * We used to not mark them but it was causing break-ups with - * receivers that do only in-order receival. - * - * TODO: we could detect presence of such receiver and select - * different behavior per flow. - */ - if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { - TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; - tp->lost_out += tcp_skb_pcount(skb); - tp->retransmit_high = TCP_SKB_CB(skb)->end_seq; - } - } - tcp_verify_left_out(tp); - - tp->snd_cwnd = tcp_packets_in_flight(tp) + allowed_segments; - tp->snd_cwnd_cnt = 0; - tp->snd_cwnd_stamp = tcp_time_stamp; - tp->frto_counter = 0; - - tp->reordering = min_t(unsigned int, tp->reordering, - sysctl_tcp_reordering); - tcp_set_ca_state(sk, TCP_CA_Loss); - tp->high_seq = tp->snd_nxt; - TCP_ECN_queue_cwr(tp); - - tcp_clear_all_retrans_hints(tp); -} - static void tcp_clear_retrans_partial(struct tcp_sock *tp) { tp->retrans_out = 0; @@ -2090,8 +1883,6 @@ void tcp_enter_loss(struct sock *sk, int how) tcp_set_ca_state(sk, TCP_CA_Loss); tp->high_seq = tp->snd_nxt; TCP_ECN_queue_cwr(tp); - /* Abort F-RTO algorithm if one is in progress */ - tp->frto_counter = 0; } /* If ACK arrived pointing to a remembered SACK, it means that our @@ -2275,10 +2066,6 @@ static bool tcp_time_to_recover(struct sock *sk, int flag) struct tcp_sock *tp = tcp_sk(sk); __u32 packets_out; - /* Do not perform any recovery during F-RTO algorithm */ - if (tp->frto_counter) - return false; - /* Trick#1: The loss is proven. */ if (tp->lost_out) return true; @@ -2760,7 +2547,7 @@ static void tcp_try_to_open(struct sock *sk, int flag, int newly_acked_sacked) tcp_verify_left_out(tp); - if (!tp->frto_counter && !tcp_any_retrans_done(sk)) + if (!tcp_any_retrans_done(sk)) tp->retrans_stamp = 0; if (flag & FLAG_ECE) @@ -3198,8 +2985,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, flag |= FLAG_RETRANS_DATA_ACKED; ca_seq_rtt = -1; seq_rtt = -1; - if ((flag & FLAG_DATA_ACKED) || (acked_pcount > 1)) - flag |= FLAG_NONHEAD_RETRANS_ACKED; } else { ca_seq_rtt = now - scb->when; last_ackt = skb->tstamp; @@ -3408,150 +3193,6 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 return flag; } -/* A very conservative spurious RTO response algorithm: reduce cwnd and - * continue in congestion avoidance. 
- */ -static void tcp_conservative_spur_to_response(struct tcp_sock *tp) -{ - tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); - tp->snd_cwnd_cnt = 0; - TCP_ECN_queue_cwr(tp); - tcp_moderate_cwnd(tp); -} - -/* A conservative spurious RTO response algorithm: reduce cwnd using - * PRR and continue in congestion avoidance. - */ -static void tcp_cwr_spur_to_response(struct sock *sk) -{ - tcp_enter_cwr(sk, 0); -} - -static void tcp_undo_spur_to_response(struct sock *sk, int flag) -{ - if (flag & FLAG_ECE) - tcp_cwr_spur_to_response(sk); - else - tcp_undo_cwr(sk, true); -} - -/* F-RTO spurious RTO detection algorithm (RFC4138) - * - * F-RTO affects during two new ACKs following RTO (well, almost, see inline - * comments). State (ACK number) is kept in frto_counter. When ACK advances - * window (but not to or beyond highest sequence sent before RTO): - * On First ACK, send two new segments out. - * On Second ACK, RTO was likely spurious. Do spurious response (response - * algorithm is not part of the F-RTO detection algorithm - * given in RFC4138 but can be selected separately). - * Otherwise (basically on duplicate ACK), RTO was (likely) caused by a loss - * and TCP falls back to conventional RTO recovery. F-RTO allows overriding - * of Nagle, this is done using frto_counter states 2 and 3, when a new data - * segment of any size sent during F-RTO, state 2 is upgraded to 3. - * - * Rationale: if the RTO was spurious, new ACKs should arrive from the - * original window even after we transmit two new data segments. - * - * SACK version: - * on first step, wait until first cumulative ACK arrives, then move to - * the second step. In second step, the next ACK decides. - * - * F-RTO is implemented (mainly) in four functions: - * - tcp_use_frto() is used to determine if TCP is can use F-RTO - * - tcp_enter_frto() prepares TCP state on RTO if F-RTO is used, it is - * called when tcp_use_frto() showed green light - * - tcp_process_frto() handles incoming ACKs during F-RTO algorithm - * - tcp_enter_frto_loss() is called if there is not enough evidence - * to prove that the RTO is indeed spurious. It transfers the control - * from F-RTO to the conventional RTO recovery - */ -static bool tcp_process_frto(struct sock *sk, int flag) -{ - struct tcp_sock *tp = tcp_sk(sk); - - tcp_verify_left_out(tp); - - /* Duplicate the behavior from Loss state (fastretrans_alert) */ - if (flag & FLAG_DATA_ACKED) - inet_csk(sk)->icsk_retransmits = 0; - - if ((flag & FLAG_NONHEAD_RETRANS_ACKED) || - ((tp->frto_counter >= 2) && (flag & FLAG_RETRANS_DATA_ACKED))) - tp->undo_marker = 0; - - if (!before(tp->snd_una, tp->frto_highmark)) { - tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag); - return true; - } - - if (!tcp_is_sackfrto(tp)) { - /* RFC4138 shortcoming in step 2; should also have case c): - * ACK isn't duplicate nor advances window, e.g., opposite dir - * data, winupdate - */ - if (!(flag & FLAG_ANY_PROGRESS) && (flag & FLAG_NOT_DUP)) - return true; - - if (!(flag & FLAG_DATA_ACKED)) { - tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 0 : 3), - flag); - return true; - } - } else { - if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) { - if (!tcp_packets_in_flight(tp)) { - tcp_enter_frto_loss(sk, 2, flag); - return true; - } - - /* Prevent sending of new data. 
*/ - tp->snd_cwnd = min(tp->snd_cwnd, - tcp_packets_in_flight(tp)); - return true; - } - - if ((tp->frto_counter >= 2) && - (!(flag & FLAG_FORWARD_PROGRESS) || - ((flag & FLAG_DATA_SACKED) && - !(flag & FLAG_ONLY_ORIG_SACKED)))) { - /* RFC4138 shortcoming (see comment above) */ - if (!(flag & FLAG_FORWARD_PROGRESS) && - (flag & FLAG_NOT_DUP)) - return true; - - tcp_enter_frto_loss(sk, 3, flag); - return true; - } - } - - if (tp->frto_counter == 1) { - /* tcp_may_send_now needs to see updated state */ - tp->snd_cwnd = tcp_packets_in_flight(tp) + 2; - tp->frto_counter = 2; - - if (!tcp_may_send_now(sk)) - tcp_enter_frto_loss(sk, 2, flag); - - return true; - } else { - switch (sysctl_tcp_frto_response) { - case 2: - tcp_undo_spur_to_response(sk, flag); - break; - case 1: - tcp_conservative_spur_to_response(tp); - break; - default: - tcp_cwr_spur_to_response(sk); - break; - } - tp->frto_counter = 0; - tp->undo_marker = 0; - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS); - } - return false; -} - /* RFC 5961 7 [ACK Throttling] */ static void tcp_send_challenge_ack(struct sock *sk) { @@ -3616,7 +3257,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) int prior_packets; int prior_sacked = tp->sacked_out; int pkts_acked = 0; - bool frto_cwnd = false; /* If the ack is older than previous acks * then we can probably ignore it. @@ -3690,22 +3330,15 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) pkts_acked = prior_packets - tp->packets_out; - if (tp->frto_counter) - frto_cwnd = tcp_process_frto(sk, flag); - /* Guarantee sacktag reordering detection against wrap-arounds */ - if (before(tp->frto_highmark, tp->snd_una)) - tp->frto_highmark = 0; - if (tcp_ack_is_dubious(sk, flag)) { /* Advance CWND, if state allows this. 
*/ - if ((flag & FLAG_DATA_ACKED) && !frto_cwnd && - tcp_may_raise_cwnd(sk, flag)) + if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(sk, flag)) tcp_cong_avoid(sk, ack, prior_in_flight); is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, is_dupack, flag); } else { - if ((flag & FLAG_DATA_ACKED) && !frto_cwnd) + if (flag & FLAG_DATA_ACKED) tcp_cong_avoid(sk, ack, prior_in_flight); } diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 8f0234f8bb95..05eaf8904613 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -422,9 +422,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, newtp->snd_cwnd = TCP_INIT_CWND; newtp->snd_cwnd_cnt = 0; - newtp->frto_counter = 0; - newtp->frto_highmark = 0; - if (newicsk->icsk_ca_ops != &tcp_init_congestion_ops && !try_module_get(newicsk->icsk_ca_ops->owner)) newicsk->icsk_ca_ops = &tcp_init_congestion_ops; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index e787ecec505e..163cf5fc0119 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -78,10 +78,6 @@ static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb) tcp_advance_send_head(sk, skb); tp->snd_nxt = TCP_SKB_CB(skb)->end_seq; - /* Don't override Nagle indefinitely with F-RTO */ - if (tp->frto_counter == 2) - tp->frto_counter = 3; - tp->packets_out += tcp_skb_pcount(skb); if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) @@ -1470,11 +1466,8 @@ static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buf if (nonagle & TCP_NAGLE_PUSH) return true; - /* Don't use the nagle rule for urgent data (or for the final FIN). - * Nagle can be ignored during F-RTO too (see RFC4138). - */ - if (tcp_urg_mode(tp) || (tp->frto_counter == 2) || - (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) + /* Don't use the nagle rule for urgent data (or for the final FIN). */ + if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) return true; if (!tcp_nagle_check(tp, skb, cur_mss, nonagle)) diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index eeccf795e917..4b85e6f636c9 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -416,11 +416,7 @@ void tcp_retransmit_timer(struct sock *sk) NET_INC_STATS_BH(sock_net(sk), mib_idx); } - if (tcp_use_frto(sk)) { - tcp_enter_frto(sk); - } else { - tcp_enter_loss(sk, 0); - } + tcp_enter_loss(sk, 0); if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) { /* Retransmission failed because of local congestion, diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c index 1b91bf48e277..76a1e23259e1 100644 --- a/net/ipv4/tcp_westwood.c +++ b/net/ipv4/tcp_westwood.c @@ -236,7 +236,7 @@ static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event) tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk); break; - case CA_EVENT_FRTO: + case CA_EVENT_LOSS: tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk); /* Update RTT_min when next ack arrives */ w->reset_rtt_min = 1; -- cgit v1.2.3-70-g09d2 From e33099f96d99c391b3325caa9c44258de04aae86 Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Wed, 20 Mar 2013 13:33:00 +0000 Subject: tcp: implement RFC5682 F-RTO This patch implements F-RTO (forward RTO recovery): When the first retransmission after timeout is acknowledged, F-RTO sends new data instead of old data.
If the next ACK acknowledges some never-retransmitted data, then the timeout was spurious and the congestion state is reverted. Otherwise if the next ACK selectively acknowledges the new data, then the timeout was genuine and the loss recovery continues. This idea applies to recurring timeouts as well. While F-RTO sends different data during timeout recovery, it does not (and should not) change the congestion control. The implementation follows the three steps of the SACK enhanced algorithm (section 3) in RFC5682. Step 1 is in tcp_enter_loss(). Steps 2 and 3 are in tcp_process_loss(). The basic version is not supported because the SACK enhanced version also works for non-SACK connections. The new implementation is functionally in parity with the old F-RTO implementation except the one case where it increases undo events: In addition to the RFC algorithm, a spurious timeout may be detected without sending data in step 2, as long as the SACK confirms not all the original data are dropped. When this happens, the sender will undo the cwnd and perhaps enter fast recovery instead. This additional check increases the F-RTO undo events by 5x compared to the prior implementation on Google Web servers, since the sender often does not have new data to send for HTTP. Note F-RTO may detect spurious timeout before Eifel with timestamps does so. Signed-off-by: Yuchung Cheng Acked-by: Eric Dumazet Acked-by: Neal Cardwell Signed-off-by: David S. Miller --- Documentation/networking/ip-sysctl.txt | 18 +++------ include/linux/tcp.h | 3 +- net/ipv4/tcp_input.c | 73 ++++++++++++++++++++++++++++------ 3 files changed, 68 insertions(+), 26 deletions(-) (limited to 'include/linux') diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 8a977a0aaede..f98ca633b528 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -225,19 +225,13 @@ tcp_fin_timeout - INTEGER Default: 60 seconds tcp_frto - INTEGER - Enables Forward RTO-Recovery (F-RTO) defined in RFC4138. + Enables Forward RTO-Recovery (F-RTO) defined in RFC5682. F-RTO is an enhanced recovery algorithm for TCP retransmission - timeouts. It is particularly beneficial in wireless environments - where packet loss is typically due to random radio interference - rather than intermediate router congestion. F-RTO is sender-side - only modification. Therefore it does not require any support from - the peer. - - If set to 1, basic version is enabled. 2 enables SACK enhanced - F-RTO if flow uses SACK. The basic version can be used also when - SACK is in use though scenario(s) with it exists where F-RTO - interacts badly with the packet counting of the SACK enabled TCP - flow. + timeouts. It is particularly beneficial in networks where the + RTT fluctuates (e.g., wireless). F-RTO is sender-side only + modification. It does not require any support from the peer. + + By default it's enabled with a non-zero value. 0 disables F-RTO. tcp_keepalive_time - INTEGER How often TCP sends out keepalive messages when keepalive is enabled. diff --git a/include/linux/tcp.h b/include/linux/tcp.h index f5f203b36379..5adbc33d1ab3 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -192,7 +192,8 @@ struct tcp_sock { u8 nonagle : 4,/* Disable Nagle algorithm?
*/ thin_lto : 1,/* Use linear timeouts for thin streams */ thin_dupack : 1,/* Fast retransmit on first dupack */ - repair : 1; + repair : 1, + frto : 1;/* F-RTO (RFC5682) activated in CA_Loss */ u8 repair_queue; u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */ syn_data:1, /* SYN includes data */ diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 8d821e45b917..b2b36196b342 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -107,6 +107,7 @@ int sysctl_tcp_early_retrans __read_mostly = 3; #define FLAG_DATA_SACKED 0x20 /* New SACK. */ #define FLAG_ECE 0x40 /* ECE in this ACK */ #define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/ +#define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */ #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ @@ -1155,6 +1156,8 @@ static u8 tcp_sacktag_one(struct sock *sk, tcp_highest_sack_seq(tp))) state->reord = min(fack_count, state->reord); + if (!after(end_seq, tp->high_seq)) + state->flag |= FLAG_ORIG_SACK_ACKED; } if (sacked & TCPCB_LOST) { @@ -1835,10 +1838,13 @@ void tcp_enter_loss(struct sock *sk, int how) const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; + bool new_recovery = false; /* Reduce ssthresh if it has not yet been made inside this window. */ - if (icsk->icsk_ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq || + if (icsk->icsk_ca_state <= TCP_CA_Disorder || + !after(tp->high_seq, tp->snd_una) || (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { + new_recovery = true; tp->prior_ssthresh = tcp_current_ssthresh(sk); tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); tcp_ca_event(sk, CA_EVENT_LOSS); @@ -1883,6 +1889,14 @@ void tcp_enter_loss(struct sock *sk, int how) tcp_set_ca_state(sk, TCP_CA_Loss); tp->high_seq = tp->snd_nxt; TCP_ECN_queue_cwr(tp); + + /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous + * loss recovery is underway except recurring timeout(s) on + * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing + */ + tp->frto = sysctl_tcp_frto && + (new_recovery || icsk->icsk_retransmits) && + !inet_csk(sk)->icsk_mtup.probe_size; } /* If ACK arrived pointing to a remembered SACK, it means that our @@ -2426,12 +2440,12 @@ static int tcp_try_undo_partial(struct sock *sk, int acked) return failed; } -/* Undo during loss recovery after partial ACK. */ -static bool tcp_try_undo_loss(struct sock *sk) +/* Undo during loss recovery after partial ACK or using F-RTO. */ +static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) { struct tcp_sock *tp = tcp_sk(sk); - if (tcp_may_undo(tp)) { + if (frto_undo || tcp_may_undo(tp)) { struct sk_buff *skb; tcp_for_write_queue(skb, sk) { if (skb == tcp_send_head(sk)) @@ -2445,9 +2459,12 @@ static bool tcp_try_undo_loss(struct sock *sk) tp->lost_out = 0; tcp_undo_cwr(sk, true); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO); + if (frto_undo) + NET_INC_STATS_BH(sock_net(sk), + LINUX_MIB_TCPSPURIOUSRTOS); inet_csk(sk)->icsk_retransmits = 0; tp->undo_marker = 0; - if (tcp_is_sack(tp)) + if (frto_undo || tcp_is_sack(tp)) tcp_set_ca_state(sk, TCP_CA_Open); return true; } @@ -2667,24 +2684,52 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack) /* Process an ACK in CA_Loss state. 
Move to CA_Open if lost data are * recovered or spurious. Otherwise retransmits more on partial ACKs. */ -static void tcp_process_loss(struct sock *sk, int flag) +static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); + bool recovered = !before(tp->snd_una, tp->high_seq); - if (!before(tp->snd_una, tp->high_seq)) { + if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */ + if (flag & FLAG_ORIG_SACK_ACKED) { + /* Step 3.b. A timeout is spurious if not all data are + * lost, i.e., never-retransmitted data are (s)acked. + */ + tcp_try_undo_loss(sk, true); + return; + } + if (after(tp->snd_nxt, tp->high_seq) && + (flag & FLAG_DATA_SACKED || is_dupack)) { + tp->frto = 0; /* Loss was real: 2nd part of step 3.a */ + } else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) { + tp->high_seq = tp->snd_nxt; + __tcp_push_pending_frames(sk, tcp_current_mss(sk), + TCP_NAGLE_OFF); + if (after(tp->snd_nxt, tp->high_seq)) + return; /* Step 2.b */ + tp->frto = 0; + } + } + + if (recovered) { + /* F-RTO RFC5682 sec 3.1 step 2.a and 1st part of step 3.a */ icsk->icsk_retransmits = 0; tcp_try_undo_recovery(sk); return; } - if (flag & FLAG_DATA_ACKED) icsk->icsk_retransmits = 0; - if (tcp_is_reno(tp) && flag & FLAG_SND_UNA_ADVANCED) - tcp_reset_reno_sack(tp); - if (tcp_try_undo_loss(sk)) + if (tcp_is_reno(tp)) { + /* A Reno DUPACK means new data in F-RTO step 2.b above are + * delivered. Lower inflight to clock out (re)tranmissions. + */ + if (after(tp->snd_nxt, tp->high_seq) && is_dupack) + tcp_add_reno_sack(sk); + else if (flag & FLAG_SND_UNA_ADVANCED) + tcp_reset_reno_sack(tp); + } + if (tcp_try_undo_loss(sk, false)) return; - tcp_moderate_cwnd(tp); tcp_xmit_retransmit_queue(sk); } @@ -2764,7 +2809,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked; break; case TCP_CA_Loss: - tcp_process_loss(sk, flag); + tcp_process_loss(sk, flag, is_dupack); if (icsk->icsk_ca_state != TCP_CA_Open) return; /* Fall through to processing in Open state. */ @@ -3003,6 +3048,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, } if (!(sacked & TCPCB_SACKED_ACKED)) reord = min(pkts_acked, reord); + if (!after(scb->end_seq, tp->high_seq)) + flag |= FLAG_ORIG_SACK_ACKED; } if (sacked & TCPCB_SACKED_ACKED) -- cgit v1.2.3-70-g09d2 From 79617801ea0c0e6664cb497d4c1892c2ff407364 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 21 Mar 2013 22:22:03 +0100 Subject: filter: bpf_jit_comp: refactor and unify BPF JIT image dump output If bpf_jit_enable > 1, then we dump the emitted JIT compiled image after creation. Currently, only SPARC and PowerPC has similar output as in the reference implementation on x86_64. Make a small helper function in order to reduce duplicated code and make the dump output uniform across architectures x86_64, SPARC, PPC, ARM (e.g. on ARM flen, pass and proglen are currently not shown, but would be interesting to know as well), also for future BPF JIT implementations on other archs. Cc: Mircea Gherzan Cc: Matt Evans Cc: Eric Dumazet Cc: David S. Miller Signed-off-by: Daniel Borkmann Signed-off-by: David S. 
Miller --- arch/arm/net/bpf_jit_32.c | 5 ++--- arch/powerpc/net/bpf_jit_comp.c | 12 ++++-------- arch/sparc/net/bpf_jit_comp.c | 6 +----- arch/x86/net/bpf_jit_comp.c | 9 ++------- include/linux/filter.h | 10 ++++++++++ 5 files changed, 19 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index a0bd8a755bdf..1a643ee8e082 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -918,9 +918,8 @@ void bpf_jit_compile(struct sk_filter *fp) #endif if (bpf_jit_enable > 1) - print_hex_dump(KERN_INFO, "BPF JIT code: ", - DUMP_PREFIX_ADDRESS, 16, 4, ctx.target, - alloc_size, false); + /* there are 2 passes here */ + bpf_jit_dump(fp->len, alloc_size, 2, ctx.target); fp->bpf_func = (void *)ctx.target; out: diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index e834f1ec23c8..c427ae36374a 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -671,16 +671,12 @@ void bpf_jit_compile(struct sk_filter *fp) } if (bpf_jit_enable > 1) - pr_info("flen=%d proglen=%u pass=%d image=%p\n", - flen, proglen, pass, image); + /* Note that we output the base address of the code_base + * rather than image, since opcodes are in code_base. + */ + bpf_jit_dump(flen, proglen, pass, code_base); if (image) { - if (bpf_jit_enable > 1) - print_hex_dump(KERN_ERR, "JIT code: ", - DUMP_PREFIX_ADDRESS, - 16, 1, code_base, - proglen, false); - bpf_flush_icache(code_base, code_base + (proglen/4)); /* Function descriptor nastiness: Address + TOC */ ((u64 *)image)[0] = (u64)code_base; diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c index 3109ca684a99..d36a85ebb5e0 100644 --- a/arch/sparc/net/bpf_jit_comp.c +++ b/arch/sparc/net/bpf_jit_comp.c @@ -795,13 +795,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf]; } if (bpf_jit_enable > 1) - pr_err("flen=%d proglen=%u pass=%d image=%p\n", - flen, proglen, pass, image); + bpf_jit_dump(flen, proglen, pass, image); if (image) { - if (bpf_jit_enable > 1) - print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS, - 16, 1, image, proglen, false); bpf_flush_icache(image, image + proglen); fp->bpf_func = (void *)image; } diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 3cbe45381bbb..f66b54086ce5 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -725,17 +725,12 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; } oldproglen = proglen; } + if (bpf_jit_enable > 1) - pr_err("flen=%d proglen=%u pass=%d image=%p\n", - flen, proglen, pass, image); + bpf_jit_dump(flen, proglen, pass, image); if (image) { - if (bpf_jit_enable > 1) - print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS, - 16, 1, image, proglen, false); - bpf_flush_icache(image, image + proglen); - fp->bpf_func = (void *)image; } out: diff --git a/include/linux/filter.h b/include/linux/filter.h index d2059cb4e465..d7d25083130b 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -50,6 +50,16 @@ extern int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, uns #ifdef CONFIG_BPF_JIT extern void bpf_jit_compile(struct sk_filter *fp); extern void bpf_jit_free(struct sk_filter *fp); + +static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, + u32 pass, void *image) +{ + pr_err("flen=%u proglen=%u pass=%u image=%p\n", + flen, proglen, pass, image); + if (image) + print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS, + 16, 1, image, proglen, 
false); +} #define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns) #else static inline void bpf_jit_compile(struct sk_filter *fp) -- cgit v1.2.3-70-g09d2 From 19dde0bd71e3dffb03ddc509019e22250f4e20c0 Mon Sep 17 00:00:00 2001 From: Janusz Dziedzic Date: Thu, 21 Mar 2013 15:47:54 +0100 Subject: cfg80211: add P2P Notice of Absence attribute Add P2P Notice of Absence attribute structure. Signed-off-by: Janusz Dziedzic Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'include/linux') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 4cf0c9e4dd99..d10b5bba3268 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1027,6 +1027,26 @@ enum ieee80211_p2p_attr_id { IEEE80211_P2P_ATTR_MAX }; +/* Notice of Absence attribute - described in P2P spec 4.1.14 */ +/* Typical max value used here */ +#define IEEE80211_P2P_NOA_DESC_MAX 4 + +struct ieee80211_p2p_noa_desc { + u8 count; + __le32 duration; + __le32 interval; + __le32 start_time; +} __packed; + +struct ieee80211_p2p_noa_attr { + u8 index; + u8 oppps_ctwindow; + struct ieee80211_p2p_noa_desc desc[IEEE80211_P2P_NOA_DESC_MAX]; +} __packed; + +#define IEEE80211_P2P_OPPPS_ENABLE_BIT BIT(7) +#define IEEE80211_P2P_OPPPS_CTWINDOW_MASK 0x7F + /** * struct ieee80211_bar - HT Block Ack Request * -- cgit v1.2.3-70-g09d2 From 9d0ca6ed6f2f12eb488f450d5d38d047aa402a53 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 21 Mar 2013 14:17:34 +0000 Subject: virtio: remove obsolete virtqueue_get_queue_index() You can access it directly now, since 3.8: v3.7-rc1-13-g06ca287 'virtio: move queue_index and num_free fields into core struct virtqueue.' Cc: Cornelia Huck Signed-off-by: Rusty Russell Acked-by: Cornelia Huck Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 4 ++-- drivers/s390/kvm/virtio_ccw.c | 6 +++--- include/linux/virtio.h | 6 ------ 3 files changed, 5 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 57ac4b0294bc..f7d67e8eb1aa 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -154,7 +154,7 @@ struct padded_vnet_hdr { */ static int vq2txq(struct virtqueue *vq) { - return (virtqueue_get_queue_index(vq) - 1) / 2; + return (vq->index - 1) / 2; } static int txq2vq(int txq) @@ -164,7 +164,7 @@ static int txq2vq(int txq) static int vq2rxq(struct virtqueue *vq) { - return virtqueue_get_queue_index(vq) / 2; + return vq->index / 2; } static int rxq2vq(int rxq) diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c index 2029b6caa595..fb877b59ec57 100644 --- a/drivers/s390/kvm/virtio_ccw.c +++ b/drivers/s390/kvm/virtio_ccw.c @@ -166,7 +166,7 @@ static void virtio_ccw_kvm_notify(struct virtqueue *vq) vcdev = to_vc_device(info->vq->vdev); ccw_device_get_schid(vcdev->cdev, &schid); - do_kvm_notify(schid, virtqueue_get_queue_index(vq)); + do_kvm_notify(schid, vq->index); } static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev, @@ -188,7 +188,7 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw) unsigned long flags; unsigned long size; int ret; - unsigned int index = virtqueue_get_queue_index(vq); + unsigned int index = vq->index; /* Remove from our list. 
*/ spin_lock_irqsave(&vcdev->lock, flags); @@ -610,7 +610,7 @@ static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev, vq = NULL; spin_lock_irqsave(&vcdev->lock, flags); list_for_each_entry(info, &vcdev->virtqueues, node) { - if (virtqueue_get_queue_index(info->vq) == index) { + if (info->vq->index == index) { vq = info->vq; break; } diff --git a/include/linux/virtio.h b/include/linux/virtio.h index ff6714e6d0f5..2d7a5e045908 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -58,12 +58,6 @@ void *virtqueue_detach_unused_buf(struct virtqueue *vq); unsigned int virtqueue_get_vring_size(struct virtqueue *vq); -/* FIXME: Obsolete accessor, but required for virtio_net merge. */ -static inline unsigned int virtqueue_get_queue_index(struct virtqueue *vq) -{ - return vq->index; -} - /** * virtio_device - representation of a device using virtio * @index: unique position on the virtio bus -- cgit v1.2.3-70-g09d2 From c3a07134e6aa5b93a37f72ffa3d11fadf72bf757 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 22 Mar 2013 03:39:28 +0000 Subject: mv643xx_eth: convert to use the Marvell Orion MDIO driver This patch converts the Marvell MV643XX ethernet driver to use the Marvell Orion MDIO driver. As a result, PowerPC and ARM platforms registering the Marvell MV643XX ethernet driver are also updated to register a Marvell Orion MDIO driver. This driver voluntarily overlaps with the Marvell Ethernet shared registers because it will use a subset of this shared register (shared_base + 0x4 to shared_base + 0x84). The Ethernet driver is also updated to look up for a PHY device using the Orion MDIO bus driver. For ARM and PowerPC we register a single instance of the "mvmdio" driver in the system like it used to be done with the use of the "shared_smi" platform_data cookie on ARM. Note that it is safe to register the mvmdio driver only for the "ge00" instance of the driver because this "ge00" interface is guaranteed to always be explicitely registered by consumers of arch/arm/plat-orion/common.c and other instances (ge01, ge10 and ge11) were all pointing their shared_smi to ge00. For PowerPC the in-tree Device Tree Source files mention only one MV643XX ethernet MAC instance so the MDIO bus driver is registered only when id == 0. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- arch/arm/plat-orion/common.c | 54 ++++---- arch/powerpc/platforms/chrp/pegasos_eth.c | 20 +++ arch/powerpc/sysdev/mv64x60_dev.c | 16 ++- drivers/net/ethernet/marvell/Kconfig | 5 +- drivers/net/ethernet/marvell/Makefile | 2 +- drivers/net/ethernet/marvell/mv643xx_eth.c | 195 +++-------------------------- include/linux/mv643xx_eth.h | 1 - 7 files changed, 84 insertions(+), 209 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index 2d4b6414609f..251f827271e9 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c @@ -238,6 +238,7 @@ static __init void ge_complete( struct mv643xx_eth_shared_platform_data *orion_ge_shared_data, struct resource *orion_ge_resource, unsigned long irq, struct platform_device *orion_ge_shared, + struct platform_device *orion_ge_mvmdio, struct mv643xx_eth_platform_data *eth_data, struct platform_device *orion_ge) { @@ -247,6 +248,8 @@ static __init void ge_complete( orion_ge->dev.platform_data = eth_data; platform_device_register(orion_ge_shared); + if (orion_ge_mvmdio) + platform_device_register(orion_ge_mvmdio); platform_device_register(orion_ge); } @@ -258,8 +261,6 @@ struct mv643xx_eth_shared_platform_data orion_ge00_shared_data; static struct resource orion_ge00_shared_resources[] = { { .name = "ge00 base", - }, { - .name = "ge00 err irq", }, }; @@ -271,6 +272,19 @@ static struct platform_device orion_ge00_shared = { }, }; +static struct resource orion_ge_mvmdio_resources[] = { + { + .name = "ge00 mvmdio base", + }, { + .name = "ge00 mvmdio err irq", + }, +}; + +static struct platform_device orion_ge_mvmdio = { + .name = "orion-mdio", + .id = -1, +}; + static struct resource orion_ge00_resources[] = { { .name = "ge00 irq", @@ -295,26 +309,25 @@ void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data, unsigned int tx_csum_limit) { fill_resources(&orion_ge00_shared, orion_ge00_shared_resources, - mapbase + 0x2000, SZ_16K - 1, irq_err); + mapbase + 0x2000, SZ_16K - 1, NO_IRQ); + fill_resources(&orion_ge_mvmdio, orion_ge_mvmdio_resources, + mapbase + 0x2004, 0x84 - 1, irq_err); orion_ge00_shared_data.tx_csum_limit = tx_csum_limit; ge_complete(&orion_ge00_shared_data, orion_ge00_resources, irq, &orion_ge00_shared, + &orion_ge_mvmdio, eth_data, &orion_ge00); } /***************************************************************************** * GE01 ****************************************************************************/ -struct mv643xx_eth_shared_platform_data orion_ge01_shared_data = { - .shared_smi = &orion_ge00_shared, -}; +struct mv643xx_eth_shared_platform_data orion_ge01_shared_data; static struct resource orion_ge01_shared_resources[] = { { .name = "ge01 base", - }, { - .name = "ge01 err irq", - }, + } }; static struct platform_device orion_ge01_shared = { @@ -349,26 +362,23 @@ void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data, unsigned int tx_csum_limit) { fill_resources(&orion_ge01_shared, orion_ge01_shared_resources, - mapbase + 0x2000, SZ_16K - 1, irq_err); + mapbase + 0x2000, SZ_16K - 1, NO_IRQ); orion_ge01_shared_data.tx_csum_limit = tx_csum_limit; ge_complete(&orion_ge01_shared_data, orion_ge01_resources, irq, &orion_ge01_shared, + NULL, eth_data, &orion_ge01); } /***************************************************************************** * GE10 ****************************************************************************/ -struct mv643xx_eth_shared_platform_data orion_ge10_shared_data = { - .shared_smi = 
&orion_ge00_shared, -}; +struct mv643xx_eth_shared_platform_data orion_ge10_shared_data; static struct resource orion_ge10_shared_resources[] = { { .name = "ge10 base", - }, { - .name = "ge10 err irq", - }, + } }; static struct platform_device orion_ge10_shared = { @@ -402,24 +412,21 @@ void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data, unsigned long irq_err) { fill_resources(&orion_ge10_shared, orion_ge10_shared_resources, - mapbase + 0x2000, SZ_16K - 1, irq_err); + mapbase + 0x2000, SZ_16K - 1, NO_IRQ); ge_complete(&orion_ge10_shared_data, orion_ge10_resources, irq, &orion_ge10_shared, + NULL, eth_data, &orion_ge10); } /***************************************************************************** * GE11 ****************************************************************************/ -struct mv643xx_eth_shared_platform_data orion_ge11_shared_data = { - .shared_smi = &orion_ge00_shared, -}; +struct mv643xx_eth_shared_platform_data orion_ge11_shared_data; static struct resource orion_ge11_shared_resources[] = { { .name = "ge11 base", - }, { - .name = "ge11 err irq", }, }; @@ -454,9 +461,10 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data, unsigned long irq_err) { fill_resources(&orion_ge11_shared, orion_ge11_shared_resources, - mapbase + 0x2000, SZ_16K - 1, irq_err); + mapbase + 0x2000, SZ_16K - 1, NO_IRQ); ge_complete(&orion_ge11_shared_data, orion_ge11_resources, irq, &orion_ge11_shared, + NULL, eth_data, &orion_ge11); } diff --git a/arch/powerpc/platforms/chrp/pegasos_eth.c b/arch/powerpc/platforms/chrp/pegasos_eth.c index 039fc8e82199..2b4dc6abde6c 100644 --- a/arch/powerpc/platforms/chrp/pegasos_eth.c +++ b/arch/powerpc/platforms/chrp/pegasos_eth.c @@ -47,6 +47,25 @@ static struct platform_device mv643xx_eth_shared_device = { .resource = mv643xx_eth_shared_resources, }; +/* + * The orion mdio driver only covers shared + 0x4 up to shared + 0x84 - 1 + */ +static struct resource mv643xx_eth_mvmdio_resources[] = { + [0] = { + .name = "ethernet mdio base", + .start = 0xf1000000 + MV643XX_ETH_SHARED_REGS + 0x4, + .end = 0xf1000000 + MV643XX_ETH_SHARED_REGS + 0x83, + .flags = IORESOURCE_MEM, + }, +}; + +static struct platform_device mv643xx_eth_mvmdio_device = { + .name = "orion-mdio", + .id = -1, + .num_resources = ARRAY_SIZE(mv643xx_eth_mvmdio_resources), + .resource = mv643xx_eth_shared_resources, +}; + static struct resource mv643xx_eth_port1_resources[] = { [0] = { .name = "eth port1 irq", @@ -82,6 +101,7 @@ static struct platform_device eth_port1_device = { static struct platform_device *mv643xx_eth_pd_devs[] __initdata = { &mv643xx_eth_shared_device, + &mv643xx_eth_mvmdio_device, ð_port1_device, }; diff --git a/arch/powerpc/sysdev/mv64x60_dev.c b/arch/powerpc/sysdev/mv64x60_dev.c index 0f6af41ebb44..4a25c26f0bf4 100644 --- a/arch/powerpc/sysdev/mv64x60_dev.c +++ b/arch/powerpc/sysdev/mv64x60_dev.c @@ -214,15 +214,27 @@ static struct platform_device * __init mv64x60_eth_register_shared_pdev( struct device_node *np, int id) { struct platform_device *pdev; - struct resource r[1]; + struct resource r[2]; int err; err = of_address_to_resource(np, 0, &r[0]); if (err) return ERR_PTR(err); + /* register an orion mdio bus driver */ + r[1].start = r[0].start + 0x4; + r[1].end = r[0].start + 0x84 - 1; + r[1].flags = IORESOURCE_MEM; + + if (id == 0) { + pdev = platform_device_register_simple("orion-mdio", -1, &r[1], 1); + if (!pdev) + return pdev; + } + pdev = platform_device_register_simple(MV643XX_ETH_SHARED_NAME, id, - r, 1); + &r[0], 1); + return 
pdev; } diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index edfba9370922..5170ecb00acc 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -23,6 +23,7 @@ config MV643XX_ETH depends on (MV64X60 || PPC32 || PLAT_ORION) && INET select INET_LRO select PHYLIB + select MVMDIO ---help--- This driver supports the gigabit ethernet MACs in the Marvell Discovery PPC/MIPS chipset family (MV643XX) and @@ -38,9 +39,7 @@ config MVMDIO interface units of the Marvell EBU SoCs (Kirkwood, Orion5x, Dove, Armada 370 and Armada XP). - For now, this driver is only needed for the MVNETA driver - (used on Armada 370 and XP), but it could be used in the - future by the MV643XX_ETH driver. + This driver is used by the MV643XX_ETH and MVNETA drivers. config MVNETA tristate "Marvell Armada 370/XP network interface support" diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile index 7f63b4aac434..5c4a7765ff0e 100644 --- a/drivers/net/ethernet/marvell/Makefile +++ b/drivers/net/ethernet/marvell/Makefile @@ -2,8 +2,8 @@ # Makefile for the Marvell device drivers. # -obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o obj-$(CONFIG_MVMDIO) += mvmdio.o +obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o obj-$(CONFIG_MVNETA) += mvneta.o obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o obj-$(CONFIG_SKGE) += skge.o diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index d1ecf4bf7da7..a65a92ef19ec 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -69,14 +69,6 @@ static char mv643xx_eth_driver_version[] = "1.4"; * Registers shared between all ports. */ #define PHY_ADDR 0x0000 -#define SMI_REG 0x0004 -#define SMI_BUSY 0x10000000 -#define SMI_READ_VALID 0x08000000 -#define SMI_OPCODE_READ 0x04000000 -#define SMI_OPCODE_WRITE 0x00000000 -#define ERR_INT_CAUSE 0x0080 -#define ERR_INT_SMI_DONE 0x00000010 -#define ERR_INT_MASK 0x0084 #define WINDOW_BASE(w) (0x0200 + ((w) << 3)) #define WINDOW_SIZE(w) (0x0204 + ((w) << 3)) #define WINDOW_REMAP_HIGH(w) (0x0280 + ((w) << 2)) @@ -265,25 +257,6 @@ struct mv643xx_eth_shared_private { */ void __iomem *base; - /* - * Points at the right SMI instance to use. - */ - struct mv643xx_eth_shared_private *smi; - - /* - * Provides access to local SMI interface. - */ - struct mii_bus *smi_bus; - - /* - * If we have access to the error interrupt pin (which is - * somewhat misnamed as it not only reflects internal errors - * but also reflects SMI completion), use that to wait for - * SMI access completion instead of polling the SMI busy bit. - */ - int err_interrupt; - wait_queue_head_t smi_busy_wait; - /* * Per-port MBUS window access register value. 
*/ @@ -1122,97 +1095,6 @@ out_write: wrlp(mp, PORT_SERIAL_CONTROL, pscr); } -static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id) -{ - struct mv643xx_eth_shared_private *msp = dev_id; - - if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) { - writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE); - wake_up(&msp->smi_busy_wait); - return IRQ_HANDLED; - } - - return IRQ_NONE; -} - -static int smi_is_done(struct mv643xx_eth_shared_private *msp) -{ - return !(readl(msp->base + SMI_REG) & SMI_BUSY); -} - -static int smi_wait_ready(struct mv643xx_eth_shared_private *msp) -{ - if (msp->err_interrupt == NO_IRQ) { - int i; - - for (i = 0; !smi_is_done(msp); i++) { - if (i == 10) - return -ETIMEDOUT; - msleep(10); - } - - return 0; - } - - if (!smi_is_done(msp)) { - wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp), - msecs_to_jiffies(100)); - if (!smi_is_done(msp)) - return -ETIMEDOUT; - } - - return 0; -} - -static int smi_bus_read(struct mii_bus *bus, int addr, int reg) -{ - struct mv643xx_eth_shared_private *msp = bus->priv; - void __iomem *smi_reg = msp->base + SMI_REG; - int ret; - - if (smi_wait_ready(msp)) { - pr_warn("SMI bus busy timeout\n"); - return -ETIMEDOUT; - } - - writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg); - - if (smi_wait_ready(msp)) { - pr_warn("SMI bus busy timeout\n"); - return -ETIMEDOUT; - } - - ret = readl(smi_reg); - if (!(ret & SMI_READ_VALID)) { - pr_warn("SMI bus read not valid\n"); - return -ENODEV; - } - - return ret & 0xffff; -} - -static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val) -{ - struct mv643xx_eth_shared_private *msp = bus->priv; - void __iomem *smi_reg = msp->base + SMI_REG; - - if (smi_wait_ready(msp)) { - pr_warn("SMI bus busy timeout\n"); - return -ETIMEDOUT; - } - - writel(SMI_OPCODE_WRITE | (reg << 21) | - (addr << 16) | (val & 0xffff), smi_reg); - - if (smi_wait_ready(msp)) { - pr_warn("SMI bus busy timeout\n"); - return -ETIMEDOUT; - } - - return 0; -} - - /* statistics ***************************************************************/ static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev) { @@ -2687,47 +2569,6 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) if (msp->base == NULL) goto out_free; - /* - * Set up and register SMI bus. - */ - if (pd == NULL || pd->shared_smi == NULL) { - msp->smi_bus = mdiobus_alloc(); - if (msp->smi_bus == NULL) - goto out_unmap; - - msp->smi_bus->priv = msp; - msp->smi_bus->name = "mv643xx_eth smi"; - msp->smi_bus->read = smi_bus_read; - msp->smi_bus->write = smi_bus_write, - snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d", - pdev->name, pdev->id); - msp->smi_bus->parent = &pdev->dev; - msp->smi_bus->phy_mask = 0xffffffff; - if (mdiobus_register(msp->smi_bus) < 0) - goto out_free_mii_bus; - msp->smi = msp; - } else { - msp->smi = platform_get_drvdata(pd->shared_smi); - } - - msp->err_interrupt = NO_IRQ; - init_waitqueue_head(&msp->smi_busy_wait); - - /* - * Check whether the error interrupt is hooked up. - */ - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (res != NULL) { - int err; - - err = request_irq(res->start, mv643xx_eth_err_irq, - IRQF_SHARED, "mv643xx_eth", msp); - if (!err) { - writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK); - msp->err_interrupt = res->start; - } - } - /* * (Re-)program MBUS remapping windows if we are asked to. 
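As a rough sketch of the register-window split described in this changelog (the shared_base parameter and the function name are assumptions for illustration, not taken from the patch), a platform hands only the SMI window to the new MDIO driver:

/* Sketch: register the Orion MDIO bus for an MV643XX shared block.
 * 'shared_base' is an assumed physical base of the shared registers;
 * the MDIO driver only touches shared_base + 0x4 .. shared_base + 0x83. */
static int __init example_register_mvmdio(resource_size_t shared_base)
{
	struct resource res = {
		.start = shared_base + 0x4,
		.end   = shared_base + 0x84 - 1,
		.flags = IORESOURCE_MEM,
	};
	struct platform_device *pdev;

	pdev = platform_device_register_simple("orion-mdio", -1, &res, 1);
	return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
}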
*/ @@ -2743,10 +2584,6 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) return 0; -out_free_mii_bus: - mdiobus_free(msp->smi_bus); -out_unmap: - iounmap(msp->base); out_free: kfree(msp); out: @@ -2756,14 +2593,7 @@ out: static int mv643xx_eth_shared_remove(struct platform_device *pdev) { struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev); - struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; - if (pd == NULL || pd->shared_smi == NULL) { - mdiobus_unregister(msp->smi_bus); - mdiobus_free(msp->smi_bus); - } - if (msp->err_interrupt != NO_IRQ) - free_irq(msp->err_interrupt, msp); iounmap(msp->base); kfree(msp); @@ -2826,14 +2656,21 @@ static void set_params(struct mv643xx_eth_private *mp, mp->txq_count = pd->tx_queue_count ? : 1; } +static void mv643xx_eth_adjust_link(struct net_device *dev) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + mv643xx_adjust_pscr(mp); +} + static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, int phy_addr) { - struct mii_bus *bus = mp->shared->smi->smi_bus; struct phy_device *phydev; int start; int num; int i; + char phy_id[MII_BUS_ID_SIZE + 3]; if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) { start = phy_addr_get(mp) & 0x1f; @@ -2843,17 +2680,19 @@ static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, num = 1; } + /* Attempt to connect to the PHY using orion-mdio */ phydev = NULL; for (i = 0; i < num; i++) { int addr = (start + i) & 0x1f; - if (bus->phy_map[addr] == NULL) - mdiobus_scan(bus, addr); + snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, + "orion-mdio-mii", addr); - if (phydev == NULL) { - phydev = bus->phy_map[addr]; - if (phydev != NULL) - phy_addr_set(mp, addr); + phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link, + PHY_INTERFACE_MODE_GMII); + if (!IS_ERR(phydev)) { + phy_addr_set(mp, addr); + break; } } @@ -2866,8 +2705,6 @@ static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex) phy_reset(mp); - phy_attach(mp->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_GMII); - if (speed == 0) { phy->autoneg = AUTONEG_ENABLE; phy->speed = 0; diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h index 49258e0ed1c6..141d395bbb5f 100644 --- a/include/linux/mv643xx_eth.h +++ b/include/linux/mv643xx_eth.h @@ -19,7 +19,6 @@ struct mv643xx_eth_shared_platform_data { struct mbus_dram_target_info *dram; - struct platform_device *shared_smi; /* * Max packet size for Tx IP/Layer 4 checksum, when set to 0, default * limit of 9KiB will be used. -- cgit v1.2.3-70-g09d2 From c5116e9d8d2de324f13a91fe5afc308cd6b0ca93 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Tue, 19 Mar 2013 16:58:58 +0100 Subject: ssb: define more board types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Rafał Miłecki Signed-off-by: John W. 
Linville --- include/linux/ssb/ssb.h | 54 ++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 51 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 8b1322296fed..c64999fd1660 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h @@ -340,13 +340,61 @@ enum ssb_bustype { #define SSB_BOARDVENDOR_DELL 0x1028 /* Dell */ #define SSB_BOARDVENDOR_HP 0x0E11 /* HP */ /* board_type */ +#define SSB_BOARD_BCM94301CB 0x0406 +#define SSB_BOARD_BCM94301MP 0x0407 +#define SSB_BOARD_BU4309 0x040A +#define SSB_BOARD_BCM94309CB 0x040B +#define SSB_BOARD_BCM4309MP 0x040C +#define SSB_BOARD_BU4306 0x0416 #define SSB_BOARD_BCM94306MP 0x0418 #define SSB_BOARD_BCM4309G 0x0421 #define SSB_BOARD_BCM4306CB 0x0417 -#define SSB_BOARD_BCM4309MP 0x040C +#define SSB_BOARD_BCM94306PC 0x0425 /* pcmcia 3.3v 4306 card */ +#define SSB_BOARD_BCM94306CBSG 0x042B /* with SiGe PA */ +#define SSB_BOARD_PCSG94306 0x042D /* with SiGe PA */ +#define SSB_BOARD_BU4704SD 0x042E /* with sdram */ +#define SSB_BOARD_BCM94704AGR 0x042F /* dual 11a/11g Router */ +#define SSB_BOARD_BCM94308MP 0x0430 /* 11a-only minipci */ +#define SSB_BOARD_BU4318 0x0447 +#define SSB_BOARD_CB4318 0x0448 +#define SSB_BOARD_MPG4318 0x0449 #define SSB_BOARD_MP4318 0x044A -#define SSB_BOARD_BU4306 0x0416 -#define SSB_BOARD_BU4309 0x040A +#define SSB_BOARD_SD4318 0x044B +#define SSB_BOARD_BCM94306P 0x044C /* with SiGe */ +#define SSB_BOARD_BCM94303MP 0x044E +#define SSB_BOARD_BCM94306MPM 0x0450 +#define SSB_BOARD_BCM94306MPL 0x0453 +#define SSB_BOARD_PC4303 0x0454 /* pcmcia */ +#define SSB_BOARD_BCM94306MPLNA 0x0457 +#define SSB_BOARD_BCM94306MPH 0x045B +#define SSB_BOARD_BCM94306PCIV 0x045C +#define SSB_BOARD_BCM94318MPGH 0x0463 +#define SSB_BOARD_BU4311 0x0464 +#define SSB_BOARD_BCM94311MC 0x0465 +#define SSB_BOARD_BCM94311MCAG 0x0466 +/* 4321 boards */ +#define SSB_BOARD_BU4321 0x046B +#define SSB_BOARD_BU4321E 0x047C +#define SSB_BOARD_MP4321 0x046C +#define SSB_BOARD_CB2_4321 0x046D +#define SSB_BOARD_CB2_4321_AG 0x0066 +#define SSB_BOARD_MC4321 0x046E +/* 4325 boards */ +#define SSB_BOARD_BCM94325DEVBU 0x0490 +#define SSB_BOARD_BCM94325BGABU 0x0491 +#define SSB_BOARD_BCM94325SDGWB 0x0492 +#define SSB_BOARD_BCM94325SDGMDL 0x04AA +#define SSB_BOARD_BCM94325SDGMDL2 0x04C6 +#define SSB_BOARD_BCM94325SDGMDL3 0x04C9 +#define SSB_BOARD_BCM94325SDABGWBA 0x04E1 +/* 4322 boards */ +#define SSB_BOARD_BCM94322MC 0x04A4 +#define SSB_BOARD_BCM94322USB 0x04A8 /* dualband */ +#define SSB_BOARD_BCM94322HM 0x04B0 +#define SSB_BOARD_BCM94322USB2D 0x04Bf /* single band discrete front end */ +/* 4312 boards */ +#define SSB_BOARD_BU4312 0x048A +#define SSB_BOARD_BCM4312MCGSG 0x04B5 /* chip_package */ #define SSB_CHIPPACK_BCM4712S 1 /* Small 200pin 4712 */ #define SSB_CHIPPACK_BCM4712M 2 /* Medium 225pin 4712 */ -- cgit v1.2.3-70-g09d2 From 3e6998574fde0ab7a3329c9229394dd80462ead2 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Tue, 19 Mar 2013 16:58:59 +0100 Subject: bcma: define board types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Using that IDs we can write workarounds for various cards Signed-off-by: Rafał Miłecki Signed-off-by: John W. 
Linville --- include/linux/bcma/bcma.h | 54 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) (limited to 'include/linux') diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index e0ce311011c0..0ab6712fd76b 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -173,6 +173,60 @@ struct bcma_host_ops { #define BCMA_CHIP_ID_BCM53572 53572 #define BCMA_PKG_ID_BCM47188 9 +/* Board types (on PCI usually equals to the subsystem dev id) */ +/* BCM4313 */ +#define BCMA_BOARD_TYPE_BCM94313BU 0X050F +#define BCMA_BOARD_TYPE_BCM94313HM 0X0510 +#define BCMA_BOARD_TYPE_BCM94313EPA 0X0511 +#define BCMA_BOARD_TYPE_BCM94313HMG 0X051C +/* BCM4716 */ +#define BCMA_BOARD_TYPE_BCM94716NR2 0X04CD +/* BCM43224 */ +#define BCMA_BOARD_TYPE_BCM943224X21 0X056E +#define BCMA_BOARD_TYPE_BCM943224X21_FCC 0X00D1 +#define BCMA_BOARD_TYPE_BCM943224X21B 0X00E9 +#define BCMA_BOARD_TYPE_BCM943224M93 0X008B +#define BCMA_BOARD_TYPE_BCM943224M93A 0X0090 +#define BCMA_BOARD_TYPE_BCM943224X16 0X0093 +#define BCMA_BOARD_TYPE_BCM94322X9 0X008D +#define BCMA_BOARD_TYPE_BCM94322M35E 0X008E +/* BCM43228 */ +#define BCMA_BOARD_TYPE_BCM943228BU8 0X0540 +#define BCMA_BOARD_TYPE_BCM943228BU9 0X0541 +#define BCMA_BOARD_TYPE_BCM943228BU 0X0542 +#define BCMA_BOARD_TYPE_BCM943227HM4L 0X0543 +#define BCMA_BOARD_TYPE_BCM943227HMB 0X0544 +#define BCMA_BOARD_TYPE_BCM943228HM4L 0X0545 +#define BCMA_BOARD_TYPE_BCM943228SD 0X0573 +/* BCM4331 */ +#define BCMA_BOARD_TYPE_BCM94331X19 0X00D6 +#define BCMA_BOARD_TYPE_BCM94331X28 0X00E4 +#define BCMA_BOARD_TYPE_BCM94331X28B 0X010E +#define BCMA_BOARD_TYPE_BCM94331PCIEBT3AX 0X00E4 +#define BCMA_BOARD_TYPE_BCM94331X12_2G 0X00EC +#define BCMA_BOARD_TYPE_BCM94331X12_5G 0X00ED +#define BCMA_BOARD_TYPE_BCM94331X29B 0X00EF +#define BCMA_BOARD_TYPE_BCM94331CSAX 0X00EF +#define BCMA_BOARD_TYPE_BCM94331X19C 0X00F5 +#define BCMA_BOARD_TYPE_BCM94331X33 0X00F4 +#define BCMA_BOARD_TYPE_BCM94331BU 0X0523 +#define BCMA_BOARD_TYPE_BCM94331S9BU 0X0524 +#define BCMA_BOARD_TYPE_BCM94331MC 0X0525 +#define BCMA_BOARD_TYPE_BCM94331MCI 0X0526 +#define BCMA_BOARD_TYPE_BCM94331PCIEBT4 0X0527 +#define BCMA_BOARD_TYPE_BCM94331HM 0X0574 +#define BCMA_BOARD_TYPE_BCM94331PCIEDUAL 0X059B +#define BCMA_BOARD_TYPE_BCM94331MCH5 0X05A9 +#define BCMA_BOARD_TYPE_BCM94331CS 0X05C6 +#define BCMA_BOARD_TYPE_BCM94331CD 0X05DA +/* BCM53572 */ +#define BCMA_BOARD_TYPE_BCM953572BU 0X058D +#define BCMA_BOARD_TYPE_BCM953572NR2 0X058E +#define BCMA_BOARD_TYPE_BCM947188NR2 0X058F +#define BCMA_BOARD_TYPE_BCM953572SDRNR2 0X0590 +/* BCM43142 */ +#define BCMA_BOARD_TYPE_BCM943142HM 0X05E0 + struct bcma_device { struct bcma_bus *bus; struct bcma_device_id id; -- cgit v1.2.3-70-g09d2 From 12bef78f0a806639daef58b1770be6ea19b2e94d Mon Sep 17 00:00:00 2001 From: Hauke Mehrtens Date: Thu, 21 Mar 2013 16:26:19 +0100 Subject: ssb: fix sprom constant for ant_available_{bg,a} This was done accordingly to new specs. Signed-off-by: Hauke Mehrtens Signed-off-by: John W. 
Linville --- include/linux/ssb/ssb_regs.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h index 6ecfa02ddbac..3a7256955b10 100644 --- a/include/linux/ssb/ssb_regs.h +++ b/include/linux/ssb/ssb_regs.h @@ -289,11 +289,11 @@ #define SSB_SPROM4_ETHPHY_ET1A_SHIFT 5 #define SSB_SPROM4_ETHPHY_ET0M (1<<14) /* MDIO for enet0 */ #define SSB_SPROM4_ETHPHY_ET1M (1<<15) /* MDIO for enet1 */ -#define SSB_SPROM4_ANTAVAIL 0x005D /* Antenna available bitfields */ -#define SSB_SPROM4_ANTAVAIL_A 0x00FF /* A-PHY bitfield */ -#define SSB_SPROM4_ANTAVAIL_A_SHIFT 0 -#define SSB_SPROM4_ANTAVAIL_BG 0xFF00 /* B-PHY and G-PHY bitfield */ -#define SSB_SPROM4_ANTAVAIL_BG_SHIFT 8 +#define SSB_SPROM4_ANTAVAIL 0x005C /* Antenna available bitfields */ +#define SSB_SPROM4_ANTAVAIL_BG 0x00FF /* B-PHY and G-PHY bitfield */ +#define SSB_SPROM4_ANTAVAIL_BG_SHIFT 0 +#define SSB_SPROM4_ANTAVAIL_A 0xFF00 /* A-PHY bitfield */ +#define SSB_SPROM4_ANTAVAIL_A_SHIFT 8 #define SSB_SPROM4_AGAIN01 0x005E /* Antenna Gain (in dBm Q5.2) */ #define SSB_SPROM4_AGAIN0 0x00FF /* Antenna 0 */ #define SSB_SPROM4_AGAIN0_SHIFT 0 -- cgit v1.2.3-70-g09d2 From 6752c8db8e0cfedb44ba62806dd15b383ed64000 Mon Sep 17 00:00:00 2001 From: YOSHIFUJI Hideaki / 吉藤英明 Date: Mon, 25 Mar 2013 08:26:16 +0000 Subject: firewire net, ipv4 arp: Extend hardware address and remove driver-level packet inspection. Inspection of upper layer protocol is considered harmful, especially if it is about ARP or other stateful upper layer protocol; driver cannot (and should not) have full state of them. IPv4 over Firewire module used to inspect ARP (both in sending path and in receiving path), and record peer's GUID, max packet size, max speed and fifo address. This patch removes such inspection by extending our "hardware address" definition to include other information as well: max packet size, max speed and fifo. By doing this, The neighbour module in networking subsystem can cache them. Note: As we have started ignoring sspd and max_rec in ARP/NDP, those information will not be used in the driver when sending. When a packet is being sent, the IP layer fills our pseudo header with the extended "hardware address", including GUID and fifo. The driver can look-up node-id (the real but rather volatile low-level address) by GUID, and then the module can send the packet to the wire using parameters provided in the extendedn hardware address. This approach is realistic because IP over IEEE1394 (RFC2734) and IPv6 over IEEE1394 (RFC3146) share same "hardware address" format in their address resolution protocols. Here, extended "hardware address" is defined as follows: union fwnet_hwaddr { u8 u[16]; struct { __be64 uniq_id; /* EUI-64 */ u8 max_rec; /* max packet size */ u8 sspd; /* max speed */ __be16 fifo_hi; /* hi 16bits of FIFO addr */ __be32 fifo_lo; /* lo 32bits of FIFO addr */ } __packed uc; }; Note that Hardware address is declared as union, so that we can map full IP address into this, when implementing MCAP (Multicast Cannel Allocation Protocol) for IPv6, but IP and ARP subsystem do not need to know this format in detail. One difference between original ARP (RFC826) and 1394 ARP (RFC2734) is that 1394 ARP Request/Reply do not contain the target hardware address field (aka ar$tha). This difference is handled in the ARP subsystem. CC: Stephan Gatzka Signed-off-by: YOSHIFUJI Hideaki Signed-off-by: David S. 
Miller --- drivers/firewire/net.c | 153 +++++++++---------------------------------------- include/linux/if_arp.h | 12 +++- include/net/firewire.h | 25 ++++++++ net/ipv4/arp.c | 27 +++++++-- 4 files changed, 82 insertions(+), 135 deletions(-) create mode 100644 include/net/firewire.h (limited to 'include/linux') diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index cb8aa865ff88..790017eb5051 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c @@ -28,6 +28,7 @@ #include #include +#include /* rx limits */ #define FWNET_MAX_FRAGMENTS 30 /* arbitrary, > TX queue depth */ @@ -57,33 +58,6 @@ #define RFC2374_HDR_LASTFRAG 2 /* last fragment */ #define RFC2374_HDR_INTFRAG 3 /* interior fragment */ -#define RFC2734_HW_ADDR_LEN 16 - -struct rfc2734_arp { - __be16 hw_type; /* 0x0018 */ - __be16 proto_type; /* 0x0806 */ - u8 hw_addr_len; /* 16 */ - u8 ip_addr_len; /* 4 */ - __be16 opcode; /* ARP Opcode */ - /* Above is exactly the same format as struct arphdr */ - - __be64 s_uniq_id; /* Sender's 64bit EUI */ - u8 max_rec; /* Sender's max packet size */ - u8 sspd; /* Sender's max speed */ - __be16 fifo_hi; /* hi 16bits of sender's FIFO addr */ - __be32 fifo_lo; /* lo 32bits of sender's FIFO addr */ - __be32 sip; /* Sender's IP Address */ - __be32 tip; /* IP Address of requested hw addr */ -} __packed; - -/* This header format is specific to this driver implementation. */ -#define FWNET_ALEN 8 -#define FWNET_HLEN 10 -struct fwnet_header { - u8 h_dest[FWNET_ALEN]; /* destination address */ - __be16 h_proto; /* packet type ID field */ -} __packed; - static bool fwnet_hwaddr_is_multicast(u8 *ha) { return !!(*ha & 1); @@ -196,8 +170,6 @@ struct fwnet_peer { struct list_head peer_link; struct fwnet_device *dev; u64 guid; - u64 fifo; - __be32 ip; /* guarded by dev->lock */ struct list_head pd_list; /* received partial datagrams */ @@ -226,6 +198,15 @@ struct fwnet_packet_task { u8 enqueued; }; +/* + * Get fifo address embedded in hwaddr + */ +static __u64 fwnet_hwaddr_fifo(union fwnet_hwaddr *ha) +{ + return (u64)get_unaligned_be16(&ha->uc.fifo_hi) << 32 + | get_unaligned_be32(&ha->uc.fifo_lo); +} + /* * saddr == NULL means use device source address. * daddr == NULL means leave destination address (eg unresolved arp). @@ -518,7 +499,6 @@ static int fwnet_finish_incoming_packet(struct net_device *net, bool is_broadcast, u16 ether_type) { struct fwnet_device *dev; - static const __be64 broadcast_hw = cpu_to_be64(~0ULL); int status; __be64 guid; @@ -537,76 +517,11 @@ static int fwnet_finish_incoming_packet(struct net_device *net, /* * Parse the encapsulation header. This actually does the job of - * converting to an ethernet frame header, as well as arp - * conversion if needed. ARP conversion is easier in this - * direction, since we are using ethernet as our backend. - */ - /* - * If this is an ARP packet, convert it. First, we want to make - * use of some of the fields, since they tell us a little bit - * about the sending machine. + * converting to an ethernet-like pseudo frame header. 
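To make the fifo_hi/fifo_lo split concrete, a small round-trip sketch follows; the address value is made up, and it simply mirrors what fwnet_probe() and fwnet_hwaddr_fifo() do in this patch.

/* Sketch only: a 48-bit 1394 FIFO address mapped onto the new hwaddr
 * fields and recombined.  Real code uses the card's allocated FIFO
 * address instead of this made-up constant. */
static u64 example_roundtrip_fifo(void)
{
	u64 fifo = 0x0000c0de12345678ULL;	/* example value only */
	union fwnet_hwaddr ha;

	put_unaligned_be16(fifo >> 32, &ha.uc.fifo_hi);
	put_unaligned_be32(fifo & 0xffffffff, &ha.uc.fifo_lo);

	/* the same combination fwnet_hwaddr_fifo() performs */
	return (u64)get_unaligned_be16(&ha.uc.fifo_hi) << 32
		| get_unaligned_be32(&ha.uc.fifo_lo);	/* == fifo */
}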
*/ - if (ether_type == ETH_P_ARP) { - struct rfc2734_arp *arp1394; - struct arphdr *arp; - unsigned char *arp_ptr; - u64 fifo_addr; - u64 peer_guid; - struct fwnet_peer *peer; - unsigned long flags; - - arp1394 = (struct rfc2734_arp *)skb->data; - arp = (struct arphdr *)skb->data; - arp_ptr = (unsigned char *)(arp + 1); - peer_guid = get_unaligned_be64(&arp1394->s_uniq_id); - fifo_addr = (u64)get_unaligned_be16(&arp1394->fifo_hi) << 32 - | get_unaligned_be32(&arp1394->fifo_lo); - - spin_lock_irqsave(&dev->lock, flags); - peer = fwnet_peer_find_by_guid(dev, peer_guid); - if (peer) { - peer->fifo = fifo_addr; - peer->ip = arp1394->sip; - } - spin_unlock_irqrestore(&dev->lock, flags); - - if (!peer) { - dev_notice(&net->dev, - "no peer for ARP packet from %016llx\n", - (unsigned long long)peer_guid); - goto no_peer; - } - - /* - * Now that we're done with the 1394 specific stuff, we'll - * need to alter some of the data. Believe it or not, all - * that needs to be done is sender_IP_address needs to be - * moved, the destination hardware address get stuffed - * in and the hardware address length set to 8. - * - * IMPORTANT: The code below overwrites 1394 specific data - * needed above so keep the munging of the data for the - * higher level IP stack last. - */ - - arp->ar_hln = 8; - /* skip over sender unique id */ - arp_ptr += arp->ar_hln; - /* move sender IP addr */ - put_unaligned(arp1394->sip, (u32 *)arp_ptr); - /* skip over sender IP addr */ - arp_ptr += arp->ar_pln; - - if (arp->ar_op == htons(ARPOP_REQUEST)) - memset(arp_ptr, 0, sizeof(u64)); - else - memcpy(arp_ptr, net->dev_addr, sizeof(u64)); - } - - /* Now add the ethernet header. */ guid = cpu_to_be64(dev->card->guid); if (dev_hard_header(skb, net, ether_type, - is_broadcast ? &broadcast_hw : &guid, + is_broadcast ? 
net->broadcast : net->dev_addr, NULL, skb->len) >= 0) { struct fwnet_header *eth; u16 *rawp; @@ -649,7 +564,6 @@ static int fwnet_finish_incoming_packet(struct net_device *net, return 0; - no_peer: err: net->stats.rx_errors++; net->stats.rx_dropped++; @@ -1355,11 +1269,12 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net) ptask->dest_node = IEEE1394_ALL_NODES; ptask->speed = SCODE_100; } else { - __be64 guid = get_unaligned((__be64 *)hdr_buf.h_dest); + union fwnet_hwaddr *ha = (union fwnet_hwaddr *)hdr_buf.h_dest; + __be64 guid = get_unaligned(&ha->uc.uniq_id); u8 generation; peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid)); - if (!peer || peer->fifo == FWNET_NO_FIFO_ADDR) + if (!peer) goto fail; generation = peer->generation; @@ -1367,32 +1282,12 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net) max_payload = peer->max_payload; datagram_label_ptr = &peer->datagram_label; - ptask->fifo_addr = peer->fifo; + ptask->fifo_addr = fwnet_hwaddr_fifo(ha); ptask->generation = generation; ptask->dest_node = dest_node; ptask->speed = peer->speed; } - /* If this is an ARP packet, convert it */ - if (proto == htons(ETH_P_ARP)) { - struct arphdr *arp = (struct arphdr *)skb->data; - unsigned char *arp_ptr = (unsigned char *)(arp + 1); - struct rfc2734_arp *arp1394 = (struct rfc2734_arp *)skb->data; - __be32 ipaddr; - - ipaddr = get_unaligned((__be32 *)(arp_ptr + FWNET_ALEN)); - - arp1394->hw_addr_len = RFC2734_HW_ADDR_LEN; - arp1394->max_rec = dev->card->max_receive; - arp1394->sspd = dev->card->link_speed; - - put_unaligned_be16(dev->local_fifo >> 32, - &arp1394->fifo_hi); - put_unaligned_be32(dev->local_fifo & 0xffffffff, - &arp1394->fifo_lo); - put_unaligned(ipaddr, &arp1394->sip); - } - ptask->hdr.w0 = 0; ptask->hdr.w1 = 0; ptask->skb = skb; @@ -1507,8 +1402,6 @@ static int fwnet_add_peer(struct fwnet_device *dev, peer->dev = dev; peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; - peer->fifo = FWNET_NO_FIFO_ADDR; - peer->ip = 0; INIT_LIST_HEAD(&peer->pd_list); peer->pdg_size = 0; peer->datagram_label = 0; @@ -1538,6 +1431,7 @@ static int fwnet_probe(struct device *_dev) struct fwnet_device *dev; unsigned max_mtu; int ret; + union fwnet_hwaddr *ha; mutex_lock(&fwnet_device_mutex); @@ -1582,8 +1476,15 @@ static int fwnet_probe(struct device *_dev) net->mtu = min(1500U, max_mtu); /* Set our hardware address while we're at it */ - put_unaligned_be64(card->guid, net->dev_addr); - put_unaligned_be64(~0ULL, net->broadcast); + ha = (union fwnet_hwaddr *)net->dev_addr; + put_unaligned_be64(card->guid, &ha->uc.uniq_id); + ha->uc.max_rec = dev->card->max_receive; + ha->uc.sspd = dev->card->link_speed; + put_unaligned_be16(dev->local_fifo >> 32, &ha->uc.fifo_hi); + put_unaligned_be32(dev->local_fifo & 0xffffffff, &ha->uc.fifo_lo); + + memset(net->broadcast, -1, net->addr_len); + ret = register_netdev(net); if (ret) goto out; @@ -1632,8 +1533,6 @@ static int fwnet_remove(struct device *_dev) mutex_lock(&fwnet_device_mutex); net = dev->netdev; - if (net && peer->ip) - arp_invalidate(net, peer->ip); fwnet_remove_peer(peer, dev); diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h index 89b4614a4722..f563907ed776 100644 --- a/include/linux/if_arp.h +++ b/include/linux/if_arp.h @@ -33,7 +33,15 @@ static inline struct arphdr *arp_hdr(const struct sk_buff *skb) static inline int arp_hdr_len(struct net_device *dev) { - /* ARP header, plus 2 device addresses, plus 2 IP addresses. 
*/ - return sizeof(struct arphdr) + (dev->addr_len + sizeof(u32)) * 2; + switch (dev->type) { +#if IS_ENABLED(CONFIG_FIREWIRE_NET) + case ARPHRD_IEEE1394: + /* ARP header, device address and 2 IP addresses */ + return sizeof(struct arphdr) + dev->addr_len + sizeof(u32) * 2; +#endif + default: + /* ARP header, plus 2 device addresses, plus 2 IP addresses. */ + return sizeof(struct arphdr) + (dev->addr_len + sizeof(u32)) * 2; + } } #endif /* _LINUX_IF_ARP_H */ diff --git a/include/net/firewire.h b/include/net/firewire.h new file mode 100644 index 000000000000..31bcbfe7a220 --- /dev/null +++ b/include/net/firewire.h @@ -0,0 +1,25 @@ +#ifndef _NET_FIREWIRE_H +#define _NET_FIREWIRE_H + +/* Pseudo L2 address */ +#define FWNET_ALEN 16 +union fwnet_hwaddr { + u8 u[FWNET_ALEN]; + /* "Hardware address" defined in RFC2734/RF3146 */ + struct { + __be64 uniq_id; /* EUI-64 */ + u8 max_rec; /* max packet size */ + u8 sspd; /* max speed */ + __be16 fifo_hi; /* hi 16bits of FIFO addr */ + __be32 fifo_lo; /* lo 32bits of FIFO addr */ + } __packed uc; +}; + +/* Pseudo L2 Header */ +#define FWNET_HLEN 18 +struct fwnet_header { + u8 h_dest[FWNET_ALEN]; /* destination address */ + __be16 h_proto; /* packet type ID field */ +} __packed; + +#endif diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index fea4929f6200..247ec1951c35 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -654,11 +654,19 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, arp_ptr += dev->addr_len; memcpy(arp_ptr, &src_ip, 4); arp_ptr += 4; - if (target_hw != NULL) - memcpy(arp_ptr, target_hw, dev->addr_len); - else - memset(arp_ptr, 0, dev->addr_len); - arp_ptr += dev->addr_len; + + switch (dev->type) { +#if IS_ENABLED(CONFIG_FIREWIRE_NET) + case ARPHRD_IEEE1394: + break; +#endif + default: + if (target_hw != NULL) + memcpy(arp_ptr, target_hw, dev->addr_len); + else + memset(arp_ptr, 0, dev->addr_len); + arp_ptr += dev->addr_len; + } memcpy(arp_ptr, &dest_ip, 4); return skb; @@ -781,7 +789,14 @@ static int arp_process(struct sk_buff *skb) arp_ptr += dev->addr_len; memcpy(&sip, arp_ptr, 4); arp_ptr += 4; - arp_ptr += dev->addr_len; + switch (dev_type) { +#if IS_ENABLED(CONFIG_FIREWIRE_NET) + case ARPHRD_IEEE1394: + break; +#endif + default: + arp_ptr += dev->addr_len; + } memcpy(&tip, arp_ptr, 4); /* * Check for bad requests for 127.x.x.x and requests for multicast -- cgit v1.2.3-70-g09d2 From 5203cd28db6dc05c3618a602cf4cf81203d00257 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Tue, 26 Mar 2013 23:11:21 +0000 Subject: net: core: introduce skb_probe_transport_header() Sometimes, we need probe and set the transport header for packets (e.g from untrusted source). This patch introduces a new helper skb_probe_transport_header() which tries to probe and set the l4 header through skb_flow_dissect(), if not just set the transport header to the hint passed by caller. Cc: Eric Dumazet Signed-off-by: Jason Wang Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/skbuff.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 497412165b1c..fa88b966cb8e 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -32,6 +32,7 @@ #include #include #include +#include /* Don't change this without changing skb_csum_unnecessary! 
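A usage sketch for the helper this changelog describes (illustrative only; the wrapper function and the ETH_HLEN hint are assumptions, not code from the commit):

/* Sketch: a driver that builds skbs from an untrusted source (e.g. a
 * tap-like device) probes the transport header before handing the skb
 * to the stack.  ETH_HLEN is only a reasonable fallback hint here. */
static void example_finish_untrusted_skb(struct sk_buff *skb)
{
	skb_probe_transport_header(skb, ETH_HLEN);
	/* skb_transport_header(skb) is now set either from flow dissection
	 * or from the hint, so later code can rely on it. */
}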
*/ #define CHECKSUM_NONE 0 @@ -1559,6 +1560,19 @@ static inline void skb_set_transport_header(struct sk_buff *skb, skb->transport_header += offset; } +static inline void skb_probe_transport_header(struct sk_buff *skb, + const int offset_hint) +{ + struct flow_keys keys; + + if (skb_transport_header_was_set(skb)) + return; + else if (skb_flow_dissect(skb, &keys)) + skb_set_transport_header(skb, keys.thoff); + else + skb_set_transport_header(skb, offset_hint); +} + static inline unsigned char *skb_network_header(const struct sk_buff *skb) { return skb->head + skb->network_header; -- cgit v1.2.3-70-g09d2 From d6b688cf2f7ca3e168acc73597f4d7102ae663fa Mon Sep 17 00:00:00 2001 From: Hauke Mehrtens Date: Wed, 27 Mar 2013 17:23:10 +0100 Subject: bcma: handle more devices in bcma_pmu_get_alp_clock() Add some more chip IDs to bcma_pmu_get_alp_clock() Signed-off-by: Hauke Mehrtens Signed-off-by: John W. Linville --- drivers/bcma/driver_chipcommon_pmu.c | 24 ++++++++++++++++++++---- include/linux/bcma/bcma_driver_chipcommon.h | 1 + 2 files changed, 21 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c index 7e88fffaf3f5..edca73af3cc0 100644 --- a/drivers/bcma/driver_chipcommon_pmu.c +++ b/drivers/bcma/driver_chipcommon_pmu.c @@ -174,19 +174,35 @@ u32 bcma_pmu_get_alp_clock(struct bcma_drv_cc *cc) struct bcma_bus *bus = cc->core->bus; switch (bus->chipinfo.id) { + case BCMA_CHIP_ID_BCM4313: + case BCMA_CHIP_ID_BCM43224: + case BCMA_CHIP_ID_BCM43225: + case BCMA_CHIP_ID_BCM43227: + case BCMA_CHIP_ID_BCM43228: + case BCMA_CHIP_ID_BCM4331: + case BCMA_CHIP_ID_BCM43421: + case BCMA_CHIP_ID_BCM43428: + case BCMA_CHIP_ID_BCM43431: case BCMA_CHIP_ID_BCM4716: - case BCMA_CHIP_ID_BCM4748: case BCMA_CHIP_ID_BCM47162: - case BCMA_CHIP_ID_BCM4313: - case BCMA_CHIP_ID_BCM5357: + case BCMA_CHIP_ID_BCM4748: case BCMA_CHIP_ID_BCM4749: + case BCMA_CHIP_ID_BCM5357: case BCMA_CHIP_ID_BCM53572: + case BCMA_CHIP_ID_BCM6362: /* always 20Mhz */ return 20000 * 1000; - case BCMA_CHIP_ID_BCM5356: case BCMA_CHIP_ID_BCM4706: + case BCMA_CHIP_ID_BCM5356: /* always 25Mhz */ return 25000 * 1000; + case BCMA_CHIP_ID_BCM43460: + case BCMA_CHIP_ID_BCM4352: + case BCMA_CHIP_ID_BCM4360: + if (cc->status & BCMA_CC_CHIPST_4360_XTAL_40MZ) + return 40000 * 1000; + else + return 20000 * 1000; default: bcma_warn(bus, "No ALP clock specified for %04X device, pmu rev. %d, using default %d Hz\n", bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_ALP_CLOCK); diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index 8390c474f69a..1db4c6de372e 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -104,6 +104,7 @@ #define BCMA_CC_CHIPST_4706_MIPS_BENDIAN BIT(3) /* 0: little, 1: big endian */ #define BCMA_CC_CHIPST_4706_PCIE1_DISABLE BIT(5) /* PCIE1 enable strap pin */ #define BCMA_CC_CHIPST_5357_NAND_BOOT BIT(4) /* NAND boot, valid for CC rev 38 and/or BCM5357 */ +#define BCMA_CC_CHIPST_4360_XTAL_40MZ 0x00000001 #define BCMA_CC_JCMD 0x0030 /* Rev >= 10 only */ #define BCMA_CC_JCMD_START 0x80000000 #define BCMA_CC_JCMD_BUSY 0x80000000 -- cgit v1.2.3-70-g09d2 From 6951618b4b0bb022429ab17d49f2fa3650f21cb4 Mon Sep 17 00:00:00 2001 From: Hauke Mehrtens Date: Wed, 27 Mar 2013 17:23:11 +0100 Subject: bcma: export bcma_chipco_get_alp_clock() This function will be used by brcmsmac. Signed-off-by: Hauke Mehrtens Signed-off-by: John W. 
Linville --- drivers/bcma/driver_chipcommon.c | 3 ++- include/linux/bcma/bcma_driver_chipcommon.h | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c index 28fa50ad87be..88db0cb7bf19 100644 --- a/drivers/bcma/driver_chipcommon.c +++ b/drivers/bcma/driver_chipcommon.c @@ -25,13 +25,14 @@ static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset, return value; } -static u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc) +u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc) { if (cc->capabilities & BCMA_CC_CAP_PMU) return bcma_pmu_get_alp_clock(cc); return 20000000; } +EXPORT_SYMBOL_GPL(bcma_chipco_get_alp_clock); static u32 bcma_chipco_watchdog_get_max_timer(struct bcma_drv_cc *cc) { diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index 1db4c6de372e..453fcc914683 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -608,6 +608,8 @@ void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable); extern u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks); +extern u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc); + void bcma_chipco_irq_mask(struct bcma_drv_cc *cc, u32 mask, u32 value); u32 bcma_chipco_irq_status(struct bcma_drv_cc *cc, u32 mask); -- cgit v1.2.3-70-g09d2 From fbbdb8f096e0e5d8244e1ffa46e364146ab9a440 Mon Sep 17 00:00:00 2001 From: Ying Xue Date: Wed, 27 Mar 2013 16:46:06 +0000 Subject: net: fix compile error of implicit declaration of skb_probe_transport_header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The commit 40893fd(net: switch to use skb_probe_transport_header()) involes a new error accidently. When NET_SKBUFF_DATA_USES_OFFSE is not enabled, below compile error happens: CC net/packet/af_packet.o net/packet/af_packet.c: In function ‘packet_sendmsg_spkt’: net/packet/af_packet.c:1516:2: error: implicit declaration of function ‘skb_probe_transport_header’ [-Werror=implicit-function-declaration] cc1: some warnings being treated as errors make[2]: *** [net/packet/af_packet.o] Error 1 make[1]: *** [net/packet] Error 2 make: *** [net] Error 2 As it seems skb_probe_transport_header() is not related to NET_SKBUFF_DATA_USES_OFFSE, we should move the definition of skb_probe_transport_header() out of scope of NET_SKBUFF_DATA_USES_OFFSE macro. Cc: Jason Wang Cc: Eric Dumazet Signed-off-by: Ying Xue Acked-by: Jason Wang Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index fa88b966cb8e..878e0ee81068 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1560,19 +1560,6 @@ static inline void skb_set_transport_header(struct sk_buff *skb, skb->transport_header += offset; } -static inline void skb_probe_transport_header(struct sk_buff *skb, - const int offset_hint) -{ - struct flow_keys keys; - - if (skb_transport_header_was_set(skb)) - return; - else if (skb_flow_dissect(skb, &keys)) - skb_set_transport_header(skb, keys.thoff); - else - skb_set_transport_header(skb, offset_hint); -} - static inline unsigned char *skb_network_header(const struct sk_buff *skb) { return skb->head + skb->network_header; @@ -1716,6 +1703,19 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) } #endif /* NET_SKBUFF_DATA_USES_OFFSET */ +static inline void skb_probe_transport_header(struct sk_buff *skb, + const int offset_hint) +{ + struct flow_keys keys; + + if (skb_transport_header_was_set(skb)) + return; + else if (skb_flow_dissect(skb, &keys)) + skb_set_transport_header(skb, keys.thoff); + else + skb_set_transport_header(skb, offset_hint); +} + static inline void skb_mac_header_rebuild(struct sk_buff *skb) { if (skb_mac_header_was_set(skb)) { -- cgit v1.2.3-70-g09d2 From f3d4039242af92a9d93dee2fd9ae47066b20ca29 Mon Sep 17 00:00:00 2001 From: Paul Bolle Date: Wed, 27 Mar 2013 10:52:28 +0000 Subject: tokenring: delete last holdout of CONFIG_TR Tokenring support was deleted in v3.5. One last holdout of the macro CONFIG_TR escaped that fate. Until now. Signed-off-by: Paul Bolle Signed-off-by: David S. Miller --- include/linux/netdevice.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 56e3e0665272..1dbb02c98946 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -144,8 +144,6 @@ static inline bool dev_xmit_complete(int rc) # else # define LL_MAX_HEADER 96 # endif -#elif IS_ENABLED(CONFIG_TR) -# define LL_MAX_HEADER 48 #else # define LL_MAX_HEADER 32 #endif -- cgit v1.2.3-70-g09d2 From e5c5d22e8dcf7c2d430336cbf8e180bd38e8daf1 Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Thu, 28 Mar 2013 13:38:25 +0900 Subject: net: add ETH_P_802_3_MIN Add a new constant ETH_P_802_3_MIN, the minimum ethernet type for an 802.3 frame. Frames with a lower value in the ethernet type field are Ethernet II. Also update all the users of this value that David Miller and I could find to use the new constant. Also correct a bug in util.c. The comparison with ETH_P_802_3_MIN should be >= not >. As suggested by Jesse Gross. Compile tested only. Cc: David Miller Cc: Jesse Gross Cc: Karsten Keil Cc: John W. Linville Cc: Johannes Berg Cc: Bart De Schuymer Cc: Stephen Hemminger Cc: Patrick McHardy Cc: Marcel Holtmann Cc: Gustavo Padovan Cc: Johan Hedberg Cc: linux-bluetooth@vger.kernel.org Cc: netfilter-devel@vger.kernel.org Cc: bridge@lists.linux-foundation.org Cc: linux-wireless@vger.kernel.org Cc: linux1394-devel@lists.sourceforge.net Cc: linux-media@vger.kernel.org Cc: netdev@vger.kernel.org Cc: dev@openvswitch.org Acked-by: Mauro Carvalho Chehab Acked-by: Stefan Richter Signed-off-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/firewire/net.c | 2 +- drivers/isdn/i4l/isdn_net.c | 2 +- drivers/media/dvb-core/dvb_net.c | 10 +++++----- drivers/net/ethernet/sun/niu.c | 2 +- drivers/net/plip/plip.c | 2 +- drivers/net/wireless/ray_cs.c | 2 +- include/linux/if_vlan.h | 2 +- include/uapi/linux/if_ether.h | 3 +++ net/atm/lec.h | 2 +- net/bluetooth/bnep/netdev.c | 2 +- net/bridge/netfilter/ebtables.c | 2 +- net/ethernet/eth.c | 2 +- net/mac80211/tx.c | 2 +- net/openvswitch/datapath.c | 2 +- net/openvswitch/flow.c | 6 +++--- net/wireless/util.c | 2 +- 16 files changed, 24 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index 56796330a162..4d565365e476 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c @@ -547,7 +547,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net, if (memcmp(eth->h_dest, net->dev_addr, net->addr_len)) skb->pkt_type = PACKET_OTHERHOST; } - if (ntohs(eth->h_proto) >= 1536) { + if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) { protocol = eth->h_proto; } else { rawp = (u16 *)skb->data; diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c index babc621a07fb..88d657dff474 100644 --- a/drivers/isdn/i4l/isdn_net.c +++ b/drivers/isdn/i4l/isdn_net.c @@ -1385,7 +1385,7 @@ isdn_net_type_trans(struct sk_buff *skb, struct net_device *dev) if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN)) skb->pkt_type = PACKET_OTHERHOST; } - if (ntohs(eth->h_proto) >= 1536) + if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) return eth->h_proto; rawp = skb->data; diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c index 44225b186f6d..83a23afb13ab 100644 --- a/drivers/media/dvb-core/dvb_net.c +++ b/drivers/media/dvb-core/dvb_net.c @@ -185,7 +185,7 @@ static __be16 dvb_net_eth_type_trans(struct sk_buff *skb, skb->pkt_type=PACKET_MULTICAST; } - if (ntohs(eth->h_proto) >= 1536) + if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) return eth->h_proto; rawp = skb->data; @@ -228,9 +228,9 @@ static int ule_test_sndu( struct dvb_net_priv *p ) static int ule_bridged_sndu( struct dvb_net_priv *p ) { struct ethhdr *hdr = (struct ethhdr*) p->ule_next_hdr; - if(ntohs(hdr->h_proto) < 1536) { + if(ntohs(hdr->h_proto) < ETH_P_802_3_MIN) { int framelen = p->ule_sndu_len - ((p->ule_next_hdr+sizeof(struct ethhdr)) - p->ule_skb->data); - /* A frame Type < 1536 for a bridged frame, introduces a LLC Length field. */ + /* A frame Type < ETH_P_802_3_MIN for a bridged frame, introduces a LLC Length field. */ if(framelen != ntohs(hdr->h_proto)) { return -1; } @@ -320,7 +320,7 @@ static int handle_ule_extensions( struct dvb_net_priv *p ) (int) p->ule_sndu_type, l, total_ext_len); #endif - } while (p->ule_sndu_type < 1536); + } while (p->ule_sndu_type < ETH_P_802_3_MIN); return total_ext_len; } @@ -712,7 +712,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len ) } /* Handle ULE Extension Headers. */ - if (priv->ule_sndu_type < 1536) { + if (priv->ule_sndu_type < ETH_P_802_3_MIN) { /* There is an extension header. Handle it accordingly. 
*/ int l = handle_ule_extensions(priv); if (l < 0) { diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index e4c1c88e4c2a..95cff98d8a34 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -6618,7 +6618,7 @@ static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr, (len << TXHDR_LEN_SHIFT) | ((l3off / 2) << TXHDR_L3START_SHIFT) | (ihl << TXHDR_IHL_SHIFT) | - ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) | + ((eth_proto_inner < ETH_P_802_3_MIN) ? TXHDR_LLC : 0) | ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) | (ipv6 ? TXHDR_IP_VER : 0) | csum_bits); diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c index bed62d9c53c8..1f7bef90b467 100644 --- a/drivers/net/plip/plip.c +++ b/drivers/net/plip/plip.c @@ -560,7 +560,7 @@ static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev) * so don't forget to remove it. */ - if (ntohs(eth->h_proto) >= 1536) + if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) return eth->h_proto; rawp = skb->data; diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index 4775b5d172d5..ebada812b3a5 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -953,7 +953,7 @@ static int translate_frame(ray_dev_t *local, struct tx_msg __iomem *ptx, unsigned char *data, int len) { __be16 proto = ((struct ethhdr *)data)->h_proto; - if (ntohs(proto) >= 1536) { /* DIX II ethernet frame */ + if (ntohs(proto) >= ETH_P_802_3_MIN) { /* DIX II ethernet frame */ pr_debug("ray_cs translate_frame DIX II\n"); /* Copy LLC header to card buffer */ memcpy_toio(&ptx->var, eth2_llc, sizeof(eth2_llc)); diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 218a3b686d90..70962f3fdb79 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -339,7 +339,7 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb, */ proto = vhdr->h_vlan_encapsulated_proto; - if (ntohs(proto) >= 1536) { + if (ntohs(proto) >= ETH_P_802_3_MIN) { skb->protocol = proto; return; } diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h index 798032d01112..ade07f1c491a 100644 --- a/include/uapi/linux/if_ether.h +++ b/include/uapi/linux/if_ether.h @@ -94,6 +94,9 @@ #define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */ #define ETH_P_AF_IUCV 0xFBFB /* IBM af_iucv [ NOT AN OFFICIALLY REGISTERED ID ] */ +#define ETH_P_802_3_MIN 0x0600 /* If the value in the ethernet type is less than this value + * then the frame is Ethernet II. Else it is 802.3 */ + /* * Non DIX types. Won't clash for 1500 types. */ diff --git a/net/atm/lec.h b/net/atm/lec.h index a86aff9a3c04..4149db1b7885 100644 --- a/net/atm/lec.h +++ b/net/atm/lec.h @@ -58,7 +58,7 @@ struct lane2_ops { * field in h_type field. Data follows immediately after header. * 2. LLC Data frames whose total length, including LLC field and data, * but not padding required to meet the minimum data frame length, - * is less than 1536(0x0600) MUST be encoded by placing that length + * is less than ETH_P_802_3_MIN MUST be encoded by placing that length * in the h_type field. The LLC field follows header immediately. * 3. LLC data frames longer than this maximum MUST be encoded by placing * the value 0 in the h_type field. 
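The 0x0600 cut-off that the new constant names can be illustrated with a minimal classification helper, written in the spirit of eth_type_trans() and parse_ethertype() above; it is a sketch, not code from this series.

/* Sketch: decide whether the 16-bit length/type field carries an
 * Ethernet II protocol ID or an 802.3 length.  Values >= ETH_P_802_3_MIN
 * (0x0600) are protocol IDs; smaller values mean 802.2/LLC follows. */
static __be16 example_classify(const struct ethhdr *eth)
{
	if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
		return eth->h_proto;		/* DIX / Ethernet II */

	return htons(ETH_P_802_2);		/* 802.3 length + LLC */
}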
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c index e58c8b32589c..4b488ec26105 100644 --- a/net/bluetooth/bnep/netdev.c +++ b/net/bluetooth/bnep/netdev.c @@ -136,7 +136,7 @@ static u16 bnep_net_eth_proto(struct sk_buff *skb) struct ethhdr *eh = (void *) skb->data; u16 proto = ntohs(eh->h_proto); - if (proto >= 1536) + if (proto >= ETH_P_802_3_MIN) return proto; if (get_unaligned((__be16 *) skb->data) == htons(0xFFFF)) diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 8d493c91a562..3d110c4fc787 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -138,7 +138,7 @@ ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb, ethproto = h->h_proto; if (e->bitmask & EBT_802_3) { - if (FWINV2(ntohs(ethproto) >= 1536, EBT_IPROTO)) + if (FWINV2(ntohs(ethproto) >= ETH_P_802_3_MIN, EBT_IPROTO)) return 1; } else if (!(e->bitmask & EBT_NOPROTO) && FWINV2(e->ethproto != ethproto, EBT_IPROTO)) diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index a36c85eab5b4..5359560926bc 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -195,7 +195,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev) if (netdev_uses_trailer_tags(dev)) return htons(ETH_P_TRAILER); - if (ntohs(eth->h_proto) >= 1536) + if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) return eth->h_proto; /* diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 8914d2d2881a..4e8a86163fc7 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -2085,7 +2085,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, encaps_data = bridge_tunnel_header; encaps_len = sizeof(bridge_tunnel_header); skip_header_bytes -= 2; - } else if (ethertype >= 0x600) { + } else if (ethertype >= ETH_P_802_3_MIN) { encaps_data = rfc1042_header; encaps_len = sizeof(rfc1042_header); skip_header_bytes -= 2; diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index d61cd9971808..8759265a3e46 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -681,7 +681,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) /* Normally, setting the skb 'protocol' field would be handled by a * call to eth_type_trans(), but it assumes there's a sending * device, which we may not have. 
*/ - if (ntohs(eth->h_proto) >= 1536) + if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) packet->protocol = eth->h_proto; else packet->protocol = htons(ETH_P_802_2); diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index fe0e4215c73d..332486839347 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -466,7 +466,7 @@ static __be16 parse_ethertype(struct sk_buff *skb) proto = *(__be16 *) skb->data; __skb_pull(skb, sizeof(__be16)); - if (ntohs(proto) >= 1536) + if (ntohs(proto) >= ETH_P_802_3_MIN) return proto; if (skb->len < sizeof(struct llc_snap_hdr)) @@ -483,7 +483,7 @@ static __be16 parse_ethertype(struct sk_buff *skb) __skb_pull(skb, sizeof(struct llc_snap_hdr)); - if (ntohs(llc->ethertype) >= 1536) + if (ntohs(llc->ethertype) >= ETH_P_802_3_MIN) return llc->ethertype; return htons(ETH_P_802_2); @@ -1038,7 +1038,7 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) { swkey->eth.type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]); - if (ntohs(swkey->eth.type) < 1536) + if (ntohs(swkey->eth.type) < ETH_P_802_3_MIN) return -EINVAL; attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE); } else { diff --git a/net/wireless/util.c b/net/wireless/util.c index 37a56ee1e1ed..6cbac99ae03d 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -511,7 +511,7 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr, encaps_data = bridge_tunnel_header; encaps_len = sizeof(bridge_tunnel_header); skip_header_bytes -= 2; - } else if (ethertype > 0x600) { + } else if (ethertype >= ETH_P_802_3_MIN) { encaps_data = rfc1042_header; encaps_len = sizeof(rfc1042_header); skip_header_bytes -= 2; -- cgit v1.2.3-70-g09d2 From a3f109bd793dfe5c611220ca5ab6c72f1aed479e Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Thu, 28 Mar 2013 11:51:31 +0000 Subject: sh_eth: add R-Car support for real Commit d0418bb7123f44b23d69ac349eec7daf9103472f (net: sh_eth: Add eth support for R8A7779 device) was a failed attempt to add support for one of members of the R-Car SoC family. That's for three reasons: it treated R8A7779 the same as SH7724 except including quite dirty hack adding ECMR_ELB bit to the mask in sh_eth_set_rate() while not removing ECMR_RTM bit (despite it's reserved in R-Car Ether), and it didn't add a new register offset array despite the closest SH_ETH_REG_FAST_SH4 mapping differs by 0x200 to the offsets all the R-Car Ether registers have, and also some of the registers in this old mapping don't exist on R-Car Ether (due to this, SH7724's 'sh_eth_my_cpu_data' structure is not adequeate for R-Car too). Fix all these shortcomings, restoring the SH7724 related section to its pristine state... Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 107 +++++++++++++++++++++++++++++++--- include/linux/sh_eth.h | 1 + 2 files changed, 100 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 13abe917cbdf..da604059b148 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2,7 +2,8 @@ * SuperH Ethernet device driver * * Copyright (C) 2006-2012 Nobuhiro Iwamatsu - * Copyright (C) 2008-2012 Renesas Solutions Corp. + * Copyright (C) 2008-2013 Renesas Solutions Corp. + * Copyright (C) 2013 Cogent Embedded, Inc. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -147,6 +148,51 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = { [FWALCR1] = 0x00b4, }; +static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = { + [ECMR] = 0x0300, + [RFLR] = 0x0308, + [ECSR] = 0x0310, + [ECSIPR] = 0x0318, + [PIR] = 0x0320, + [PSR] = 0x0328, + [RDMLR] = 0x0340, + [IPGR] = 0x0350, + [APR] = 0x0354, + [MPR] = 0x0358, + [RFCF] = 0x0360, + [TPAUSER] = 0x0364, + [TPAUSECR] = 0x0368, + [MAHR] = 0x03c0, + [MALR] = 0x03c8, + [TROCR] = 0x03d0, + [CDCR] = 0x03d4, + [LCCR] = 0x03d8, + [CNDCR] = 0x03dc, + [CEFCR] = 0x03e4, + [FRECR] = 0x03e8, + [TSFRCR] = 0x03ec, + [TLFRCR] = 0x03f0, + [RFCR] = 0x03f4, + [MAFCR] = 0x03f8, + + [EDMR] = 0x0200, + [EDTRR] = 0x0208, + [EDRRR] = 0x0210, + [TDLAR] = 0x0218, + [RDLAR] = 0x0220, + [EESR] = 0x0228, + [EESIPR] = 0x0230, + [TRSCER] = 0x0238, + [RMFCR] = 0x0240, + [TFTR] = 0x0248, + [FDR] = 0x0250, + [RMCR] = 0x0258, + [TFUCR] = 0x0264, + [RFOCR] = 0x0268, + [FCFTR] = 0x0270, + [TRIMD] = 0x027c, +}; + static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = { [ECMR] = 0x0100, [RFLR] = 0x0108, @@ -296,7 +342,7 @@ static void sh_eth_select_mii(struct net_device *ndev) #endif /* There is CPU dependent code */ -#if defined(CONFIG_CPU_SUBTYPE_SH7724) || defined(CONFIG_ARCH_R8A7779) +#if defined(CONFIG_ARCH_R8A7779) #define SH_ETH_RESET_DEFAULT 1 static void sh_eth_set_duplex(struct net_device *ndev) { @@ -311,18 +357,60 @@ static void sh_eth_set_duplex(struct net_device *ndev) static void sh_eth_set_rate(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); - unsigned int bits = ECMR_RTM; -#if defined(CONFIG_ARCH_R8A7779) - bits |= ECMR_ELB; -#endif + switch (mdp->speed) { + case 10: /* 10BASE */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR); + break; + case 100:/* 100BASE */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR); + break; + default: + break; + } +} + +/* R8A7779 */ +static struct sh_eth_cpu_data sh_eth_my_cpu_data = { + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate, + + .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, + .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, + .eesipr_value = 0x01ff009f, + + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | + EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, + .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, +}; +#elif defined(CONFIG_CPU_SUBTYPE_SH7724) +#define SH_ETH_RESET_DEFAULT 1 +static void sh_eth_set_duplex(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + if (mdp->duplex) /* Full */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); + else /* Half */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); +} + +static void sh_eth_set_rate(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); switch (mdp->speed) { case 10: /* 10BASE */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~bits, ECMR); + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR); break; case 100:/* 100BASE */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | bits, ECMR); + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR); break; default: break; @@ -2521,6 +2609,9 @@ static const u16 
*sh_eth_get_register_offset(int register_type) case SH_ETH_REG_GIGABIT: reg_offset = sh_eth_offset_gigabit; break; + case SH_ETH_REG_FAST_RCAR: + reg_offset = sh_eth_offset_fast_rcar; + break; case SH_ETH_REG_FAST_SH4: reg_offset = sh_eth_offset_fast_sh4; break; diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h index b17d765ded84..fc305713fc6d 100644 --- a/include/linux/sh_eth.h +++ b/include/linux/sh_eth.h @@ -6,6 +6,7 @@ enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN}; enum { SH_ETH_REG_GIGABIT, + SH_ETH_REG_FAST_RCAR, SH_ETH_REG_FAST_SH4, SH_ETH_REG_FAST_SH3_SH2 }; -- cgit v1.2.3-70-g09d2 From 14b57a10553b5b768f77b247e6dd285c65816064 Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Fri, 29 Mar 2013 14:46:51 +0100 Subject: openvswitch: Use ETH_ALEN to define ethernet addresses Signed-off-by: Thomas Graf Signed-off-by: Jesse Gross --- include/linux/openvswitch.h | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/openvswitch.h b/include/linux/openvswitch.h index 67d6c7b03581..8b9d7217eddc 100644 --- a/include/linux/openvswitch.h +++ b/include/linux/openvswitch.h @@ -20,6 +20,7 @@ #define _LINUX_OPENVSWITCH_H 1 #include +#include /** * struct ovs_header - header for OVS Generic Netlink messages. @@ -269,8 +270,8 @@ enum ovs_frag_type { #define OVS_FRAG_TYPE_MAX (__OVS_FRAG_TYPE_MAX - 1) struct ovs_key_ethernet { - __u8 eth_src[6]; - __u8 eth_dst[6]; + __u8 eth_src[ETH_ALEN]; + __u8 eth_dst[ETH_ALEN]; }; struct ovs_key_ipv4 { @@ -316,14 +317,14 @@ struct ovs_key_arp { __be32 arp_sip; __be32 arp_tip; __be16 arp_op; - __u8 arp_sha[6]; - __u8 arp_tha[6]; + __u8 arp_sha[ETH_ALEN]; + __u8 arp_tha[ETH_ALEN]; }; struct ovs_key_nd { __u32 nd_target[4]; - __u8 nd_sll[6]; - __u8 nd_tll[6]; + __u8 nd_sll[ETH_ALEN]; + __u8 nd_tll[ETH_ALEN]; }; /** -- cgit v1.2.3-70-g09d2 From 22e3880a76bb9a0c4fa5c8fefdc8697a36a4dae1 Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Fri, 29 Mar 2013 14:46:52 +0100 Subject: openvswitch: Expose to userspace It contains the public netlink interface bits required by userspace to make use of the interface. Signed-off-by: Thomas Graf Signed-off-by: Jesse Gross --- include/linux/openvswitch.h | 433 +------------------------------------ include/uapi/linux/Kbuild | 1 + include/uapi/linux/openvswitch.h | 456 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 458 insertions(+), 432 deletions(-) create mode 100644 include/uapi/linux/openvswitch.h (limited to 'include/linux') diff --git a/include/linux/openvswitch.h b/include/linux/openvswitch.h index 8b9d7217eddc..e6b240b6196c 100644 --- a/include/linux/openvswitch.h +++ b/include/linux/openvswitch.h @@ -19,437 +19,6 @@ #ifndef _LINUX_OPENVSWITCH_H #define _LINUX_OPENVSWITCH_H 1 -#include -#include - -/** - * struct ovs_header - header for OVS Generic Netlink messages. - * @dp_ifindex: ifindex of local port for datapath (0 to make a request not - * specific to a datapath). - * - * Attributes following the header are specific to a particular OVS Generic - * Netlink family, but all of the OVS families use this header. - */ - -struct ovs_header { - int dp_ifindex; -}; - -/* Datapaths. */ - -#define OVS_DATAPATH_FAMILY "ovs_datapath" -#define OVS_DATAPATH_MCGROUP "ovs_datapath" -#define OVS_DATAPATH_VERSION 0x1 - -enum ovs_datapath_cmd { - OVS_DP_CMD_UNSPEC, - OVS_DP_CMD_NEW, - OVS_DP_CMD_DEL, - OVS_DP_CMD_GET, - OVS_DP_CMD_SET -}; - -/** - * enum ovs_datapath_attr - attributes for %OVS_DP_* commands. 
- * @OVS_DP_ATTR_NAME: Name of the network device that serves as the "local - * port". This is the name of the network device whose dp_ifindex is given in - * the &struct ovs_header. Always present in notifications. Required in - * %OVS_DP_NEW requests. May be used as an alternative to specifying - * dp_ifindex in other requests (with a dp_ifindex of 0). - * @OVS_DP_ATTR_UPCALL_PID: The Netlink socket in userspace that is initially - * set on the datapath port (for OVS_ACTION_ATTR_MISS). Only valid on - * %OVS_DP_CMD_NEW requests. A value of zero indicates that upcalls should - * not be sent. - * @OVS_DP_ATTR_STATS: Statistics about packets that have passed through the - * datapath. Always present in notifications. - * - * These attributes follow the &struct ovs_header within the Generic Netlink - * payload for %OVS_DP_* commands. - */ -enum ovs_datapath_attr { - OVS_DP_ATTR_UNSPEC, - OVS_DP_ATTR_NAME, /* name of dp_ifindex netdev */ - OVS_DP_ATTR_UPCALL_PID, /* Netlink PID to receive upcalls */ - OVS_DP_ATTR_STATS, /* struct ovs_dp_stats */ - __OVS_DP_ATTR_MAX -}; - -#define OVS_DP_ATTR_MAX (__OVS_DP_ATTR_MAX - 1) - -struct ovs_dp_stats { - __u64 n_hit; /* Number of flow table matches. */ - __u64 n_missed; /* Number of flow table misses. */ - __u64 n_lost; /* Number of misses not sent to userspace. */ - __u64 n_flows; /* Number of flows present */ -}; - -struct ovs_vport_stats { - __u64 rx_packets; /* total packets received */ - __u64 tx_packets; /* total packets transmitted */ - __u64 rx_bytes; /* total bytes received */ - __u64 tx_bytes; /* total bytes transmitted */ - __u64 rx_errors; /* bad packets received */ - __u64 tx_errors; /* packet transmit problems */ - __u64 rx_dropped; /* no space in linux buffers */ - __u64 tx_dropped; /* no space available in linux */ -}; - -/* Fixed logical ports. */ -#define OVSP_LOCAL ((__u32)0) - -/* Packet transfer. */ - -#define OVS_PACKET_FAMILY "ovs_packet" -#define OVS_PACKET_VERSION 0x1 - -enum ovs_packet_cmd { - OVS_PACKET_CMD_UNSPEC, - - /* Kernel-to-user notifications. */ - OVS_PACKET_CMD_MISS, /* Flow table miss. */ - OVS_PACKET_CMD_ACTION, /* OVS_ACTION_ATTR_USERSPACE action. */ - - /* Userspace commands. */ - OVS_PACKET_CMD_EXECUTE /* Apply actions to a packet. */ -}; - -/** - * enum ovs_packet_attr - attributes for %OVS_PACKET_* commands. - * @OVS_PACKET_ATTR_PACKET: Present for all notifications. Contains the entire - * packet as received, from the start of the Ethernet header onward. For - * %OVS_PACKET_CMD_ACTION, %OVS_PACKET_ATTR_PACKET reflects changes made by - * actions preceding %OVS_ACTION_ATTR_USERSPACE, but %OVS_PACKET_ATTR_KEY is - * the flow key extracted from the packet as originally received. - * @OVS_PACKET_ATTR_KEY: Present for all notifications. Contains the flow key - * extracted from the packet as nested %OVS_KEY_ATTR_* attributes. This allows - * userspace to adapt its flow setup strategy by comparing its notion of the - * flow key against the kernel's. - * @OVS_PACKET_ATTR_ACTIONS: Contains actions for the packet. Used - * for %OVS_PACKET_CMD_EXECUTE. It has nested %OVS_ACTION_ATTR_* attributes. - * @OVS_PACKET_ATTR_USERDATA: Present for an %OVS_PACKET_CMD_ACTION - * notification if the %OVS_ACTION_ATTR_USERSPACE action specified an - * %OVS_USERSPACE_ATTR_USERDATA attribute, with the same length and content - * specified there. - * - * These attributes follow the &struct ovs_header within the Generic Netlink - * payload for %OVS_PACKET_* commands. 
- */ -enum ovs_packet_attr { - OVS_PACKET_ATTR_UNSPEC, - OVS_PACKET_ATTR_PACKET, /* Packet data. */ - OVS_PACKET_ATTR_KEY, /* Nested OVS_KEY_ATTR_* attributes. */ - OVS_PACKET_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */ - OVS_PACKET_ATTR_USERDATA, /* OVS_ACTION_ATTR_USERSPACE arg. */ - __OVS_PACKET_ATTR_MAX -}; - -#define OVS_PACKET_ATTR_MAX (__OVS_PACKET_ATTR_MAX - 1) - -/* Virtual ports. */ - -#define OVS_VPORT_FAMILY "ovs_vport" -#define OVS_VPORT_MCGROUP "ovs_vport" -#define OVS_VPORT_VERSION 0x1 - -enum ovs_vport_cmd { - OVS_VPORT_CMD_UNSPEC, - OVS_VPORT_CMD_NEW, - OVS_VPORT_CMD_DEL, - OVS_VPORT_CMD_GET, - OVS_VPORT_CMD_SET -}; - -enum ovs_vport_type { - OVS_VPORT_TYPE_UNSPEC, - OVS_VPORT_TYPE_NETDEV, /* network device */ - OVS_VPORT_TYPE_INTERNAL, /* network device implemented by datapath */ - __OVS_VPORT_TYPE_MAX -}; - -#define OVS_VPORT_TYPE_MAX (__OVS_VPORT_TYPE_MAX - 1) - -/** - * enum ovs_vport_attr - attributes for %OVS_VPORT_* commands. - * @OVS_VPORT_ATTR_PORT_NO: 32-bit port number within datapath. - * @OVS_VPORT_ATTR_TYPE: 32-bit %OVS_VPORT_TYPE_* constant describing the type - * of vport. - * @OVS_VPORT_ATTR_NAME: Name of vport. For a vport based on a network device - * this is the name of the network device. Maximum length %IFNAMSIZ-1 bytes - * plus a null terminator. - * @OVS_VPORT_ATTR_OPTIONS: Vport-specific configuration information. - * @OVS_VPORT_ATTR_UPCALL_PID: The Netlink socket in userspace that - * OVS_PACKET_CMD_MISS upcalls will be directed to for packets received on - * this port. A value of zero indicates that upcalls should not be sent. - * @OVS_VPORT_ATTR_STATS: A &struct ovs_vport_stats giving statistics for - * packets sent or received through the vport. - * - * These attributes follow the &struct ovs_header within the Generic Netlink - * payload for %OVS_VPORT_* commands. - * - * For %OVS_VPORT_CMD_NEW requests, the %OVS_VPORT_ATTR_TYPE and - * %OVS_VPORT_ATTR_NAME attributes are required. %OVS_VPORT_ATTR_PORT_NO is - * optional; if not specified a free port number is automatically selected. - * Whether %OVS_VPORT_ATTR_OPTIONS is required or optional depends on the type - * of vport. - * and other attributes are ignored. - * - * For other requests, if %OVS_VPORT_ATTR_NAME is specified then it is used to - * look up the vport to operate on; otherwise dp_idx from the &struct - * ovs_header plus %OVS_VPORT_ATTR_PORT_NO determine the vport. - */ -enum ovs_vport_attr { - OVS_VPORT_ATTR_UNSPEC, - OVS_VPORT_ATTR_PORT_NO, /* u32 port number within datapath */ - OVS_VPORT_ATTR_TYPE, /* u32 OVS_VPORT_TYPE_* constant. */ - OVS_VPORT_ATTR_NAME, /* string name, up to IFNAMSIZ bytes long */ - OVS_VPORT_ATTR_OPTIONS, /* nested attributes, varies by vport type */ - OVS_VPORT_ATTR_UPCALL_PID, /* u32 Netlink PID to receive upcalls */ - OVS_VPORT_ATTR_STATS, /* struct ovs_vport_stats */ - __OVS_VPORT_ATTR_MAX -}; - -#define OVS_VPORT_ATTR_MAX (__OVS_VPORT_ATTR_MAX - 1) - -/* Flows. */ - -#define OVS_FLOW_FAMILY "ovs_flow" -#define OVS_FLOW_MCGROUP "ovs_flow" -#define OVS_FLOW_VERSION 0x1 - -enum ovs_flow_cmd { - OVS_FLOW_CMD_UNSPEC, - OVS_FLOW_CMD_NEW, - OVS_FLOW_CMD_DEL, - OVS_FLOW_CMD_GET, - OVS_FLOW_CMD_SET -}; - -struct ovs_flow_stats { - __u64 n_packets; /* Number of matched packets. */ - __u64 n_bytes; /* Number of matched bytes. */ -}; - -enum ovs_key_attr { - OVS_KEY_ATTR_UNSPEC, - OVS_KEY_ATTR_ENCAP, /* Nested set of encapsulated attributes. 
*/ - OVS_KEY_ATTR_PRIORITY, /* u32 skb->priority */ - OVS_KEY_ATTR_IN_PORT, /* u32 OVS dp port number */ - OVS_KEY_ATTR_ETHERNET, /* struct ovs_key_ethernet */ - OVS_KEY_ATTR_VLAN, /* be16 VLAN TCI */ - OVS_KEY_ATTR_ETHERTYPE, /* be16 Ethernet type */ - OVS_KEY_ATTR_IPV4, /* struct ovs_key_ipv4 */ - OVS_KEY_ATTR_IPV6, /* struct ovs_key_ipv6 */ - OVS_KEY_ATTR_TCP, /* struct ovs_key_tcp */ - OVS_KEY_ATTR_UDP, /* struct ovs_key_udp */ - OVS_KEY_ATTR_ICMP, /* struct ovs_key_icmp */ - OVS_KEY_ATTR_ICMPV6, /* struct ovs_key_icmpv6 */ - OVS_KEY_ATTR_ARP, /* struct ovs_key_arp */ - OVS_KEY_ATTR_ND, /* struct ovs_key_nd */ - OVS_KEY_ATTR_SKB_MARK, /* u32 skb mark */ - __OVS_KEY_ATTR_MAX -}; - -#define OVS_KEY_ATTR_MAX (__OVS_KEY_ATTR_MAX - 1) - -/** - * enum ovs_frag_type - IPv4 and IPv6 fragment type - * @OVS_FRAG_TYPE_NONE: Packet is not a fragment. - * @OVS_FRAG_TYPE_FIRST: Packet is a fragment with offset 0. - * @OVS_FRAG_TYPE_LATER: Packet is a fragment with nonzero offset. - * - * Used as the @ipv4_frag in &struct ovs_key_ipv4 and as @ipv6_frag &struct - * ovs_key_ipv6. - */ -enum ovs_frag_type { - OVS_FRAG_TYPE_NONE, - OVS_FRAG_TYPE_FIRST, - OVS_FRAG_TYPE_LATER, - __OVS_FRAG_TYPE_MAX -}; - -#define OVS_FRAG_TYPE_MAX (__OVS_FRAG_TYPE_MAX - 1) - -struct ovs_key_ethernet { - __u8 eth_src[ETH_ALEN]; - __u8 eth_dst[ETH_ALEN]; -}; - -struct ovs_key_ipv4 { - __be32 ipv4_src; - __be32 ipv4_dst; - __u8 ipv4_proto; - __u8 ipv4_tos; - __u8 ipv4_ttl; - __u8 ipv4_frag; /* One of OVS_FRAG_TYPE_*. */ -}; - -struct ovs_key_ipv6 { - __be32 ipv6_src[4]; - __be32 ipv6_dst[4]; - __be32 ipv6_label; /* 20-bits in least-significant bits. */ - __u8 ipv6_proto; - __u8 ipv6_tclass; - __u8 ipv6_hlimit; - __u8 ipv6_frag; /* One of OVS_FRAG_TYPE_*. */ -}; - -struct ovs_key_tcp { - __be16 tcp_src; - __be16 tcp_dst; -}; - -struct ovs_key_udp { - __be16 udp_src; - __be16 udp_dst; -}; - -struct ovs_key_icmp { - __u8 icmp_type; - __u8 icmp_code; -}; - -struct ovs_key_icmpv6 { - __u8 icmpv6_type; - __u8 icmpv6_code; -}; - -struct ovs_key_arp { - __be32 arp_sip; - __be32 arp_tip; - __be16 arp_op; - __u8 arp_sha[ETH_ALEN]; - __u8 arp_tha[ETH_ALEN]; -}; - -struct ovs_key_nd { - __u32 nd_target[4]; - __u8 nd_sll[ETH_ALEN]; - __u8 nd_tll[ETH_ALEN]; -}; - -/** - * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands. - * @OVS_FLOW_ATTR_KEY: Nested %OVS_KEY_ATTR_* attributes specifying the flow - * key. Always present in notifications. Required for all requests (except - * dumps). - * @OVS_FLOW_ATTR_ACTIONS: Nested %OVS_ACTION_ATTR_* attributes specifying - * the actions to take for packets that match the key. Always present in - * notifications. Required for %OVS_FLOW_CMD_NEW requests, optional for - * %OVS_FLOW_CMD_SET requests. - * @OVS_FLOW_ATTR_STATS: &struct ovs_flow_stats giving statistics for this - * flow. Present in notifications if the stats would be nonzero. Ignored in - * requests. - * @OVS_FLOW_ATTR_TCP_FLAGS: An 8-bit value giving the OR'd value of all of the - * TCP flags seen on packets in this flow. Only present in notifications for - * TCP flows, and only if it would be nonzero. Ignored in requests. - * @OVS_FLOW_ATTR_USED: A 64-bit integer giving the time, in milliseconds on - * the system monotonic clock, at which a packet was last processed for this - * flow. Only present in notifications if a packet has been processed for this - * flow. Ignored in requests. 
- * @OVS_FLOW_ATTR_CLEAR: If present in a %OVS_FLOW_CMD_SET request, clears the - * last-used time, accumulated TCP flags, and statistics for this flow. - * Otherwise ignored in requests. Never present in notifications. - * - * These attributes follow the &struct ovs_header within the Generic Netlink - * payload for %OVS_FLOW_* commands. - */ -enum ovs_flow_attr { - OVS_FLOW_ATTR_UNSPEC, - OVS_FLOW_ATTR_KEY, /* Sequence of OVS_KEY_ATTR_* attributes. */ - OVS_FLOW_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */ - OVS_FLOW_ATTR_STATS, /* struct ovs_flow_stats. */ - OVS_FLOW_ATTR_TCP_FLAGS, /* 8-bit OR'd TCP flags. */ - OVS_FLOW_ATTR_USED, /* u64 msecs last used in monotonic time. */ - OVS_FLOW_ATTR_CLEAR, /* Flag to clear stats, tcp_flags, used. */ - __OVS_FLOW_ATTR_MAX -}; - -#define OVS_FLOW_ATTR_MAX (__OVS_FLOW_ATTR_MAX - 1) - -/** - * enum ovs_sample_attr - Attributes for %OVS_ACTION_ATTR_SAMPLE action. - * @OVS_SAMPLE_ATTR_PROBABILITY: 32-bit fraction of packets to sample with - * @OVS_ACTION_ATTR_SAMPLE. A value of 0 samples no packets, a value of - * %UINT32_MAX samples all packets and intermediate values sample intermediate - * fractions of packets. - * @OVS_SAMPLE_ATTR_ACTIONS: Set of actions to execute in sampling event. - * Actions are passed as nested attributes. - * - * Executes the specified actions with the given probability on a per-packet - * basis. - */ -enum ovs_sample_attr { - OVS_SAMPLE_ATTR_UNSPEC, - OVS_SAMPLE_ATTR_PROBABILITY, /* u32 number */ - OVS_SAMPLE_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */ - __OVS_SAMPLE_ATTR_MAX, -}; - -#define OVS_SAMPLE_ATTR_MAX (__OVS_SAMPLE_ATTR_MAX - 1) - -/** - * enum ovs_userspace_attr - Attributes for %OVS_ACTION_ATTR_USERSPACE action. - * @OVS_USERSPACE_ATTR_PID: u32 Netlink PID to which the %OVS_PACKET_CMD_ACTION - * message should be sent. Required. - * @OVS_USERSPACE_ATTR_USERDATA: If present, its variable-length argument is - * copied to the %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA. - */ -enum ovs_userspace_attr { - OVS_USERSPACE_ATTR_UNSPEC, - OVS_USERSPACE_ATTR_PID, /* u32 Netlink PID to receive upcalls. */ - OVS_USERSPACE_ATTR_USERDATA, /* Optional user-specified cookie. */ - __OVS_USERSPACE_ATTR_MAX -}; - -#define OVS_USERSPACE_ATTR_MAX (__OVS_USERSPACE_ATTR_MAX - 1) - -/** - * struct ovs_action_push_vlan - %OVS_ACTION_ATTR_PUSH_VLAN action argument. - * @vlan_tpid: Tag protocol identifier (TPID) to push. - * @vlan_tci: Tag control identifier (TCI) to push. The CFI bit must be set - * (but it will not be set in the 802.1Q header that is pushed). - * - * The @vlan_tpid value is typically %ETH_P_8021Q. The only acceptable TPID - * values are those that the kernel module also parses as 802.1Q headers, to - * prevent %OVS_ACTION_ATTR_PUSH_VLAN followed by %OVS_ACTION_ATTR_POP_VLAN - * from having surprising results. - */ -struct ovs_action_push_vlan { - __be16 vlan_tpid; /* 802.1Q TPID. */ - __be16 vlan_tci; /* 802.1Q TCI (VLAN ID and priority). */ -}; - -/** - * enum ovs_action_attr - Action types. - * - * @OVS_ACTION_ATTR_OUTPUT: Output packet to port. - * @OVS_ACTION_ATTR_USERSPACE: Send packet to userspace according to nested - * %OVS_USERSPACE_ATTR_* attributes. - * @OVS_ACTION_ATTR_SET: Replaces the contents of an existing header. The - * single nested %OVS_KEY_ATTR_* attribute specifies a header to modify and its - * value. - * @OVS_ACTION_ATTR_PUSH_VLAN: Push a new outermost 802.1Q header onto the - * packet. 
- * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet. - * @OVS_ACTION_ATTR_SAMPLE: Probabilitically executes actions, as specified in - * the nested %OVS_SAMPLE_ATTR_* attributes. - * - * Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all - * fields within a header are modifiable, e.g. the IPv4 protocol and fragment - * type may not be changed. - */ - -enum ovs_action_attr { - OVS_ACTION_ATTR_UNSPEC, - OVS_ACTION_ATTR_OUTPUT, /* u32 port number. */ - OVS_ACTION_ATTR_USERSPACE, /* Nested OVS_USERSPACE_ATTR_*. */ - OVS_ACTION_ATTR_SET, /* One nested OVS_KEY_ATTR_*. */ - OVS_ACTION_ATTR_PUSH_VLAN, /* struct ovs_action_push_vlan. */ - OVS_ACTION_ATTR_POP_VLAN, /* No argument. */ - OVS_ACTION_ATTR_SAMPLE, /* Nested OVS_SAMPLE_ATTR_*. */ - __OVS_ACTION_ATTR_MAX -}; - -#define OVS_ACTION_ATTR_MAX (__OVS_ACTION_ATTR_MAX - 1) +#include #endif /* _LINUX_OPENVSWITCH_H */ diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 5c8a1d25e21c..d8fbc6aeac86 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -285,6 +285,7 @@ header-y += nvram.h header-y += omap3isp.h header-y += omapfb.h header-y += oom.h +header-y += openvswitch.h header-y += packet_diag.h header-y += param.h header-y += parport.h diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h new file mode 100644 index 000000000000..405918dd7b3f --- /dev/null +++ b/include/uapi/linux/openvswitch.h @@ -0,0 +1,456 @@ + +/* + * Copyright (c) 2007-2011 Nicira Networks. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + */ + +#ifndef _UAPI__LINUX_OPENVSWITCH_H +#define _UAPI__LINUX_OPENVSWITCH_H 1 + +#include +#include + +/** + * struct ovs_header - header for OVS Generic Netlink messages. + * @dp_ifindex: ifindex of local port for datapath (0 to make a request not + * specific to a datapath). + * + * Attributes following the header are specific to a particular OVS Generic + * Netlink family, but all of the OVS families use this header. + */ + +struct ovs_header { + int dp_ifindex; +}; + +/* Datapaths. */ + +#define OVS_DATAPATH_FAMILY "ovs_datapath" +#define OVS_DATAPATH_MCGROUP "ovs_datapath" +#define OVS_DATAPATH_VERSION 0x1 + +enum ovs_datapath_cmd { + OVS_DP_CMD_UNSPEC, + OVS_DP_CMD_NEW, + OVS_DP_CMD_DEL, + OVS_DP_CMD_GET, + OVS_DP_CMD_SET +}; + +/** + * enum ovs_datapath_attr - attributes for %OVS_DP_* commands. + * @OVS_DP_ATTR_NAME: Name of the network device that serves as the "local + * port". This is the name of the network device whose dp_ifindex is given in + * the &struct ovs_header. Always present in notifications. Required in + * %OVS_DP_NEW requests. May be used as an alternative to specifying + * dp_ifindex in other requests (with a dp_ifindex of 0). + * @OVS_DP_ATTR_UPCALL_PID: The Netlink socket in userspace that is initially + * set on the datapath port (for OVS_ACTION_ATTR_MISS). 
Only valid on + * %OVS_DP_CMD_NEW requests. A value of zero indicates that upcalls should + * not be sent. + * @OVS_DP_ATTR_STATS: Statistics about packets that have passed through the + * datapath. Always present in notifications. + * + * These attributes follow the &struct ovs_header within the Generic Netlink + * payload for %OVS_DP_* commands. + */ +enum ovs_datapath_attr { + OVS_DP_ATTR_UNSPEC, + OVS_DP_ATTR_NAME, /* name of dp_ifindex netdev */ + OVS_DP_ATTR_UPCALL_PID, /* Netlink PID to receive upcalls */ + OVS_DP_ATTR_STATS, /* struct ovs_dp_stats */ + __OVS_DP_ATTR_MAX +}; + +#define OVS_DP_ATTR_MAX (__OVS_DP_ATTR_MAX - 1) + +struct ovs_dp_stats { + __u64 n_hit; /* Number of flow table matches. */ + __u64 n_missed; /* Number of flow table misses. */ + __u64 n_lost; /* Number of misses not sent to userspace. */ + __u64 n_flows; /* Number of flows present */ +}; + +struct ovs_vport_stats { + __u64 rx_packets; /* total packets received */ + __u64 tx_packets; /* total packets transmitted */ + __u64 rx_bytes; /* total bytes received */ + __u64 tx_bytes; /* total bytes transmitted */ + __u64 rx_errors; /* bad packets received */ + __u64 tx_errors; /* packet transmit problems */ + __u64 rx_dropped; /* no space in linux buffers */ + __u64 tx_dropped; /* no space available in linux */ +}; + +/* Fixed logical ports. */ +#define OVSP_LOCAL ((__u32)0) + +/* Packet transfer. */ + +#define OVS_PACKET_FAMILY "ovs_packet" +#define OVS_PACKET_VERSION 0x1 + +enum ovs_packet_cmd { + OVS_PACKET_CMD_UNSPEC, + + /* Kernel-to-user notifications. */ + OVS_PACKET_CMD_MISS, /* Flow table miss. */ + OVS_PACKET_CMD_ACTION, /* OVS_ACTION_ATTR_USERSPACE action. */ + + /* Userspace commands. */ + OVS_PACKET_CMD_EXECUTE /* Apply actions to a packet. */ +}; + +/** + * enum ovs_packet_attr - attributes for %OVS_PACKET_* commands. + * @OVS_PACKET_ATTR_PACKET: Present for all notifications. Contains the entire + * packet as received, from the start of the Ethernet header onward. For + * %OVS_PACKET_CMD_ACTION, %OVS_PACKET_ATTR_PACKET reflects changes made by + * actions preceding %OVS_ACTION_ATTR_USERSPACE, but %OVS_PACKET_ATTR_KEY is + * the flow key extracted from the packet as originally received. + * @OVS_PACKET_ATTR_KEY: Present for all notifications. Contains the flow key + * extracted from the packet as nested %OVS_KEY_ATTR_* attributes. This allows + * userspace to adapt its flow setup strategy by comparing its notion of the + * flow key against the kernel's. + * @OVS_PACKET_ATTR_ACTIONS: Contains actions for the packet. Used + * for %OVS_PACKET_CMD_EXECUTE. It has nested %OVS_ACTION_ATTR_* attributes. + * @OVS_PACKET_ATTR_USERDATA: Present for an %OVS_PACKET_CMD_ACTION + * notification if the %OVS_ACTION_ATTR_USERSPACE action specified an + * %OVS_USERSPACE_ATTR_USERDATA attribute, with the same length and content + * specified there. + * + * These attributes follow the &struct ovs_header within the Generic Netlink + * payload for %OVS_PACKET_* commands. + */ +enum ovs_packet_attr { + OVS_PACKET_ATTR_UNSPEC, + OVS_PACKET_ATTR_PACKET, /* Packet data. */ + OVS_PACKET_ATTR_KEY, /* Nested OVS_KEY_ATTR_* attributes. */ + OVS_PACKET_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */ + OVS_PACKET_ATTR_USERDATA, /* OVS_ACTION_ATTR_USERSPACE arg. */ + __OVS_PACKET_ATTR_MAX +}; + +#define OVS_PACKET_ATTR_MAX (__OVS_PACKET_ATTR_MAX - 1) + +/* Virtual ports. 
*/ + +#define OVS_VPORT_FAMILY "ovs_vport" +#define OVS_VPORT_MCGROUP "ovs_vport" +#define OVS_VPORT_VERSION 0x1 + +enum ovs_vport_cmd { + OVS_VPORT_CMD_UNSPEC, + OVS_VPORT_CMD_NEW, + OVS_VPORT_CMD_DEL, + OVS_VPORT_CMD_GET, + OVS_VPORT_CMD_SET +}; + +enum ovs_vport_type { + OVS_VPORT_TYPE_UNSPEC, + OVS_VPORT_TYPE_NETDEV, /* network device */ + OVS_VPORT_TYPE_INTERNAL, /* network device implemented by datapath */ + __OVS_VPORT_TYPE_MAX +}; + +#define OVS_VPORT_TYPE_MAX (__OVS_VPORT_TYPE_MAX - 1) + +/** + * enum ovs_vport_attr - attributes for %OVS_VPORT_* commands. + * @OVS_VPORT_ATTR_PORT_NO: 32-bit port number within datapath. + * @OVS_VPORT_ATTR_TYPE: 32-bit %OVS_VPORT_TYPE_* constant describing the type + * of vport. + * @OVS_VPORT_ATTR_NAME: Name of vport. For a vport based on a network device + * this is the name of the network device. Maximum length %IFNAMSIZ-1 bytes + * plus a null terminator. + * @OVS_VPORT_ATTR_OPTIONS: Vport-specific configuration information. + * @OVS_VPORT_ATTR_UPCALL_PID: The Netlink socket in userspace that + * OVS_PACKET_CMD_MISS upcalls will be directed to for packets received on + * this port. A value of zero indicates that upcalls should not be sent. + * @OVS_VPORT_ATTR_STATS: A &struct ovs_vport_stats giving statistics for + * packets sent or received through the vport. + * + * These attributes follow the &struct ovs_header within the Generic Netlink + * payload for %OVS_VPORT_* commands. + * + * For %OVS_VPORT_CMD_NEW requests, the %OVS_VPORT_ATTR_TYPE and + * %OVS_VPORT_ATTR_NAME attributes are required. %OVS_VPORT_ATTR_PORT_NO is + * optional; if not specified a free port number is automatically selected. + * Whether %OVS_VPORT_ATTR_OPTIONS is required or optional depends on the type + * of vport. + * and other attributes are ignored. + * + * For other requests, if %OVS_VPORT_ATTR_NAME is specified then it is used to + * look up the vport to operate on; otherwise dp_idx from the &struct + * ovs_header plus %OVS_VPORT_ATTR_PORT_NO determine the vport. + */ +enum ovs_vport_attr { + OVS_VPORT_ATTR_UNSPEC, + OVS_VPORT_ATTR_PORT_NO, /* u32 port number within datapath */ + OVS_VPORT_ATTR_TYPE, /* u32 OVS_VPORT_TYPE_* constant. */ + OVS_VPORT_ATTR_NAME, /* string name, up to IFNAMSIZ bytes long */ + OVS_VPORT_ATTR_OPTIONS, /* nested attributes, varies by vport type */ + OVS_VPORT_ATTR_UPCALL_PID, /* u32 Netlink PID to receive upcalls */ + OVS_VPORT_ATTR_STATS, /* struct ovs_vport_stats */ + __OVS_VPORT_ATTR_MAX +}; + +#define OVS_VPORT_ATTR_MAX (__OVS_VPORT_ATTR_MAX - 1) + +/* Flows. */ + +#define OVS_FLOW_FAMILY "ovs_flow" +#define OVS_FLOW_MCGROUP "ovs_flow" +#define OVS_FLOW_VERSION 0x1 + +enum ovs_flow_cmd { + OVS_FLOW_CMD_UNSPEC, + OVS_FLOW_CMD_NEW, + OVS_FLOW_CMD_DEL, + OVS_FLOW_CMD_GET, + OVS_FLOW_CMD_SET +}; + +struct ovs_flow_stats { + __u64 n_packets; /* Number of matched packets. */ + __u64 n_bytes; /* Number of matched bytes. */ +}; + +enum ovs_key_attr { + OVS_KEY_ATTR_UNSPEC, + OVS_KEY_ATTR_ENCAP, /* Nested set of encapsulated attributes. 
*/ + OVS_KEY_ATTR_PRIORITY, /* u32 skb->priority */ + OVS_KEY_ATTR_IN_PORT, /* u32 OVS dp port number */ + OVS_KEY_ATTR_ETHERNET, /* struct ovs_key_ethernet */ + OVS_KEY_ATTR_VLAN, /* be16 VLAN TCI */ + OVS_KEY_ATTR_ETHERTYPE, /* be16 Ethernet type */ + OVS_KEY_ATTR_IPV4, /* struct ovs_key_ipv4 */ + OVS_KEY_ATTR_IPV6, /* struct ovs_key_ipv6 */ + OVS_KEY_ATTR_TCP, /* struct ovs_key_tcp */ + OVS_KEY_ATTR_UDP, /* struct ovs_key_udp */ + OVS_KEY_ATTR_ICMP, /* struct ovs_key_icmp */ + OVS_KEY_ATTR_ICMPV6, /* struct ovs_key_icmpv6 */ + OVS_KEY_ATTR_ARP, /* struct ovs_key_arp */ + OVS_KEY_ATTR_ND, /* struct ovs_key_nd */ + OVS_KEY_ATTR_SKB_MARK, /* u32 skb mark */ + __OVS_KEY_ATTR_MAX +}; + +#define OVS_KEY_ATTR_MAX (__OVS_KEY_ATTR_MAX - 1) + +/** + * enum ovs_frag_type - IPv4 and IPv6 fragment type + * @OVS_FRAG_TYPE_NONE: Packet is not a fragment. + * @OVS_FRAG_TYPE_FIRST: Packet is a fragment with offset 0. + * @OVS_FRAG_TYPE_LATER: Packet is a fragment with nonzero offset. + * + * Used as the @ipv4_frag in &struct ovs_key_ipv4 and as @ipv6_frag &struct + * ovs_key_ipv6. + */ +enum ovs_frag_type { + OVS_FRAG_TYPE_NONE, + OVS_FRAG_TYPE_FIRST, + OVS_FRAG_TYPE_LATER, + __OVS_FRAG_TYPE_MAX +}; + +#define OVS_FRAG_TYPE_MAX (__OVS_FRAG_TYPE_MAX - 1) + +struct ovs_key_ethernet { + __u8 eth_src[ETH_ALEN]; + __u8 eth_dst[ETH_ALEN]; +}; + +struct ovs_key_ipv4 { + __be32 ipv4_src; + __be32 ipv4_dst; + __u8 ipv4_proto; + __u8 ipv4_tos; + __u8 ipv4_ttl; + __u8 ipv4_frag; /* One of OVS_FRAG_TYPE_*. */ +}; + +struct ovs_key_ipv6 { + __be32 ipv6_src[4]; + __be32 ipv6_dst[4]; + __be32 ipv6_label; /* 20-bits in least-significant bits. */ + __u8 ipv6_proto; + __u8 ipv6_tclass; + __u8 ipv6_hlimit; + __u8 ipv6_frag; /* One of OVS_FRAG_TYPE_*. */ +}; + +struct ovs_key_tcp { + __be16 tcp_src; + __be16 tcp_dst; +}; + +struct ovs_key_udp { + __be16 udp_src; + __be16 udp_dst; +}; + +struct ovs_key_icmp { + __u8 icmp_type; + __u8 icmp_code; +}; + +struct ovs_key_icmpv6 { + __u8 icmpv6_type; + __u8 icmpv6_code; +}; + +struct ovs_key_arp { + __be32 arp_sip; + __be32 arp_tip; + __be16 arp_op; + __u8 arp_sha[ETH_ALEN]; + __u8 arp_tha[ETH_ALEN]; +}; + +struct ovs_key_nd { + __u32 nd_target[4]; + __u8 nd_sll[ETH_ALEN]; + __u8 nd_tll[ETH_ALEN]; +}; + +/** + * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands. + * @OVS_FLOW_ATTR_KEY: Nested %OVS_KEY_ATTR_* attributes specifying the flow + * key. Always present in notifications. Required for all requests (except + * dumps). + * @OVS_FLOW_ATTR_ACTIONS: Nested %OVS_ACTION_ATTR_* attributes specifying + * the actions to take for packets that match the key. Always present in + * notifications. Required for %OVS_FLOW_CMD_NEW requests, optional for + * %OVS_FLOW_CMD_SET requests. + * @OVS_FLOW_ATTR_STATS: &struct ovs_flow_stats giving statistics for this + * flow. Present in notifications if the stats would be nonzero. Ignored in + * requests. + * @OVS_FLOW_ATTR_TCP_FLAGS: An 8-bit value giving the OR'd value of all of the + * TCP flags seen on packets in this flow. Only present in notifications for + * TCP flows, and only if it would be nonzero. Ignored in requests. + * @OVS_FLOW_ATTR_USED: A 64-bit integer giving the time, in milliseconds on + * the system monotonic clock, at which a packet was last processed for this + * flow. Only present in notifications if a packet has been processed for this + * flow. Ignored in requests. 
+ * @OVS_FLOW_ATTR_CLEAR: If present in a %OVS_FLOW_CMD_SET request, clears the + * last-used time, accumulated TCP flags, and statistics for this flow. + * Otherwise ignored in requests. Never present in notifications. + * + * These attributes follow the &struct ovs_header within the Generic Netlink + * payload for %OVS_FLOW_* commands. + */ +enum ovs_flow_attr { + OVS_FLOW_ATTR_UNSPEC, + OVS_FLOW_ATTR_KEY, /* Sequence of OVS_KEY_ATTR_* attributes. */ + OVS_FLOW_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */ + OVS_FLOW_ATTR_STATS, /* struct ovs_flow_stats. */ + OVS_FLOW_ATTR_TCP_FLAGS, /* 8-bit OR'd TCP flags. */ + OVS_FLOW_ATTR_USED, /* u64 msecs last used in monotonic time. */ + OVS_FLOW_ATTR_CLEAR, /* Flag to clear stats, tcp_flags, used. */ + __OVS_FLOW_ATTR_MAX +}; + +#define OVS_FLOW_ATTR_MAX (__OVS_FLOW_ATTR_MAX - 1) + +/** + * enum ovs_sample_attr - Attributes for %OVS_ACTION_ATTR_SAMPLE action. + * @OVS_SAMPLE_ATTR_PROBABILITY: 32-bit fraction of packets to sample with + * @OVS_ACTION_ATTR_SAMPLE. A value of 0 samples no packets, a value of + * %UINT32_MAX samples all packets and intermediate values sample intermediate + * fractions of packets. + * @OVS_SAMPLE_ATTR_ACTIONS: Set of actions to execute in sampling event. + * Actions are passed as nested attributes. + * + * Executes the specified actions with the given probability on a per-packet + * basis. + */ +enum ovs_sample_attr { + OVS_SAMPLE_ATTR_UNSPEC, + OVS_SAMPLE_ATTR_PROBABILITY, /* u32 number */ + OVS_SAMPLE_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */ + __OVS_SAMPLE_ATTR_MAX, +}; + +#define OVS_SAMPLE_ATTR_MAX (__OVS_SAMPLE_ATTR_MAX - 1) + +/** + * enum ovs_userspace_attr - Attributes for %OVS_ACTION_ATTR_USERSPACE action. + * @OVS_USERSPACE_ATTR_PID: u32 Netlink PID to which the %OVS_PACKET_CMD_ACTION + * message should be sent. Required. + * @OVS_USERSPACE_ATTR_USERDATA: If present, its variable-length argument is + * copied to the %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA. + */ +enum ovs_userspace_attr { + OVS_USERSPACE_ATTR_UNSPEC, + OVS_USERSPACE_ATTR_PID, /* u32 Netlink PID to receive upcalls. */ + OVS_USERSPACE_ATTR_USERDATA, /* Optional user-specified cookie. */ + __OVS_USERSPACE_ATTR_MAX +}; + +#define OVS_USERSPACE_ATTR_MAX (__OVS_USERSPACE_ATTR_MAX - 1) + +/** + * struct ovs_action_push_vlan - %OVS_ACTION_ATTR_PUSH_VLAN action argument. + * @vlan_tpid: Tag protocol identifier (TPID) to push. + * @vlan_tci: Tag control identifier (TCI) to push. The CFI bit must be set + * (but it will not be set in the 802.1Q header that is pushed). + * + * The @vlan_tpid value is typically %ETH_P_8021Q. The only acceptable TPID + * values are those that the kernel module also parses as 802.1Q headers, to + * prevent %OVS_ACTION_ATTR_PUSH_VLAN followed by %OVS_ACTION_ATTR_POP_VLAN + * from having surprising results. + */ +struct ovs_action_push_vlan { + __be16 vlan_tpid; /* 802.1Q TPID. */ + __be16 vlan_tci; /* 802.1Q TCI (VLAN ID and priority). */ +}; + +/** + * enum ovs_action_attr - Action types. + * + * @OVS_ACTION_ATTR_OUTPUT: Output packet to port. + * @OVS_ACTION_ATTR_USERSPACE: Send packet to userspace according to nested + * %OVS_USERSPACE_ATTR_* attributes. + * @OVS_ACTION_ATTR_SET: Replaces the contents of an existing header. The + * single nested %OVS_KEY_ATTR_* attribute specifies a header to modify and its + * value. + * @OVS_ACTION_ATTR_PUSH_VLAN: Push a new outermost 802.1Q header onto the + * packet. 
+ * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet. + * @OVS_ACTION_ATTR_SAMPLE: Probabilitically executes actions, as specified in + * the nested %OVS_SAMPLE_ATTR_* attributes. + * + * Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all + * fields within a header are modifiable, e.g. the IPv4 protocol and fragment + * type may not be changed. + */ + +enum ovs_action_attr { + OVS_ACTION_ATTR_UNSPEC, + OVS_ACTION_ATTR_OUTPUT, /* u32 port number. */ + OVS_ACTION_ATTR_USERSPACE, /* Nested OVS_USERSPACE_ATTR_*. */ + OVS_ACTION_ATTR_SET, /* One nested OVS_KEY_ATTR_*. */ + OVS_ACTION_ATTR_PUSH_VLAN, /* struct ovs_action_push_vlan. */ + OVS_ACTION_ATTR_POP_VLAN, /* No argument. */ + OVS_ACTION_ATTR_SAMPLE, /* Nested OVS_SAMPLE_ATTR_*. */ + __OVS_ACTION_ATTR_MAX +}; + +#define OVS_ACTION_ATTR_MAX (__OVS_ACTION_ATTR_MAX - 1) + +#endif /* _LINUX_OPENVSWITCH_H */ -- cgit v1.2.3-70-g09d2 From a691ce7fe451363d2f1fa48d30c8f4b87c2475d4 Mon Sep 17 00:00:00 2001 From: Chen Gang Date: Thu, 28 Mar 2013 15:24:53 +0000 Subject: include/linux: printk is needed in filter.h when CONFIG_BPF_JIT is defined for make V=1 EXTRA_CFLAGS=-W ARCH=arm allmodconfig printk is need when CONFIG_BPF_JIT is defined or it will report pr_err and print_hex_dump are implicit declaration Signed-off-by: Chen Gang Signed-off-by: David S. Miller --- include/linux/filter.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/filter.h b/include/linux/filter.h index d7d25083130b..d1248f401a56 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -48,6 +48,9 @@ extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen); extern int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned len); #ifdef CONFIG_BPF_JIT +#include +#include + extern void bpf_jit_compile(struct sk_filter *fp); extern void bpf_jit_free(struct sk_filter *fp); -- cgit v1.2.3-70-g09d2 From 4c3d5e7b41dda1b1372bfc2545ef092a1bc5ad33 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 30 Mar 2013 06:31:03 +0000 Subject: net: reorder some fields of net_device As time passed, some fields were added in net_device, and not at sensible offsets. Lets reorder some fields to reduce number of cache lines in RX path. Fields not used in data path should be moved out of this critical cache line. In particular, move broadcast[] to the end of the rx section, as it is less used, and ethernet uses only the beginning of the 32bytes field. Before patch : offsetof(struct net_device,dev_addr)=0x258 offsetof(struct net_device,rx_handler)=0x2b8 offsetof(struct net_device,ingress_queue)=0x2c8 offsetof(struct net_device,broadcast)=0x278 After : offsetof(struct net_device,dev_addr)=0x280 offsetof(struct net_device,rx_handler)=0x298 offsetof(struct net_device,ingress_queue)=0x2a8 offsetof(struct net_device,broadcast)=0x2b0 Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 1dbb02c98946..4491414a9218 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1071,6 +1071,8 @@ struct net_device { struct list_head dev_list; struct list_head napi_list; struct list_head unreg_list; + struct list_head upper_dev_list; /* List of upper devices */ + /* currently active device features */ netdev_features_t features; @@ -1143,6 +1145,13 @@ struct net_device { spinlock_t addr_list_lock; struct netdev_hw_addr_list uc; /* Unicast mac addresses */ struct netdev_hw_addr_list mc; /* Multicast mac addresses */ + struct netdev_hw_addr_list dev_addrs; /* list of device + * hw addresses + */ +#ifdef CONFIG_SYSFS + struct kset *queues_kset; +#endif + bool uc_promisc; unsigned int promiscuity; unsigned int allmulti; @@ -1175,21 +1184,11 @@ struct net_device { * avoid dirtying this cache line. */ - struct list_head upper_dev_list; /* List of upper devices */ - /* Interface address info used in eth_type_trans() */ unsigned char *dev_addr; /* hw address, (before bcast because most packets are unicast) */ - struct netdev_hw_addr_list dev_addrs; /* list of device - hw addresses */ - - unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ - -#ifdef CONFIG_SYSFS - struct kset *queues_kset; -#endif #ifdef CONFIG_RPS struct netdev_rx_queue *_rx; @@ -1200,18 +1199,14 @@ struct net_device { /* Number of RX queues currently active in device */ unsigned int real_num_rx_queues; -#ifdef CONFIG_RFS_ACCEL - /* CPU reverse-mapping for RX completion interrupts, indexed - * by RX queue number. Assigned by driver. This must only be - * set if the ndo_rx_flow_steer operation is defined. */ - struct cpu_rmap *rx_cpu_rmap; -#endif #endif rx_handler_func_t __rcu *rx_handler; void __rcu *rx_handler_data; struct netdev_queue __rcu *ingress_queue; + unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ + /* * Cache lines mostly used on transmit path @@ -1233,6 +1228,12 @@ struct net_device { #ifdef CONFIG_XPS struct xps_dev_maps __rcu *xps_maps; #endif +#ifdef CONFIG_RFS_ACCEL + /* CPU reverse-mapping for RX completion interrupts, indexed + * by RX queue number. Assigned by driver. This must only be + * set if the ndo_rx_flow_steer operation is defined. */ + struct cpu_rmap *rx_cpu_rmap; +#endif /* These may be needed for future network-power-down code. */ -- cgit v1.2.3-70-g09d2 From 932bc4d7a53ba418de67fdab533248df5b36c752 Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Thu, 21 Mar 2013 11:57:58 +0200 Subject: net: add skb_dst_set_noref_force Rename skb_dst_set_noref to __skb_dst_set_noref and add force flag as suggested by David Miller. The new wrapper skb_dst_set_noref_force will force dst entries that are not cached to be attached as skb dst without taking reference as long as provided dst is reclaimed after RCU grace period. Signed-off-by: Julian Anastasov Signed-off by: Hans Schillstrom Acked-by: David S. 
Miller Signed-off-by: Simon Horman --- include/linux/skbuff.h | 35 ++++++++++++++++++++++++++++++++++- net/core/dst.c | 9 +++++---- 2 files changed, 39 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 878e0ee81068..364e2440a7ee 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -575,7 +575,40 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) skb->_skb_refdst = (unsigned long)dst; } -extern void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst); +extern void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst, + bool force); + +/** + * skb_dst_set_noref - sets skb dst, hopefully, without taking reference + * @skb: buffer + * @dst: dst entry + * + * Sets skb dst, assuming a reference was not taken on dst. + * If dst entry is cached, we do not take reference and dst_release + * will be avoided by refdst_drop. If dst entry is not cached, we take + * reference, so that last dst_release can destroy the dst immediately. + */ +static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) +{ + __skb_dst_set_noref(skb, dst, false); +} + +/** + * skb_dst_set_noref_force - sets skb dst, without taking reference + * @skb: buffer + * @dst: dst entry + * + * Sets skb dst, assuming a reference was not taken on dst. + * No reference is taken and no dst_release will be called. While for + * cached dsts deferred reclaim is a basic feature, for entries that are + * not cached it is caller's job to guarantee that last dst_release for + * provided dst happens when nobody uses it, eg. after a RCU grace period. + */ +static inline void skb_dst_set_noref_force(struct sk_buff *skb, + struct dst_entry *dst) +{ + __skb_dst_set_noref(skb, dst, true); +} /** * skb_dst_is_noref - Test if skb dst isn't refcounted diff --git a/net/core/dst.c b/net/core/dst.c index 35fd12f1a69c..df9cc810ec8e 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -320,27 +320,28 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old) EXPORT_SYMBOL(__dst_destroy_metrics_generic); /** - * skb_dst_set_noref - sets skb dst, without a reference + * __skb_dst_set_noref - sets skb dst, without a reference * @skb: buffer * @dst: dst entry + * @force: if force is set, use noref version even for DST_NOCACHE entries * * Sets skb dst, assuming a reference was not taken on dst * skb_dst_drop() should not dst_release() this dst */ -void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) +void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst, bool force) { WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); /* If dst not in cache, we must take a reference, because * dst_release() will destroy dst as soon as its refcount becomes zero */ - if (unlikely(dst->flags & DST_NOCACHE)) { + if (unlikely((dst->flags & DST_NOCACHE) && !force)) { dst_hold(dst); skb_dst_set(skb, dst); } else { skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF; } } -EXPORT_SYMBOL(skb_dst_set_noref); +EXPORT_SYMBOL(__skb_dst_set_noref); /* Dirty hack. We did it in 2.2 (in __dst_free), * we have _very_ good reasons not to repeat -- cgit v1.2.3-70-g09d2 From 65b3841b9cb5fe1b239f12dbf033f9827d73d032 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Tue, 2 Apr 2013 09:35:07 +0000 Subject: of_net.h: Provide empty functions if OF_NET is not configured of_get_mac_address() and of_get_phy_mode() are only provided if OF_NET is configured. 
While most callers check for the define, not all do, and those who do require #ifdef around the code. For those who don't, the missing check can result in errors such as arch/powerpc/sysdev/tsi108_dev.c:107:3: error: implicit declaration of function 'of_get_mac_address' [-Werror=implicit-function-declaration] arch/powerpc/sysdev/mv64x60_dev.c:253:2: error: implicit declaration of function 'of_get_mac_address' [-Werror=implicit-function-declaration] Provide empty functions if OF_NET is not configured. This is safe because all callers do check the return values. Cc: David Daney Signed-off-by: Guenter Roeck Acked-by: Rob Herring Signed-off-by: David S. Miller --- include/linux/of_net.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include/linux') diff --git a/include/linux/of_net.h b/include/linux/of_net.h index f47464188710..61bf53b02779 100644 --- a/include/linux/of_net.h +++ b/include/linux/of_net.h @@ -11,6 +11,16 @@ #include extern const int of_get_phy_mode(struct device_node *np); extern const void *of_get_mac_address(struct device_node *np); +#else +static inline const int of_get_phy_mode(struct device_node *np) +{ + return -ENODEV; +} + +static inline const void *of_get_mac_address(struct device_node *np) +{ + return NULL; +} #endif #endif /* __LINUX_OF_NET_H */ -- cgit v1.2.3-70-g09d2 From 12202fa7573d32aa0915cde6e8fab4c86b63ca2c Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Fri, 5 Apr 2013 19:40:10 +0200 Subject: netfilter: remove unneeded variable proc_net_netfilter Now that this supports net namespace for nflog and nfqueue, we can remove the global proc_net_netfilter which has no clients anymore. Based on patch from Gao feng. Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter.h | 5 ----- net/netfilter/core.c | 16 ++++------------ 2 files changed, 4 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index ee142846f56a..0060fde3160e 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -289,11 +289,6 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) #endif } -#ifdef CONFIG_PROC_FS -#include -extern struct proc_dir_entry *proc_net_netfilter; -#endif - #else /* !CONFIG_NETFILTER */ #define NF_HOOK(pf, hook, skb, indev, outdev, okfn) (okfn)(skb) #define NF_HOOK_COND(pf, hook, skb, indev, outdev, okfn, cond) (okfn)(skb) diff --git a/net/netfilter/core.c b/net/netfilter/core.c index b085184d9b45..7d97302f7c07 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -276,23 +276,15 @@ void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *); EXPORT_SYMBOL(nf_nat_decode_session_hook); #endif -#ifdef CONFIG_PROC_FS -struct proc_dir_entry *proc_net_netfilter; -EXPORT_SYMBOL(proc_net_netfilter); -#endif - static int __net_init netfilter_net_init(struct net *net) { #ifdef CONFIG_PROC_FS net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter", net->proc_net); - if (net_eq(net, &init_net)) { - if (!net->nf.proc_netfilter) - return -ENOMEM; - else - proc_net_netfilter = net->nf.proc_netfilter; - } else if (!net->nf.proc_netfilter) { - pr_err("cannot create netfilter proc entry"); + if (!net->nf.proc_netfilter) { + if (!net_eq(net, &init_net)) + pr_err("cannot create netfilter proc entry"); + return -ENOMEM; } #endif -- cgit v1.2.3-70-g09d2 From 540b3a39eea0056d305f17dda47eb185c4d56ddc Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Sun, 7 Apr 2013 03:44:07 +0000 Subject: net/mlx4_en: Enable DCB ETS ops only when 
supported by the firmware Enable the DCB ETS ops only when supported by the firmware. For older firmware/cards which don't support ETS, advertize only PFC DCB ops. Signed-off-by: Eugenia Emantayev Signed-off-by: Or Gerlitz Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | 8 ++++++++ drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 10 ++++++++-- drivers/net/ethernet/mellanox/mlx4/fw.c | 1 + drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 1 + include/linux/mlx4/device.h | 1 + 5 files changed, 19 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c index b799ab12a291..321553fd58df 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c @@ -253,3 +253,11 @@ const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = { .getdcbx = mlx4_en_dcbnl_getdcbx, .setdcbx = mlx4_en_dcbnl_setdcbx, }; + +const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = { + .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc, + .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc, + + .getdcbx = mlx4_en_dcbnl_getdcbx, + .setdcbx = mlx4_en_dcbnl_setdcbx, +}; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 473c9d2fec1a..d2a4f919bf1f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2013,8 +2013,14 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); #ifdef CONFIG_MLX4_EN_DCB - if (!mlx4_is_slave(priv->mdev->dev)) - dev->dcbnl_ops = &mlx4_en_dcbnl_ops; + if (!mlx4_is_slave(priv->mdev->dev)) { + if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) { + dev->dcbnl_ops = &mlx4_en_dcbnl_ops; + } else { + en_info(priv, "enabling only PFC DCB ops\n"); + dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops; + } + } #endif for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 876439748cdf..ab470d991ade 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -109,6 +109,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags) [41] = "Unicast VEP steering support", [42] = "Multicast VEP steering support", [48] = "Counters support", + [53] = "Port ETS Scheduler support", [55] = "Port link type sensing support", [59] = "Port management change event support", [61] = "64 byte EQE support", diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index f710b7ce0dcb..d4cb5d3b28a2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -624,6 +624,7 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port); #ifdef CONFIG_MLX4_EN_DCB extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops; +extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops; #endif int mlx4_en_setup_tc(struct net_device *dev, u8 up); diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 811f91cf5e8c..1bc5a750b330 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -140,6 +140,7 @@ enum { MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41, MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42, MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48, + MLX4_DEV_CAP_FLAG_SET_ETH_SCHED = 1LL << 53, 
MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55, MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59, MLX4_DEV_CAP_FLAG_64B_EQE = 1LL << 61, -- cgit v1.2.3-70-g09d2 From 79ba1d8910f517c3bd39d794ddb1a5b4c03795c4 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 27 Mar 2013 14:38:07 +0100 Subject: mac80211: parse Timeout Interval Element using a struct Instead of open-coding the accesses and length check do the length check in the IE parser and assign a struct pointer for use in the remaining code. Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 10 ++++++++++ net/mac80211/ieee80211_i.h | 3 +-- net/mac80211/mlme.c | 6 +++--- net/mac80211/util.c | 6 ++++-- 4 files changed, 18 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index d10b5bba3268..e46fea8b972e 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1955,6 +1955,16 @@ enum ieee80211_timeout_interval_type { WLAN_TIMEOUT_ASSOC_COMEBACK = 3 /* 802.11w */, }; +/** + * struct ieee80211_timeout_interval_ie - Timeout Interval element + * @type: type, see &enum ieee80211_timeout_interval_type + * @value: timeout interval value + */ +struct ieee80211_timeout_interval_ie { + u8 type; + __le32 value; +} __packed; + /* BACK action code */ enum ieee80211_back_actioncode { WLAN_ACTION_ADDBA_REQ = 0, diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 6ad019d32623..c783e996bcce 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1180,7 +1180,7 @@ struct ieee802_11_elems { const struct ieee80211_channel_sw_ie *ch_switch_ie; const u8 *country_elem; const u8 *pwr_constr_elem; - const u8 *timeout_int; + const struct ieee80211_timeout_interval_ie *timeout_int; const u8 *opmode_notif; /* length of them, respectively */ @@ -1198,7 +1198,6 @@ struct ieee802_11_elems { u8 prep_len; u8 perr_len; u8 country_elem_len; - u8 timeout_int_len; /* whether a parse error occurred while retrieving these elements */ bool parse_error; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 157d951df7a4..304d6cfc6250 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2629,10 +2629,10 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems); if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY && - elems.timeout_int && elems.timeout_int_len == 5 && - elems.timeout_int[0] == WLAN_TIMEOUT_ASSOC_COMEBACK) { + elems.timeout_int && + elems.timeout_int->type == WLAN_TIMEOUT_ASSOC_COMEBACK) { u32 tu, ms; - tu = get_unaligned_le32(elems.timeout_int + 1); + tu = le32_to_cpu(elems.timeout_int->value); ms = tu * 1024 / 1000; sdata_info(sdata, "%pM rejected association temporarily; comeback duration %u TU (%u ms)\n", diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 4839dec5c9ac..f9581c6378ae 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -874,8 +874,10 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, elems->pwr_constr_elem = pos; break; case WLAN_EID_TIMEOUT_INTERVAL: - elems->timeout_int = pos; - elems->timeout_int_len = elen; + if (elen >= sizeof(struct ieee80211_timeout_interval_ie)) + elems->timeout_int = (void *)pos; + else + elem_parse_failed = true; break; default: break; -- cgit v1.2.3-70-g09d2 From 1b8664341100716202c29d67f24d67094a82971e Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 9 Apr 2013 05:54:01 +0000 Subject: net: sctp: introduce uapi header for sctp This patch introduces 
an UAPI header for the SCTP protocol, so that we can facilitate the maintenance and development of user land applications or libraries, in particular in terms of header synchronization. To not break compatibility, some fragments from lksctp-tools' netinet/sctp.h have been carefully included, while taking care that neither kernel nor user land breaks, so both compile fine with this change (for lksctp-tools I tested with the old netinet/sctp.h header and with a newly adapted one that includes the uapi sctp header). lksctp-tools smoke test run through successfully as well in both cases. Suggested-by: Neil Horman Cc: Neil Horman Cc: Vlad Yasevich Signed-off-by: Daniel Borkmann Signed-off-by: David S. Miller --- fs/dlm/lowcomms.c | 2 +- include/linux/sctp.h | 6 +- include/net/sctp/constants.h | 1 - include/net/sctp/user.h | 782 --------------------------------------- include/uapi/linux/Kbuild | 1 + include/uapi/linux/sctp.h | 846 +++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 850 insertions(+), 788 deletions(-) delete mode 100644 include/net/sctp/user.h create mode 100644 include/uapi/linux/sctp.h (limited to 'include/linux') diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 4f5ad246582f..d0ccd2fd79eb 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -52,8 +52,8 @@ #include #include #include +#include #include -#include #include #include "dlm_internal.h" diff --git a/include/linux/sctp.h b/include/linux/sctp.h index c11a28706fa4..3bfe8d6ee248 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -53,7 +53,9 @@ #include /* We need in_addr. */ #include /* We need in6_addr. */ +#include +#include /* Section 3.1. SCTP Common Header Format */ typedef struct sctphdr { @@ -63,14 +65,10 @@ typedef struct sctphdr { __le32 checksum; } __packed sctp_sctphdr_t; -#ifdef __KERNEL__ -#include - static inline struct sctphdr *sctp_hdr(const struct sk_buff *skb) { return (struct sctphdr *)skb_transport_header(skb); } -#endif /* Section 3.2. Chunk Field Descriptions. */ typedef struct sctp_chunkhdr { diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index a7dd5c50df79..ca50e0751e47 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -49,7 +49,6 @@ #include #include /* For ipv6hdr. */ -#include #include /* For TCP states used in sctp_sock_state_t */ /* Value used for stream negotiation. */ diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h deleted file mode 100644 index 9a0ae091366d..000000000000 --- a/include/net/sctp/user.h +++ /dev/null @@ -1,782 +0,0 @@ -/* SCTP kernel implementation - * (C) Copyright IBM Corp. 2001, 2004 - * Copyright (c) 1999-2000 Cisco, Inc. - * Copyright (c) 1999-2001 Motorola, Inc. - * Copyright (c) 2002 Intel Corp. - * - * This file is part of the SCTP kernel implementation - * - * This header represents the structures and constants needed to support - * the SCTP Extension to the Sockets API. - * - * This SCTP implementation is free software; - * you can redistribute it and/or modify it under the terms of - * the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This SCTP implementation is distributed in the hope that it - * will be useful, but WITHOUT ANY WARRANTY; without even the implied - * ************************ - * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - * See the GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with GNU CC; see the file COPYING. If not, write to - * the Free Software Foundation, 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. - * - * Please send any bug reports or fixes you make to the - * email address(es): - * lksctp developers - * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * - * Written or modified by: - * La Monte H.P. Yarroll - * R. Stewart - * K. Morneau - * Q. Xie - * Karl Knutson - * Jon Grimm - * Daisy Chang - * Ryan Layer - * Ardelle Fan - * Sridhar Samudrala - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. - */ - -#ifndef __net_sctp_user_h__ -#define __net_sctp_user_h__ - -#include -#include - -typedef __s32 sctp_assoc_t; - -/* The following symbols come from the Sockets API Extensions for - * SCTP . - */ -#define SCTP_RTOINFO 0 -#define SCTP_ASSOCINFO 1 -#define SCTP_INITMSG 2 -#define SCTP_NODELAY 3 /* Get/set nodelay option. */ -#define SCTP_AUTOCLOSE 4 -#define SCTP_SET_PEER_PRIMARY_ADDR 5 -#define SCTP_PRIMARY_ADDR 6 -#define SCTP_ADAPTATION_LAYER 7 -#define SCTP_DISABLE_FRAGMENTS 8 -#define SCTP_PEER_ADDR_PARAMS 9 -#define SCTP_DEFAULT_SEND_PARAM 10 -#define SCTP_EVENTS 11 -#define SCTP_I_WANT_MAPPED_V4_ADDR 12 /* Turn on/off mapped v4 addresses */ -#define SCTP_MAXSEG 13 /* Get/set maximum fragment. */ -#define SCTP_STATUS 14 -#define SCTP_GET_PEER_ADDR_INFO 15 -#define SCTP_DELAYED_ACK_TIME 16 -#define SCTP_DELAYED_ACK SCTP_DELAYED_ACK_TIME -#define SCTP_DELAYED_SACK SCTP_DELAYED_ACK_TIME -#define SCTP_CONTEXT 17 -#define SCTP_FRAGMENT_INTERLEAVE 18 -#define SCTP_PARTIAL_DELIVERY_POINT 19 /* Set/Get partial delivery point */ -#define SCTP_MAX_BURST 20 /* Set/Get max burst */ -#define SCTP_AUTH_CHUNK 21 /* Set only: add a chunk type to authenticate */ -#define SCTP_HMAC_IDENT 22 -#define SCTP_AUTH_KEY 23 -#define SCTP_AUTH_ACTIVE_KEY 24 -#define SCTP_AUTH_DELETE_KEY 25 -#define SCTP_PEER_AUTH_CHUNKS 26 /* Read only */ -#define SCTP_LOCAL_AUTH_CHUNKS 27 /* Read only */ -#define SCTP_GET_ASSOC_NUMBER 28 /* Read only */ -#define SCTP_GET_ASSOC_ID_LIST 29 /* Read only */ -#define SCTP_AUTO_ASCONF 30 -#define SCTP_PEER_ADDR_THLDS 31 - -/* Internal Socket Options. Some of the sctp library functions are - * implemented using these socket options. - */ -#define SCTP_SOCKOPT_BINDX_ADD 100 /* BINDX requests for adding addrs */ -#define SCTP_SOCKOPT_BINDX_REM 101 /* BINDX requests for removing addrs. */ -#define SCTP_SOCKOPT_PEELOFF 102 /* peel off association. */ -/* Options 104-106 are deprecated and removed. Do not use this space */ -#define SCTP_SOCKOPT_CONNECTX_OLD 107 /* CONNECTX old requests. */ -#define SCTP_GET_PEER_ADDRS 108 /* Get all peer address. */ -#define SCTP_GET_LOCAL_ADDRS 109 /* Get all local address. */ -#define SCTP_SOCKOPT_CONNECTX 110 /* CONNECTX requests. */ -#define SCTP_SOCKOPT_CONNECTX3 111 /* CONNECTX requests (updated) */ -#define SCTP_GET_ASSOC_STATS 112 /* Read only */ - -/* - * 5.2.1 SCTP Initiation Structure (SCTP_INIT) - * - * This cmsghdr structure provides information for initializing new - * SCTP associations with sendmsg(). The SCTP_INITMSG socket option - * uses this same data structure. This structure is not used for - * recvmsg(). 
- * - * cmsg_level cmsg_type cmsg_data[] - * ------------ ------------ ---------------------- - * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg - * - */ -struct sctp_initmsg { - __u16 sinit_num_ostreams; - __u16 sinit_max_instreams; - __u16 sinit_max_attempts; - __u16 sinit_max_init_timeo; -}; - -/* - * 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV) - * - * This cmsghdr structure specifies SCTP options for sendmsg() and - * describes SCTP header information about a received message through - * recvmsg(). - * - * cmsg_level cmsg_type cmsg_data[] - * ------------ ------------ ---------------------- - * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo - * - */ -struct sctp_sndrcvinfo { - __u16 sinfo_stream; - __u16 sinfo_ssn; - __u16 sinfo_flags; - __u32 sinfo_ppid; - __u32 sinfo_context; - __u32 sinfo_timetolive; - __u32 sinfo_tsn; - __u32 sinfo_cumtsn; - sctp_assoc_t sinfo_assoc_id; -}; - -/* - * sinfo_flags: 16 bits (unsigned integer) - * - * This field may contain any of the following flags and is composed of - * a bitwise OR of these values. - */ - -enum sctp_sinfo_flags { - SCTP_UNORDERED = 1, /* Send/receive message unordered. */ - SCTP_ADDR_OVER = 2, /* Override the primary destination. */ - SCTP_ABORT=4, /* Send an ABORT message to the peer. */ - SCTP_SACK_IMMEDIATELY = 8, /* SACK should be sent without delay */ - SCTP_EOF=MSG_FIN, /* Initiate graceful shutdown process. */ -}; - - -/* These are cmsg_types. */ -typedef enum sctp_cmsg_type { - SCTP_INIT, /* 5.2.1 SCTP Initiation Structure */ - SCTP_SNDRCV, /* 5.2.2 SCTP Header Information Structure */ -} sctp_cmsg_t; - - -/* - * 5.3.1.1 SCTP_ASSOC_CHANGE - * - * Communication notifications inform the ULP that an SCTP association - * has either begun or ended. The identifier for a new association is - * provided by this notificaion. The notification information has the - * following format: - * - */ -struct sctp_assoc_change { - __u16 sac_type; - __u16 sac_flags; - __u32 sac_length; - __u16 sac_state; - __u16 sac_error; - __u16 sac_outbound_streams; - __u16 sac_inbound_streams; - sctp_assoc_t sac_assoc_id; - __u8 sac_info[0]; -}; - -/* - * sac_state: 32 bits (signed integer) - * - * This field holds one of a number of values that communicate the - * event that happened to the association. They include: - * - * Note: The following state names deviate from the API draft as - * the names clash too easily with other kernel symbols. - */ -enum sctp_sac_state { - SCTP_COMM_UP, - SCTP_COMM_LOST, - SCTP_RESTART, - SCTP_SHUTDOWN_COMP, - SCTP_CANT_STR_ASSOC, -}; - -/* - * 5.3.1.2 SCTP_PEER_ADDR_CHANGE - * - * When a destination address on a multi-homed peer encounters a change - * an interface details event is sent. The information has the - * following structure: - */ -struct sctp_paddr_change { - __u16 spc_type; - __u16 spc_flags; - __u32 spc_length; - struct sockaddr_storage spc_aaddr; - int spc_state; - int spc_error; - sctp_assoc_t spc_assoc_id; -} __attribute__((packed, aligned(4))); - -/* - * spc_state: 32 bits (signed integer) - * - * This field holds one of a number of values that communicate the - * event that happened to the address. They include: - */ -enum sctp_spc_state { - SCTP_ADDR_AVAILABLE, - SCTP_ADDR_UNREACHABLE, - SCTP_ADDR_REMOVED, - SCTP_ADDR_ADDED, - SCTP_ADDR_MADE_PRIM, - SCTP_ADDR_CONFIRMED, -}; - - -/* - * 5.3.1.3 SCTP_REMOTE_ERROR - * - * A remote peer may send an Operational Error message to its peer. - * This message indicates a variety of error conditions on an - * association. 
The entire error TLV as it appears on the wire is - * included in a SCTP_REMOTE_ERROR event. Please refer to the SCTP - * specification [SCTP] and any extensions for a list of possible - * error formats. SCTP error TLVs have the format: - */ -struct sctp_remote_error { - __u16 sre_type; - __u16 sre_flags; - __u32 sre_length; - __u16 sre_error; - sctp_assoc_t sre_assoc_id; - __u8 sre_data[0]; -}; - - -/* - * 5.3.1.4 SCTP_SEND_FAILED - * - * If SCTP cannot deliver a message it may return the message as a - * notification. - */ -struct sctp_send_failed { - __u16 ssf_type; - __u16 ssf_flags; - __u32 ssf_length; - __u32 ssf_error; - struct sctp_sndrcvinfo ssf_info; - sctp_assoc_t ssf_assoc_id; - __u8 ssf_data[0]; -}; - -/* - * ssf_flags: 16 bits (unsigned integer) - * - * The flag value will take one of the following values - * - * SCTP_DATA_UNSENT - Indicates that the data was never put on - * the wire. - * - * SCTP_DATA_SENT - Indicates that the data was put on the wire. - * Note that this does not necessarily mean that the - * data was (or was not) successfully delivered. - */ -enum sctp_ssf_flags { - SCTP_DATA_UNSENT, - SCTP_DATA_SENT, -}; - -/* - * 5.3.1.5 SCTP_SHUTDOWN_EVENT - * - * When a peer sends a SHUTDOWN, SCTP delivers this notification to - * inform the application that it should cease sending data. - */ -struct sctp_shutdown_event { - __u16 sse_type; - __u16 sse_flags; - __u32 sse_length; - sctp_assoc_t sse_assoc_id; -}; - -/* - * 5.3.1.6 SCTP_ADAPTATION_INDICATION - * - * When a peer sends a Adaptation Layer Indication parameter , SCTP - * delivers this notification to inform the application - * that of the peers requested adaptation layer. - */ -struct sctp_adaptation_event { - __u16 sai_type; - __u16 sai_flags; - __u32 sai_length; - __u32 sai_adaptation_ind; - sctp_assoc_t sai_assoc_id; -}; - -/* - * 5.3.1.7 SCTP_PARTIAL_DELIVERY_EVENT - * - * When a receiver is engaged in a partial delivery of a - * message this notification will be used to indicate - * various events. - */ -struct sctp_pdapi_event { - __u16 pdapi_type; - __u16 pdapi_flags; - __u32 pdapi_length; - __u32 pdapi_indication; - sctp_assoc_t pdapi_assoc_id; -}; - -enum { SCTP_PARTIAL_DELIVERY_ABORTED=0, }; - -struct sctp_authkey_event { - __u16 auth_type; - __u16 auth_flags; - __u32 auth_length; - __u16 auth_keynumber; - __u16 auth_altkeynumber; - __u32 auth_indication; - sctp_assoc_t auth_assoc_id; -}; - -enum { SCTP_AUTH_NEWKEY = 0, }; - -/* - * 6.1.9. SCTP_SENDER_DRY_EVENT - * - * When the SCTP stack has no more user data to send or retransmit, this - * notification is given to the user. Also, at the time when a user app - * subscribes to this event, if there is no data to be sent or - * retransmit, the stack will immediately send up this notification. - */ -struct sctp_sender_dry_event { - __u16 sender_dry_type; - __u16 sender_dry_flags; - __u32 sender_dry_length; - sctp_assoc_t sender_dry_assoc_id; -}; - -/* - * Described in Section 7.3 - * Ancillary Data and Notification Interest Options - */ -struct sctp_event_subscribe { - __u8 sctp_data_io_event; - __u8 sctp_association_event; - __u8 sctp_address_event; - __u8 sctp_send_failure_event; - __u8 sctp_peer_error_event; - __u8 sctp_shutdown_event; - __u8 sctp_partial_delivery_event; - __u8 sctp_adaptation_layer_event; - __u8 sctp_authentication_event; - __u8 sctp_sender_dry_event; -}; - -/* - * 5.3.1 SCTP Notification Structure - * - * The notification structure is defined as the union of all - * notification types. 
- * - */ -union sctp_notification { - struct { - __u16 sn_type; /* Notification type. */ - __u16 sn_flags; - __u32 sn_length; - } sn_header; - struct sctp_assoc_change sn_assoc_change; - struct sctp_paddr_change sn_paddr_change; - struct sctp_remote_error sn_remote_error; - struct sctp_send_failed sn_send_failed; - struct sctp_shutdown_event sn_shutdown_event; - struct sctp_adaptation_event sn_adaptation_event; - struct sctp_pdapi_event sn_pdapi_event; - struct sctp_authkey_event sn_authkey_event; - struct sctp_sender_dry_event sn_sender_dry_event; -}; - -/* Section 5.3.1 - * All standard values for sn_type flags are greater than 2^15. - * Values from 2^15 and down are reserved. - */ - -enum sctp_sn_type { - SCTP_SN_TYPE_BASE = (1<<15), - SCTP_ASSOC_CHANGE, - SCTP_PEER_ADDR_CHANGE, - SCTP_SEND_FAILED, - SCTP_REMOTE_ERROR, - SCTP_SHUTDOWN_EVENT, - SCTP_PARTIAL_DELIVERY_EVENT, - SCTP_ADAPTATION_INDICATION, - SCTP_AUTHENTICATION_EVENT, -#define SCTP_AUTHENTICATION_INDICATION SCTP_AUTHENTICATION_EVENT - SCTP_SENDER_DRY_EVENT, -}; - -/* Notification error codes used to fill up the error fields in some - * notifications. - * SCTP_PEER_ADDRESS_CHAGE : spc_error - * SCTP_ASSOC_CHANGE : sac_error - * These names should be potentially included in the draft 04 of the SCTP - * sockets API specification. - */ -typedef enum sctp_sn_error { - SCTP_FAILED_THRESHOLD, - SCTP_RECEIVED_SACK, - SCTP_HEARTBEAT_SUCCESS, - SCTP_RESPONSE_TO_USER_REQ, - SCTP_INTERNAL_ERROR, - SCTP_SHUTDOWN_GUARD_EXPIRES, - SCTP_PEER_FAULTY, -} sctp_sn_error_t; - -/* - * 7.1.1 Retransmission Timeout Parameters (SCTP_RTOINFO) - * - * The protocol parameters used to initialize and bound retransmission - * timeout (RTO) are tunable. See [SCTP] for more information on how - * these parameters are used in RTO calculation. - */ -struct sctp_rtoinfo { - sctp_assoc_t srto_assoc_id; - __u32 srto_initial; - __u32 srto_max; - __u32 srto_min; -}; - -/* - * 7.1.2 Association Parameters (SCTP_ASSOCINFO) - * - * This option is used to both examine and set various association and - * endpoint parameters. - */ -struct sctp_assocparams { - sctp_assoc_t sasoc_assoc_id; - __u16 sasoc_asocmaxrxt; - __u16 sasoc_number_peer_destinations; - __u32 sasoc_peer_rwnd; - __u32 sasoc_local_rwnd; - __u32 sasoc_cookie_life; -}; - -/* - * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR) - * - * Requests that the peer mark the enclosed address as the association - * primary. The enclosed address must be one of the association's - * locally bound addresses. The following structure is used to make a - * set primary request: - */ -struct sctp_setpeerprim { - sctp_assoc_t sspp_assoc_id; - struct sockaddr_storage sspp_addr; -} __attribute__((packed, aligned(4))); - -/* - * 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) - * - * Requests that the local SCTP stack use the enclosed peer address as - * the association primary. The enclosed address must be one of the - * association peer's addresses. The following structure is used to - * make a set peer primary request: - */ -struct sctp_prim { - sctp_assoc_t ssp_assoc_id; - struct sockaddr_storage ssp_addr; -} __attribute__((packed, aligned(4))); - -/* - * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER) - * - * Requests that the local endpoint set the specified Adaptation Layer - * Indication parameter for all future INIT and INIT-ACK exchanges. 
- */ -struct sctp_setadaptation { - __u32 ssb_adaptation_ind; -}; - -/* - * 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) - * - * Applications can enable or disable heartbeats for any peer address - * of an association, modify an address's heartbeat interval, force a - * heartbeat to be sent immediately, and adjust the address's maximum - * number of retransmissions sent before an address is considered - * unreachable. The following structure is used to access and modify an - * address's parameters: - */ -enum sctp_spp_flags { - SPP_HB_ENABLE = 1<<0, /*Enable heartbeats*/ - SPP_HB_DISABLE = 1<<1, /*Disable heartbeats*/ - SPP_HB = SPP_HB_ENABLE | SPP_HB_DISABLE, - SPP_HB_DEMAND = 1<<2, /*Send heartbeat immediately*/ - SPP_PMTUD_ENABLE = 1<<3, /*Enable PMTU discovery*/ - SPP_PMTUD_DISABLE = 1<<4, /*Disable PMTU discovery*/ - SPP_PMTUD = SPP_PMTUD_ENABLE | SPP_PMTUD_DISABLE, - SPP_SACKDELAY_ENABLE = 1<<5, /*Enable SACK*/ - SPP_SACKDELAY_DISABLE = 1<<6, /*Disable SACK*/ - SPP_SACKDELAY = SPP_SACKDELAY_ENABLE | SPP_SACKDELAY_DISABLE, - SPP_HB_TIME_IS_ZERO = 1<<7, /* Set HB delay to 0 */ -}; - -struct sctp_paddrparams { - sctp_assoc_t spp_assoc_id; - struct sockaddr_storage spp_address; - __u32 spp_hbinterval; - __u16 spp_pathmaxrxt; - __u32 spp_pathmtu; - __u32 spp_sackdelay; - __u32 spp_flags; -} __attribute__((packed, aligned(4))); - -/* - * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK) - * - * This set option adds a chunk type that the user is requesting to be - * received only in an authenticated way. Changes to the list of chunks - * will only effect future associations on the socket. - */ -struct sctp_authchunk { - __u8 sauth_chunk; -}; - -/* - * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT) - * - * This option gets or sets the list of HMAC algorithms that the local - * endpoint requires the peer to use. -*/ -struct sctp_hmacalgo { - __u32 shmac_num_idents; - __u16 shmac_idents[]; -}; - -/* - * 7.1.20. Set a shared key (SCTP_AUTH_KEY) - * - * This option will set a shared secret key which is used to build an - * association shared key. - */ -struct sctp_authkey { - sctp_assoc_t sca_assoc_id; - __u16 sca_keynumber; - __u16 sca_keylength; - __u8 sca_key[]; -}; - -/* - * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY) - * - * This option will get or set the active shared key to be used to build - * the association shared key. - */ - -struct sctp_authkeyid { - sctp_assoc_t scact_assoc_id; - __u16 scact_keynumber; -}; - - -/* - * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) - * - * This option will effect the way delayed acks are performed. This - * option allows you to get or set the delayed ack time, in - * milliseconds. It also allows changing the delayed ack frequency. - * Changing the frequency to 1 disables the delayed sack algorithm. If - * the assoc_id is 0, then this sets or gets the endpoints default - * values. If the assoc_id field is non-zero, then the set or get - * effects the specified association for the one to many model (the - * assoc_id field is ignored by the one to one model). Note that if - * sack_delay or sack_freq are 0 when setting this option, then the - * current values will remain unchanged. 
- */ -struct sctp_sack_info { - sctp_assoc_t sack_assoc_id; - uint32_t sack_delay; - uint32_t sack_freq; -}; - -struct sctp_assoc_value { - sctp_assoc_t assoc_id; - uint32_t assoc_value; -}; - -/* - * 7.2.2 Peer Address Information - * - * Applications can retrieve information about a specific peer address - * of an association, including its reachability state, congestion - * window, and retransmission timer values. This information is - * read-only. The following structure is used to access this - * information: - */ -struct sctp_paddrinfo { - sctp_assoc_t spinfo_assoc_id; - struct sockaddr_storage spinfo_address; - __s32 spinfo_state; - __u32 spinfo_cwnd; - __u32 spinfo_srtt; - __u32 spinfo_rto; - __u32 spinfo_mtu; -} __attribute__((packed, aligned(4))); - -/* Peer addresses's state. */ -/* UNKNOWN: Peer address passed by the upper layer in sendmsg or connect[x] - * calls. - * UNCONFIRMED: Peer address received in INIT/INIT-ACK address parameters. - * Not yet confirmed by a heartbeat and not available for data - * transfers. - * ACTIVE : Peer address confirmed, active and available for data transfers. - * INACTIVE: Peer address inactive and not available for data transfers. - */ -enum sctp_spinfo_state { - SCTP_INACTIVE, - SCTP_PF, - SCTP_ACTIVE, - SCTP_UNCONFIRMED, - SCTP_UNKNOWN = 0xffff /* Value used for transport state unknown */ -}; - -/* - * 7.2.1 Association Status (SCTP_STATUS) - * - * Applications can retrieve current status information about an - * association, including association state, peer receiver window size, - * number of unacked data chunks, and number of data chunks pending - * receipt. This information is read-only. The following structure is - * used to access this information: - */ -struct sctp_status { - sctp_assoc_t sstat_assoc_id; - __s32 sstat_state; - __u32 sstat_rwnd; - __u16 sstat_unackdata; - __u16 sstat_penddata; - __u16 sstat_instrms; - __u16 sstat_outstrms; - __u32 sstat_fragmentation_point; - struct sctp_paddrinfo sstat_primary; -}; - -/* - * 7.2.3. Get the list of chunks the peer requires to be authenticated - * (SCTP_PEER_AUTH_CHUNKS) - * - * This option gets a list of chunks for a specified association that - * the peer requires to be received authenticated only. - */ -struct sctp_authchunks { - sctp_assoc_t gauth_assoc_id; - __u32 gauth_number_of_chunks; - uint8_t gauth_chunks[]; -}; - -/* - * 8.2.6. Get the Current Identifiers of Associations - * (SCTP_GET_ASSOC_ID_LIST) - * - * This option gets the current list of SCTP association identifiers of - * the SCTP associations handled by a one-to-many style socket. - */ -struct sctp_assoc_ids { - __u32 gaids_number_of_ids; - sctp_assoc_t gaids_assoc_id[]; -}; - -/* - * 8.3, 8.5 get all peer/local addresses in an association. - * This parameter struct is used by SCTP_GET_PEER_ADDRS and - * SCTP_GET_LOCAL_ADDRS socket options used internally to implement - * sctp_getpaddrs() and sctp_getladdrs() API. - */ -struct sctp_getaddrs_old { - sctp_assoc_t assoc_id; - int addr_num; - struct sockaddr __user *addrs; -}; -struct sctp_getaddrs { - sctp_assoc_t assoc_id; /*input*/ - __u32 addr_num; /*output*/ - __u8 addrs[0]; /*output, variable size*/ -}; - -/* A socket user request obtained via SCTP_GET_ASSOC_STATS that retrieves - * association stats. All stats are counts except sas_maxrto and - * sas_obs_rto_ipaddr. maxrto is the max observed rto + transport since - * the last call. 
Will return 0 when RTO was not update since last call - */ -struct sctp_assoc_stats { - sctp_assoc_t sas_assoc_id; /* Input */ - /* Transport of observed max RTO */ - struct sockaddr_storage sas_obs_rto_ipaddr; - __u64 sas_maxrto; /* Maximum Observed RTO for period */ - __u64 sas_isacks; /* SACKs received */ - __u64 sas_osacks; /* SACKs sent */ - __u64 sas_opackets; /* Packets sent */ - __u64 sas_ipackets; /* Packets received */ - __u64 sas_rtxchunks; /* Retransmitted Chunks */ - __u64 sas_outofseqtsns;/* TSN received > next expected */ - __u64 sas_idupchunks; /* Dups received (ordered+unordered) */ - __u64 sas_gapcnt; /* Gap Acknowledgements Received */ - __u64 sas_ouodchunks; /* Unordered data chunks sent */ - __u64 sas_iuodchunks; /* Unordered data chunks received */ - __u64 sas_oodchunks; /* Ordered data chunks sent */ - __u64 sas_iodchunks; /* Ordered data chunks received */ - __u64 sas_octrlchunks; /* Control chunks sent */ - __u64 sas_ictrlchunks; /* Control chunks received */ -}; - -/* These are bit fields for msghdr->msg_flags. See section 5.1. */ -/* On user space Linux, these live in as an enum. */ -enum sctp_msg_flags { - MSG_NOTIFICATION = 0x8000, -#define MSG_NOTIFICATION MSG_NOTIFICATION -}; - -/* - * 8.1 sctp_bindx() - * - * The flags parameter is formed from the bitwise OR of zero or more of the - * following currently defined flags: - */ -#define SCTP_BINDX_ADD_ADDR 0x01 -#define SCTP_BINDX_REM_ADDR 0x02 - -/* This is the structure that is passed as an argument(optval) to - * getsockopt(SCTP_SOCKOPT_PEELOFF). - */ -typedef struct { - sctp_assoc_t associd; - int sd; -} sctp_peeloff_arg_t; - -/* - * Peer Address Thresholds socket option - */ -struct sctp_paddrthlds { - sctp_assoc_t spt_assoc_id; - struct sockaddr_storage spt_address; - __u16 spt_pathmaxrxt; - __u16 spt_pathpfthld; -}; -#endif /* __net_sctp_user_h__ */ diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 5c8a1d25e21c..7df190525337 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -331,6 +331,7 @@ header-y += rtnetlink.h header-y += scc.h header-y += sched.h header-y += screen_info.h +header-y += sctp.h header-y += sdla.h header-y += seccomp.h header-y += securebits.h diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h new file mode 100644 index 000000000000..66b466e4ca08 --- /dev/null +++ b/include/uapi/linux/sctp.h @@ -0,0 +1,846 @@ +/* SCTP kernel implementation + * (C) Copyright IBM Corp. 2001, 2004 + * Copyright (c) 1999-2000 Cisco, Inc. + * Copyright (c) 1999-2001 Motorola, Inc. + * Copyright (c) 2002 Intel Corp. + * + * This file is part of the SCTP kernel implementation + * + * This header represents the structures and constants needed to support + * the SCTP Extension to the Sockets API. + * + * This SCTP implementation is free software; + * you can redistribute it and/or modify it under the terms of + * the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This SCTP implementation is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; without even the implied + * ************************ + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU CC; see the file COPYING. 
If not, write to + * the Free Software Foundation, 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + * + * Please send any bug reports or fixes you make to the + * email address(es): + * lksctp developers + * + * Or submit a bug report through the following website: + * http://www.sf.net/projects/lksctp + * + * Written or modified by: + * La Monte H.P. Yarroll + * R. Stewart + * K. Morneau + * Q. Xie + * Karl Knutson + * Jon Grimm + * Daisy Chang + * Ryan Layer + * Ardelle Fan + * Sridhar Samudrala + * Inaky Perez-Gonzalez + * Vlad Yasevich + * + * Any bugs reported given to us we will try to fix... any fixes shared will + * be incorporated into the next SCTP release. + */ + +#ifndef _UAPI_SCTP_H +#define _UAPI_SCTP_H + +#include +#include + +typedef __s32 sctp_assoc_t; + +/* The following symbols come from the Sockets API Extensions for + * SCTP . + */ +#define SCTP_RTOINFO 0 +#define SCTP_ASSOCINFO 1 +#define SCTP_INITMSG 2 +#define SCTP_NODELAY 3 /* Get/set nodelay option. */ +#define SCTP_AUTOCLOSE 4 +#define SCTP_SET_PEER_PRIMARY_ADDR 5 +#define SCTP_PRIMARY_ADDR 6 +#define SCTP_ADAPTATION_LAYER 7 +#define SCTP_DISABLE_FRAGMENTS 8 +#define SCTP_PEER_ADDR_PARAMS 9 +#define SCTP_DEFAULT_SEND_PARAM 10 +#define SCTP_EVENTS 11 +#define SCTP_I_WANT_MAPPED_V4_ADDR 12 /* Turn on/off mapped v4 addresses */ +#define SCTP_MAXSEG 13 /* Get/set maximum fragment. */ +#define SCTP_STATUS 14 +#define SCTP_GET_PEER_ADDR_INFO 15 +#define SCTP_DELAYED_ACK_TIME 16 +#define SCTP_DELAYED_ACK SCTP_DELAYED_ACK_TIME +#define SCTP_DELAYED_SACK SCTP_DELAYED_ACK_TIME +#define SCTP_CONTEXT 17 +#define SCTP_FRAGMENT_INTERLEAVE 18 +#define SCTP_PARTIAL_DELIVERY_POINT 19 /* Set/Get partial delivery point */ +#define SCTP_MAX_BURST 20 /* Set/Get max burst */ +#define SCTP_AUTH_CHUNK 21 /* Set only: add a chunk type to authenticate */ +#define SCTP_HMAC_IDENT 22 +#define SCTP_AUTH_KEY 23 +#define SCTP_AUTH_ACTIVE_KEY 24 +#define SCTP_AUTH_DELETE_KEY 25 +#define SCTP_PEER_AUTH_CHUNKS 26 /* Read only */ +#define SCTP_LOCAL_AUTH_CHUNKS 27 /* Read only */ +#define SCTP_GET_ASSOC_NUMBER 28 /* Read only */ +#define SCTP_GET_ASSOC_ID_LIST 29 /* Read only */ +#define SCTP_AUTO_ASCONF 30 +#define SCTP_PEER_ADDR_THLDS 31 + +/* Internal Socket Options. Some of the sctp library functions are + * implemented using these socket options. + */ +#define SCTP_SOCKOPT_BINDX_ADD 100 /* BINDX requests for adding addrs */ +#define SCTP_SOCKOPT_BINDX_REM 101 /* BINDX requests for removing addrs. */ +#define SCTP_SOCKOPT_PEELOFF 102 /* peel off association. */ +/* Options 104-106 are deprecated and removed. Do not use this space */ +#define SCTP_SOCKOPT_CONNECTX_OLD 107 /* CONNECTX old requests. */ +#define SCTP_GET_PEER_ADDRS 108 /* Get all peer address. */ +#define SCTP_GET_LOCAL_ADDRS 109 /* Get all local address. */ +#define SCTP_SOCKOPT_CONNECTX 110 /* CONNECTX requests. */ +#define SCTP_SOCKOPT_CONNECTX3 111 /* CONNECTX requests (updated) */ +#define SCTP_GET_ASSOC_STATS 112 /* Read only */ + +/* + * 5.2.1 SCTP Initiation Structure (SCTP_INIT) + * + * This cmsghdr structure provides information for initializing new + * SCTP associations with sendmsg(). The SCTP_INITMSG socket option + * uses this same data structure. This structure is not used for + * recvmsg(). 
+ * + * cmsg_level cmsg_type cmsg_data[] + * ------------ ------------ ---------------------- + * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg + * + */ +struct sctp_initmsg { + __u16 sinit_num_ostreams; + __u16 sinit_max_instreams; + __u16 sinit_max_attempts; + __u16 sinit_max_init_timeo; +}; + +/* + * 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV) + * + * This cmsghdr structure specifies SCTP options for sendmsg() and + * describes SCTP header information about a received message through + * recvmsg(). + * + * cmsg_level cmsg_type cmsg_data[] + * ------------ ------------ ---------------------- + * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo + * + */ +struct sctp_sndrcvinfo { + __u16 sinfo_stream; + __u16 sinfo_ssn; + __u16 sinfo_flags; + __u32 sinfo_ppid; + __u32 sinfo_context; + __u32 sinfo_timetolive; + __u32 sinfo_tsn; + __u32 sinfo_cumtsn; + sctp_assoc_t sinfo_assoc_id; +}; + +/* + * sinfo_flags: 16 bits (unsigned integer) + * + * This field may contain any of the following flags and is composed of + * a bitwise OR of these values. + */ + +enum sctp_sinfo_flags { + SCTP_UNORDERED = 1, /* Send/receive message unordered. */ + SCTP_ADDR_OVER = 2, /* Override the primary destination. */ + SCTP_ABORT=4, /* Send an ABORT message to the peer. */ + SCTP_SACK_IMMEDIATELY = 8, /* SACK should be sent without delay */ + SCTP_EOF=MSG_FIN, /* Initiate graceful shutdown process. */ +}; + +typedef union { + __u8 raw; + struct sctp_initmsg init; + struct sctp_sndrcvinfo sndrcv; +} sctp_cmsg_data_t; + +/* These are cmsg_types. */ +typedef enum sctp_cmsg_type { + SCTP_INIT, /* 5.2.1 SCTP Initiation Structure */ +#define SCTP_INIT SCTP_INIT + SCTP_SNDRCV, /* 5.2.2 SCTP Header Information Structure */ +#define SCTP_SNDRCV SCTP_SNDRCV +} sctp_cmsg_t; + +/* + * 5.3.1.1 SCTP_ASSOC_CHANGE + * + * Communication notifications inform the ULP that an SCTP association + * has either begun or ended. The identifier for a new association is + * provided by this notificaion. The notification information has the + * following format: + * + */ +struct sctp_assoc_change { + __u16 sac_type; + __u16 sac_flags; + __u32 sac_length; + __u16 sac_state; + __u16 sac_error; + __u16 sac_outbound_streams; + __u16 sac_inbound_streams; + sctp_assoc_t sac_assoc_id; + __u8 sac_info[0]; +}; + +/* + * sac_state: 32 bits (signed integer) + * + * This field holds one of a number of values that communicate the + * event that happened to the association. They include: + * + * Note: The following state names deviate from the API draft as + * the names clash too easily with other kernel symbols. + */ +enum sctp_sac_state { + SCTP_COMM_UP, + SCTP_COMM_LOST, + SCTP_RESTART, + SCTP_SHUTDOWN_COMP, + SCTP_CANT_STR_ASSOC, +}; + +/* + * 5.3.1.2 SCTP_PEER_ADDR_CHANGE + * + * When a destination address on a multi-homed peer encounters a change + * an interface details event is sent. The information has the + * following structure: + */ +struct sctp_paddr_change { + __u16 spc_type; + __u16 spc_flags; + __u32 spc_length; + struct sockaddr_storage spc_aaddr; + int spc_state; + int spc_error; + sctp_assoc_t spc_assoc_id; +} __attribute__((packed, aligned(4))); + +/* + * spc_state: 32 bits (signed integer) + * + * This field holds one of a number of values that communicate the + * event that happened to the address. 
They include: + */ +enum sctp_spc_state { + SCTP_ADDR_AVAILABLE, + SCTP_ADDR_UNREACHABLE, + SCTP_ADDR_REMOVED, + SCTP_ADDR_ADDED, + SCTP_ADDR_MADE_PRIM, + SCTP_ADDR_CONFIRMED, +}; + + +/* + * 5.3.1.3 SCTP_REMOTE_ERROR + * + * A remote peer may send an Operational Error message to its peer. + * This message indicates a variety of error conditions on an + * association. The entire error TLV as it appears on the wire is + * included in a SCTP_REMOTE_ERROR event. Please refer to the SCTP + * specification [SCTP] and any extensions for a list of possible + * error formats. SCTP error TLVs have the format: + */ +struct sctp_remote_error { + __u16 sre_type; + __u16 sre_flags; + __u32 sre_length; + __u16 sre_error; + sctp_assoc_t sre_assoc_id; + __u8 sre_data[0]; +}; + + +/* + * 5.3.1.4 SCTP_SEND_FAILED + * + * If SCTP cannot deliver a message it may return the message as a + * notification. + */ +struct sctp_send_failed { + __u16 ssf_type; + __u16 ssf_flags; + __u32 ssf_length; + __u32 ssf_error; + struct sctp_sndrcvinfo ssf_info; + sctp_assoc_t ssf_assoc_id; + __u8 ssf_data[0]; +}; + +/* + * ssf_flags: 16 bits (unsigned integer) + * + * The flag value will take one of the following values + * + * SCTP_DATA_UNSENT - Indicates that the data was never put on + * the wire. + * + * SCTP_DATA_SENT - Indicates that the data was put on the wire. + * Note that this does not necessarily mean that the + * data was (or was not) successfully delivered. + */ +enum sctp_ssf_flags { + SCTP_DATA_UNSENT, + SCTP_DATA_SENT, +}; + +/* + * 5.3.1.5 SCTP_SHUTDOWN_EVENT + * + * When a peer sends a SHUTDOWN, SCTP delivers this notification to + * inform the application that it should cease sending data. + */ +struct sctp_shutdown_event { + __u16 sse_type; + __u16 sse_flags; + __u32 sse_length; + sctp_assoc_t sse_assoc_id; +}; + +/* + * 5.3.1.6 SCTP_ADAPTATION_INDICATION + * + * When a peer sends a Adaptation Layer Indication parameter , SCTP + * delivers this notification to inform the application + * that of the peers requested adaptation layer. + */ +struct sctp_adaptation_event { + __u16 sai_type; + __u16 sai_flags; + __u32 sai_length; + __u32 sai_adaptation_ind; + sctp_assoc_t sai_assoc_id; +}; + +/* + * 5.3.1.7 SCTP_PARTIAL_DELIVERY_EVENT + * + * When a receiver is engaged in a partial delivery of a + * message this notification will be used to indicate + * various events. + */ +struct sctp_pdapi_event { + __u16 pdapi_type; + __u16 pdapi_flags; + __u32 pdapi_length; + __u32 pdapi_indication; + sctp_assoc_t pdapi_assoc_id; +}; + +enum { SCTP_PARTIAL_DELIVERY_ABORTED=0, }; + +/* + * 5.3.1.8. SCTP_AUTHENTICATION_EVENT + * + * When a receiver is using authentication this message will provide + * notifications regarding new keys being made active as well as errors. + */ +struct sctp_authkey_event { + __u16 auth_type; + __u16 auth_flags; + __u32 auth_length; + __u16 auth_keynumber; + __u16 auth_altkeynumber; + __u32 auth_indication; + sctp_assoc_t auth_assoc_id; +}; + +enum { SCTP_AUTH_NEWKEY = 0, }; + +/* + * 6.1.9. SCTP_SENDER_DRY_EVENT + * + * When the SCTP stack has no more user data to send or retransmit, this + * notification is given to the user. Also, at the time when a user app + * subscribes to this event, if there is no data to be sent or + * retransmit, the stack will immediately send up this notification. 
+ */ +struct sctp_sender_dry_event { + __u16 sender_dry_type; + __u16 sender_dry_flags; + __u32 sender_dry_length; + sctp_assoc_t sender_dry_assoc_id; +}; + +/* + * Described in Section 7.3 + * Ancillary Data and Notification Interest Options + */ +struct sctp_event_subscribe { + __u8 sctp_data_io_event; + __u8 sctp_association_event; + __u8 sctp_address_event; + __u8 sctp_send_failure_event; + __u8 sctp_peer_error_event; + __u8 sctp_shutdown_event; + __u8 sctp_partial_delivery_event; + __u8 sctp_adaptation_layer_event; + __u8 sctp_authentication_event; + __u8 sctp_sender_dry_event; +}; + +/* + * 5.3.1 SCTP Notification Structure + * + * The notification structure is defined as the union of all + * notification types. + * + */ +union sctp_notification { + struct { + __u16 sn_type; /* Notification type. */ + __u16 sn_flags; + __u32 sn_length; + } sn_header; + struct sctp_assoc_change sn_assoc_change; + struct sctp_paddr_change sn_paddr_change; + struct sctp_remote_error sn_remote_error; + struct sctp_send_failed sn_send_failed; + struct sctp_shutdown_event sn_shutdown_event; + struct sctp_adaptation_event sn_adaptation_event; + struct sctp_pdapi_event sn_pdapi_event; + struct sctp_authkey_event sn_authkey_event; + struct sctp_sender_dry_event sn_sender_dry_event; +}; + +/* Section 5.3.1 + * All standard values for sn_type flags are greater than 2^15. + * Values from 2^15 and down are reserved. + */ + +enum sctp_sn_type { + SCTP_SN_TYPE_BASE = (1<<15), + SCTP_ASSOC_CHANGE, +#define SCTP_ASSOC_CHANGE SCTP_ASSOC_CHANGE + SCTP_PEER_ADDR_CHANGE, +#define SCTP_PEER_ADDR_CHANGE SCTP_PEER_ADDR_CHANGE + SCTP_SEND_FAILED, +#define SCTP_SEND_FAILED SCTP_SEND_FAILED + SCTP_REMOTE_ERROR, +#define SCTP_REMOTE_ERROR SCTP_REMOTE_ERROR + SCTP_SHUTDOWN_EVENT, +#define SCTP_SHUTDOWN_EVENT SCTP_SHUTDOWN_EVENT + SCTP_PARTIAL_DELIVERY_EVENT, +#define SCTP_PARTIAL_DELIVERY_EVENT SCTP_PARTIAL_DELIVERY_EVENT + SCTP_ADAPTATION_INDICATION, +#define SCTP_ADAPTATION_INDICATION SCTP_ADAPTATION_INDICATION + SCTP_AUTHENTICATION_EVENT, +#define SCTP_AUTHENTICATION_INDICATION SCTP_AUTHENTICATION_EVENT + SCTP_SENDER_DRY_EVENT, +#define SCTP_SENDER_DRY_EVENT SCTP_SENDER_DRY_EVENT +}; + +/* Notification error codes used to fill up the error fields in some + * notifications. + * SCTP_PEER_ADDRESS_CHAGE : spc_error + * SCTP_ASSOC_CHANGE : sac_error + * These names should be potentially included in the draft 04 of the SCTP + * sockets API specification. + */ +typedef enum sctp_sn_error { + SCTP_FAILED_THRESHOLD, + SCTP_RECEIVED_SACK, + SCTP_HEARTBEAT_SUCCESS, + SCTP_RESPONSE_TO_USER_REQ, + SCTP_INTERNAL_ERROR, + SCTP_SHUTDOWN_GUARD_EXPIRES, + SCTP_PEER_FAULTY, +} sctp_sn_error_t; + +/* + * 7.1.1 Retransmission Timeout Parameters (SCTP_RTOINFO) + * + * The protocol parameters used to initialize and bound retransmission + * timeout (RTO) are tunable. See [SCTP] for more information on how + * these parameters are used in RTO calculation. + */ +struct sctp_rtoinfo { + sctp_assoc_t srto_assoc_id; + __u32 srto_initial; + __u32 srto_max; + __u32 srto_min; +}; + +/* + * 7.1.2 Association Parameters (SCTP_ASSOCINFO) + * + * This option is used to both examine and set various association and + * endpoint parameters. 
+ */ +struct sctp_assocparams { + sctp_assoc_t sasoc_assoc_id; + __u16 sasoc_asocmaxrxt; + __u16 sasoc_number_peer_destinations; + __u32 sasoc_peer_rwnd; + __u32 sasoc_local_rwnd; + __u32 sasoc_cookie_life; +}; + +/* + * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR) + * + * Requests that the peer mark the enclosed address as the association + * primary. The enclosed address must be one of the association's + * locally bound addresses. The following structure is used to make a + * set primary request: + */ +struct sctp_setpeerprim { + sctp_assoc_t sspp_assoc_id; + struct sockaddr_storage sspp_addr; +} __attribute__((packed, aligned(4))); + +/* + * 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) + * + * Requests that the local SCTP stack use the enclosed peer address as + * the association primary. The enclosed address must be one of the + * association peer's addresses. The following structure is used to + * make a set peer primary request: + */ +struct sctp_prim { + sctp_assoc_t ssp_assoc_id; + struct sockaddr_storage ssp_addr; +} __attribute__((packed, aligned(4))); + +/* For backward compatibility use, define the old name too */ +#define sctp_setprim sctp_prim + +/* + * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER) + * + * Requests that the local endpoint set the specified Adaptation Layer + * Indication parameter for all future INIT and INIT-ACK exchanges. + */ +struct sctp_setadaptation { + __u32 ssb_adaptation_ind; +}; + +/* + * 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) + * + * Applications can enable or disable heartbeats for any peer address + * of an association, modify an address's heartbeat interval, force a + * heartbeat to be sent immediately, and adjust the address's maximum + * number of retransmissions sent before an address is considered + * unreachable. The following structure is used to access and modify an + * address's parameters: + */ +enum sctp_spp_flags { + SPP_HB_ENABLE = 1<<0, /*Enable heartbeats*/ + SPP_HB_DISABLE = 1<<1, /*Disable heartbeats*/ + SPP_HB = SPP_HB_ENABLE | SPP_HB_DISABLE, + SPP_HB_DEMAND = 1<<2, /*Send heartbeat immediately*/ + SPP_PMTUD_ENABLE = 1<<3, /*Enable PMTU discovery*/ + SPP_PMTUD_DISABLE = 1<<4, /*Disable PMTU discovery*/ + SPP_PMTUD = SPP_PMTUD_ENABLE | SPP_PMTUD_DISABLE, + SPP_SACKDELAY_ENABLE = 1<<5, /*Enable SACK*/ + SPP_SACKDELAY_DISABLE = 1<<6, /*Disable SACK*/ + SPP_SACKDELAY = SPP_SACKDELAY_ENABLE | SPP_SACKDELAY_DISABLE, + SPP_HB_TIME_IS_ZERO = 1<<7, /* Set HB delay to 0 */ +}; + +struct sctp_paddrparams { + sctp_assoc_t spp_assoc_id; + struct sockaddr_storage spp_address; + __u32 spp_hbinterval; + __u16 spp_pathmaxrxt; + __u32 spp_pathmtu; + __u32 spp_sackdelay; + __u32 spp_flags; +} __attribute__((packed, aligned(4))); + +/* + * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK) + * + * This set option adds a chunk type that the user is requesting to be + * received only in an authenticated way. Changes to the list of chunks + * will only effect future associations on the socket. + */ +struct sctp_authchunk { + __u8 sauth_chunk; +}; + +/* + * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT) + * + * This option gets or sets the list of HMAC algorithms that the local + * endpoint requires the peer to use. + */ +#ifndef __KERNEL__ +/* This here is only used by user space as is. It might not be a good idea + * to export/reveal the whole structure with reserved fields etc. 
+ */ +enum { + SCTP_AUTH_HMAC_ID_SHA1 = 1, + SCTP_AUTH_HMAC_ID_SHA256 = 3, +}; +#endif + +struct sctp_hmacalgo { + __u32 shmac_num_idents; + __u16 shmac_idents[]; +}; + +/* Sadly, user and kernel space have different names for + * this structure member, so this is to not break anything. + */ +#define shmac_number_of_idents shmac_num_idents + +/* + * 7.1.20. Set a shared key (SCTP_AUTH_KEY) + * + * This option will set a shared secret key which is used to build an + * association shared key. + */ +struct sctp_authkey { + sctp_assoc_t sca_assoc_id; + __u16 sca_keynumber; + __u16 sca_keylength; + __u8 sca_key[]; +}; + +/* + * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY) + * + * This option will get or set the active shared key to be used to build + * the association shared key. + */ + +struct sctp_authkeyid { + sctp_assoc_t scact_assoc_id; + __u16 scact_keynumber; +}; + + +/* + * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) + * + * This option will effect the way delayed acks are performed. This + * option allows you to get or set the delayed ack time, in + * milliseconds. It also allows changing the delayed ack frequency. + * Changing the frequency to 1 disables the delayed sack algorithm. If + * the assoc_id is 0, then this sets or gets the endpoints default + * values. If the assoc_id field is non-zero, then the set or get + * effects the specified association for the one to many model (the + * assoc_id field is ignored by the one to one model). Note that if + * sack_delay or sack_freq are 0 when setting this option, then the + * current values will remain unchanged. + */ +struct sctp_sack_info { + sctp_assoc_t sack_assoc_id; + uint32_t sack_delay; + uint32_t sack_freq; +}; + +struct sctp_assoc_value { + sctp_assoc_t assoc_id; + uint32_t assoc_value; +}; + +/* + * 7.2.2 Peer Address Information + * + * Applications can retrieve information about a specific peer address + * of an association, including its reachability state, congestion + * window, and retransmission timer values. This information is + * read-only. The following structure is used to access this + * information: + */ +struct sctp_paddrinfo { + sctp_assoc_t spinfo_assoc_id; + struct sockaddr_storage spinfo_address; + __s32 spinfo_state; + __u32 spinfo_cwnd; + __u32 spinfo_srtt; + __u32 spinfo_rto; + __u32 spinfo_mtu; +} __attribute__((packed, aligned(4))); + +/* Peer addresses's state. */ +/* UNKNOWN: Peer address passed by the upper layer in sendmsg or connect[x] + * calls. + * UNCONFIRMED: Peer address received in INIT/INIT-ACK address parameters. + * Not yet confirmed by a heartbeat and not available for data + * transfers. + * ACTIVE : Peer address confirmed, active and available for data transfers. + * INACTIVE: Peer address inactive and not available for data transfers. + */ +enum sctp_spinfo_state { + SCTP_INACTIVE, + SCTP_PF, + SCTP_ACTIVE, + SCTP_UNCONFIRMED, + SCTP_UNKNOWN = 0xffff /* Value used for transport state unknown */ +}; + +/* + * 7.2.1 Association Status (SCTP_STATUS) + * + * Applications can retrieve current status information about an + * association, including association state, peer receiver window size, + * number of unacked data chunks, and number of data chunks pending + * receipt. This information is read-only. 
The following structure is + * used to access this information: + */ +struct sctp_status { + sctp_assoc_t sstat_assoc_id; + __s32 sstat_state; + __u32 sstat_rwnd; + __u16 sstat_unackdata; + __u16 sstat_penddata; + __u16 sstat_instrms; + __u16 sstat_outstrms; + __u32 sstat_fragmentation_point; + struct sctp_paddrinfo sstat_primary; +}; + +/* + * 7.2.3. Get the list of chunks the peer requires to be authenticated + * (SCTP_PEER_AUTH_CHUNKS) + * + * This option gets a list of chunks for a specified association that + * the peer requires to be received authenticated only. + */ +struct sctp_authchunks { + sctp_assoc_t gauth_assoc_id; + __u32 gauth_number_of_chunks; + uint8_t gauth_chunks[]; +}; + +/* The broken spelling has been released already in lksctp-tools header, + * so don't break anyone, now that it's fixed. + */ +#define guth_number_of_chunks gauth_number_of_chunks + +/* Association states. */ +enum sctp_sstat_state { + SCTP_EMPTY = 0, + SCTP_CLOSED = 1, + SCTP_COOKIE_WAIT = 2, + SCTP_COOKIE_ECHOED = 3, + SCTP_ESTABLISHED = 4, + SCTP_SHUTDOWN_PENDING = 5, + SCTP_SHUTDOWN_SENT = 6, + SCTP_SHUTDOWN_RECEIVED = 7, + SCTP_SHUTDOWN_ACK_SENT = 8, +}; + +/* + * 8.2.6. Get the Current Identifiers of Associations + * (SCTP_GET_ASSOC_ID_LIST) + * + * This option gets the current list of SCTP association identifiers of + * the SCTP associations handled by a one-to-many style socket. + */ +struct sctp_assoc_ids { + __u32 gaids_number_of_ids; + sctp_assoc_t gaids_assoc_id[]; +}; + +/* + * 8.3, 8.5 get all peer/local addresses in an association. + * This parameter struct is used by SCTP_GET_PEER_ADDRS and + * SCTP_GET_LOCAL_ADDRS socket options used internally to implement + * sctp_getpaddrs() and sctp_getladdrs() API. + */ +struct sctp_getaddrs_old { + sctp_assoc_t assoc_id; + int addr_num; +#ifdef __KERNEL__ + struct sockaddr __user *addrs; +#else + struct sockaddr *addrs; +#endif +}; + +struct sctp_getaddrs { + sctp_assoc_t assoc_id; /*input*/ + __u32 addr_num; /*output*/ + __u8 addrs[0]; /*output, variable size*/ +}; + +/* A socket user request obtained via SCTP_GET_ASSOC_STATS that retrieves + * association stats. All stats are counts except sas_maxrto and + * sas_obs_rto_ipaddr. maxrto is the max observed rto + transport since + * the last call. Will return 0 when RTO was not update since last call + */ +struct sctp_assoc_stats { + sctp_assoc_t sas_assoc_id; /* Input */ + /* Transport of observed max RTO */ + struct sockaddr_storage sas_obs_rto_ipaddr; + __u64 sas_maxrto; /* Maximum Observed RTO for period */ + __u64 sas_isacks; /* SACKs received */ + __u64 sas_osacks; /* SACKs sent */ + __u64 sas_opackets; /* Packets sent */ + __u64 sas_ipackets; /* Packets received */ + __u64 sas_rtxchunks; /* Retransmitted Chunks */ + __u64 sas_outofseqtsns;/* TSN received > next expected */ + __u64 sas_idupchunks; /* Dups received (ordered+unordered) */ + __u64 sas_gapcnt; /* Gap Acknowledgements Received */ + __u64 sas_ouodchunks; /* Unordered data chunks sent */ + __u64 sas_iuodchunks; /* Unordered data chunks received */ + __u64 sas_oodchunks; /* Ordered data chunks sent */ + __u64 sas_iodchunks; /* Ordered data chunks received */ + __u64 sas_octrlchunks; /* Control chunks sent */ + __u64 sas_ictrlchunks; /* Control chunks received */ +}; + +/* These are bit fields for msghdr->msg_flags. See section 5.1. */ +/* On user space Linux, these live in as an enum. 
*/ +enum sctp_msg_flags { + MSG_NOTIFICATION = 0x8000, +#define MSG_NOTIFICATION MSG_NOTIFICATION +}; + +/* + * 8.1 sctp_bindx() + * + * The flags parameter is formed from the bitwise OR of zero or more of the + * following currently defined flags: + */ +#define SCTP_BINDX_ADD_ADDR 0x01 +#define SCTP_BINDX_REM_ADDR 0x02 + +/* This is the structure that is passed as an argument(optval) to + * getsockopt(SCTP_SOCKOPT_PEELOFF). + */ +typedef struct { + sctp_assoc_t associd; + int sd; +} sctp_peeloff_arg_t; + +/* + * Peer Address Thresholds socket option + */ +struct sctp_paddrthlds { + sctp_assoc_t spt_assoc_id; + struct sockaddr_storage spt_address; + __u16 spt_pathmaxrxt; + __u16 spt_pathpfthld; +}; + +#endif /* _UAPI_SCTP_H */ -- cgit v1.2.3-70-g09d2 From ac64995da872c8ae6f74a5556fdad565829985b0 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Thu, 11 Apr 2013 04:40:30 +0000 Subject: usbnet: introduce usbnet_link_change API This patch introduces the API of usbnet_link_change, so that usbnet can handle link change centrally, which may help to implement killing traffic URBs for saving USB bus bandwidth and host controller power. Signed-off-by: Ming Lei Signed-off-by: David S. Miller --- drivers/net/usb/usbnet.c | 13 +++++++++++++ include/linux/usb/usbnet.h | 1 + 2 files changed, 14 insertions(+) (limited to 'include/linux') diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 51f3192f3931..40e4237b0a3a 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1653,6 +1653,19 @@ int usbnet_manage_power(struct usbnet *dev, int on) } EXPORT_SYMBOL(usbnet_manage_power); +void usbnet_link_change(struct usbnet *dev, bool link, bool need_reset) +{ + /* update link after link is reseted */ + if (link && !need_reset) + netif_carrier_on(dev->net); + else + netif_carrier_off(dev->net); + + if (need_reset && link) + usbnet_defer_kevent(dev, EVENT_LINK_RESET); +} +EXPORT_SYMBOL(usbnet_link_change); + /*-------------------------------------------------------------------------*/ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, u16 value, u16 index, void *data, u16 size) diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 0e5ac93bab10..eb021b8f53bd 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -245,5 +245,6 @@ extern void usbnet_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); extern int usbnet_nway_reset(struct net_device *net); extern int usbnet_manage_power(struct usbnet *, int); +extern void usbnet_link_change(struct usbnet *, bool, bool); #endif /* __LINUX_USB_USBNET_H */ -- cgit v1.2.3-70-g09d2 From 4b49f58fff00e6e9b24eaa31d4c6324393d76b0a Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Thu, 11 Apr 2013 04:40:40 +0000 Subject: usbnet: handle link change The link change is detected via the interrupt pipe, and bulk pipes are responsible for transfering packets, so it is reasonable to stop bulk transfer after link is reported as off. Two adavantages may be obtained with stopping bulk transfer after link becomes off: - USB bus bandwidth is saved(USB bus is shared bus except for USB3.0), for example, lots of 'IN' token packets and 'NYET' handshake packets is transfered on 2.0 bus. - probabaly power might be saved for usb host controller since cancelling bulk transfer may disable the asynchronous schedule of host controller. 
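The handling described here relies on minidrivers feeding carrier state
into usbnet through the usbnet_link_change() helper added by the
previous patch. As a rough, hypothetical sketch only (the interrupt
message layout and the link bit below are invented for illustration and
are not taken from these patches or from any real device), a
minidriver's status() callback could report the link like this:

	/* hypothetical minidriver status() hook, for illustration only */
	static void example_status(struct usbnet *dev, struct urb *urb)
	{
		u8 *buf = urb->transfer_buffer;
		bool link;

		if (urb->actual_length < 1)
			return;

		/* assume a one-byte notification with bit 0 = link up */
		link = !!(buf[0] & 0x01);

		/* no firmware reset is needed, so pass need_reset = false */
		usbnet_link_change(dev, link, false);
	}

	static const struct driver_info example_info = {
		.description	= "hypothetical usbnet minidriver",
		.status		= example_status,
	};

With need_reset false the helper only toggles the carrier and defers
EVENT_LINK_CHANGE, so the kevent worker in the diff below can kill the
rx URBs once the link goes down and reschedule the bh when it comes
back up.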
With this patch, when link becomes off, about ~10% performance boost can be found on bulk transfer of anther usb device which is attached to same bus with the usbnet device, see below test on next-20130410: - read from usb mass storage(Sandisk Extreme USB 3.0) on pandaboard with below command after unplugging ethernet cable: dd if=/dev/sda iflag=direct of=/dev/null bs=1M count=800 - without the patch 1, 838860800 bytes (839 MB) copied, 36.2216 s, 23.2 MB/s 2, 838860800 bytes (839 MB) copied, 35.8368 s, 23.4 MB/s 3, 838860800 bytes (839 MB) copied, 35.823 s, 23.4 MB/s 4, 838860800 bytes (839 MB) copied, 35.937 s, 23.3 MB/s 5, 838860800 bytes (839 MB) copied, 35.7365 s, 23.5 MB/s average: 23.6MB/s - with the patch 1, 838860800 bytes (839 MB) copied, 32.3817 s, 25.9 MB/s 2, 838860800 bytes (839 MB) copied, 31.7389 s, 26.4 MB/s 3, 838860800 bytes (839 MB) copied, 32.438 s, 25.9 MB/s 4, 838860800 bytes (839 MB) copied, 32.5492 s, 25.8 MB/s 5, 838860800 bytes (839 MB) copied, 31.6178 s, 26.5 MB/s average: 26.1MB/s Signed-off-by: Ming Lei Signed-off-by: David S. Miller --- drivers/net/usb/usbnet.c | 30 ++++++++++++++++++++++++++++++ include/linux/usb/usbnet.h | 1 + 2 files changed, 31 insertions(+) (limited to 'include/linux') diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 34e425264a7a..1e5a9b72650e 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -938,6 +938,27 @@ static const struct ethtool_ops usbnet_ethtool_ops = { /*-------------------------------------------------------------------------*/ +static void __handle_link_change(struct usbnet *dev) +{ + if (!test_bit(EVENT_DEV_OPEN, &dev->flags)) + return; + + if (!netif_carrier_ok(dev->net)) { + /* kill URBs for reading packets to save bus bandwidth */ + unlink_urbs(dev, &dev->rxq); + + /* + * tx_timeout will unlink URBs for sending packets and + * tx queue is stopped by netcore after link becomes off + */ + } else { + /* submitting URBs for reading packets */ + tasklet_schedule(&dev->bh); + } + + clear_bit(EVENT_LINK_CHANGE, &dev->flags); +} + /* work that cannot be done in interrupt context uses keventd. * * NOTE: with 2.5 we could do more of this using completion callbacks, @@ -1035,8 +1056,14 @@ skip_reset: } else { usb_autopm_put_interface(dev->intf); } + + /* handle link change from link resetting */ + __handle_link_change(dev); } + if (test_bit (EVENT_LINK_CHANGE, &dev->flags)) + __handle_link_change(dev); + if (dev->flags) netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags); } @@ -1286,6 +1313,7 @@ static void usbnet_bh (unsigned long param) // or are we maybe short a few urbs? 
} else if (netif_running (dev->net) && netif_device_present (dev->net) && + netif_carrier_ok(dev->net) && !timer_pending (&dev->delay) && !test_bit (EVENT_RX_HALT, &dev->flags)) { int temp = dev->rxq.qlen; @@ -1663,6 +1691,8 @@ void usbnet_link_change(struct usbnet *dev, bool link, bool need_reset) if (need_reset && link) usbnet_defer_kevent(dev, EVENT_LINK_RESET); + else + usbnet_defer_kevent(dev, EVENT_LINK_CHANGE); } EXPORT_SYMBOL(usbnet_link_change); diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index eb021b8f53bd..da46327fca17 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -72,6 +72,7 @@ struct usbnet { # define EVENT_DEVICE_REPORT_IDLE 8 # define EVENT_NO_RUNTIME_PM 9 # define EVENT_RX_KILL 10 +# define EVENT_LINK_CHANGE 11 }; static inline struct usb_driver *driver_of(struct usb_interface *intf) -- cgit v1.2.3-70-g09d2 From 1640f28f6b839637d9b82a3c4a19120601e59c66 Mon Sep 17 00:00:00 2001 From: Franky Lin Date: Thu, 11 Apr 2013 13:28:51 +0200 Subject: brcmfmac: add support for dongle ARM CR4 core Newer WiFi chip use ARM CR4 core to achieve higher performance. Add necessary code for host driver in order to support CR4 core. Reviewed-by: Arend van Spriel Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: John W. Linville --- drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c | 15 ++- .../net/wireless/brcm80211/brcmfmac/sdio_chip.c | 150 ++++++++++++++++++--- .../net/wireless/brcm80211/brcmfmac/sdio_chip.h | 6 +- include/linux/bcma/bcma.h | 1 + include/linux/bcma/bcma_regs.h | 1 + 5 files changed, 148 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index 3147960ad975..d24eb66d324d 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c @@ -2652,7 +2652,7 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus, struct sdpcm_shared_le sh_le; __le32 addr_le; - shaddr = bus->ramsize - 4; + shaddr = bus->ci->rambase + bus->ramsize - 4; /* * Read last word in socram to determine @@ -3030,10 +3030,11 @@ static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_sdio *bus) static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus) { - int offset = 0; + int offset; uint len; u8 *memblock = NULL, *memptr; int ret; + u8 idx; brcmf_dbg(INFO, "Enter\n"); @@ -3054,9 +3055,14 @@ static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus) memptr += (BRCMF_SDALIGN - ((u32)(unsigned long)memblock % BRCMF_SDALIGN)); + offset = bus->ci->rambase; + /* Download image */ - while ((len = - brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus))) { + len = brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus); + idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_ARM_CR4); + if (BRCMF_MAX_CORENUM != idx) + memcpy(&bus->ci->rst_vec, memptr, sizeof(bus->ci->rst_vec)); + while (len) { ret = brcmf_sdio_ramrw(bus->sdiodev, true, offset, memptr, len); if (ret) { brcmf_err("error %d on writing %d membytes at 0x%08x\n", @@ -3065,6 +3071,7 @@ static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus) } offset += MEMBLOCK; + len = brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus); } err: diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c index 9818598f30ea..5db985cb0e0a 100644 --- 
a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c @@ -52,6 +52,9 @@ #define CIB_REV_MASK 0xff000000 #define CIB_REV_SHIFT 24 +/* ARM CR4 core specific control flag bits */ +#define ARMCR4_BCMA_IOCTL_CPUHALT 0x0020 + #define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu)) /* SDIO Pad drive strength to select value mappings */ struct sdiod_drive_str { @@ -149,7 +152,7 @@ brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev, static void brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev, - struct chip_info *ci, u16 coreid) + struct chip_info *ci, u16 coreid, u32 core_bits) { u32 regdata, base; u8 idx; @@ -235,7 +238,7 @@ brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev, static void brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev, - struct chip_info *ci, u16 coreid) + struct chip_info *ci, u16 coreid, u32 core_bits) { u8 idx; u32 regdata; @@ -249,19 +252,36 @@ brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev, if ((regdata & BCMA_RESET_CTL_RESET) != 0) return; - brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL, 0, NULL); - regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL, + /* ensure no pending backplane operation + * 300uc should be sufficient for backplane ops to be finish + * extra 10ms is taken into account for firmware load stage + * after 10300us carry on disabling the core anyway + */ + SPINWAIT(brcmf_sdio_regrl(sdiodev, + ci->c_inf[idx].wrapbase+BCMA_RESET_ST, + NULL), 10300); + regdata = brcmf_sdio_regrl(sdiodev, + ci->c_inf[idx].wrapbase+BCMA_RESET_ST, NULL); - udelay(10); + if (regdata) + brcmf_err("disabling core 0x%x with reset status %x\n", + coreid, regdata); brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL, BCMA_RESET_CTL_RESET, NULL); udelay(1); + + brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL, + core_bits, NULL); + regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL, + NULL); + usleep_range(10, 20); + } static void brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev, - struct chip_info *ci, u16 coreid) + struct chip_info *ci, u16 coreid, u32 core_bits) { u32 regdata; u8 idx; @@ -272,7 +292,7 @@ brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev, * Must do the disable sequence first to work for * arbitrary current core state. */ - brcmf_sdio_sb_coredisable(sdiodev, ci, coreid); + brcmf_sdio_sb_coredisable(sdiodev, ci, coreid, 0); /* * Now do the initialization sequence. 
@@ -325,7 +345,7 @@ brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev, static void brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev, - struct chip_info *ci, u16 coreid) + struct chip_info *ci, u16 coreid, u32 core_bits) { u8 idx; u32 regdata; @@ -333,28 +353,67 @@ brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev, idx = brcmf_sdio_chip_getinfidx(ci, coreid); /* must disable first to work for arbitrary current core state */ - brcmf_sdio_ai_coredisable(sdiodev, ci, coreid); + brcmf_sdio_ai_coredisable(sdiodev, ci, coreid, core_bits); /* now do initialization sequence */ brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL, - BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL); + core_bits | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL); regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL, NULL); brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL, 0, NULL); + regdata = brcmf_sdio_regrl(sdiodev, + ci->c_inf[idx].wrapbase+BCMA_RESET_CTL, + NULL); udelay(1); brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL, - BCMA_IOCTL_CLK, NULL); + core_bits | BCMA_IOCTL_CLK, NULL); regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL, NULL); udelay(1); } +#ifdef DEBUG +/* safety check for chipinfo */ +static int brcmf_sdio_chip_cichk(struct chip_info *ci) +{ + u8 core_idx; + + /* check RAM core presence for ARM CM3 core */ + core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3); + if (BRCMF_MAX_CORENUM != core_idx) { + core_idx = brcmf_sdio_chip_getinfidx(ci, + BCMA_CORE_INTERNAL_MEM); + if (BRCMF_MAX_CORENUM == core_idx) { + brcmf_err("RAM core not provided with ARM CM3 core\n"); + return -ENODEV; + } + } + + /* check RAM base for ARM CR4 core */ + core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CR4); + if (BRCMF_MAX_CORENUM != core_idx) { + if (ci->rambase == 0) { + brcmf_err("RAM base not provided with ARM CR4 core\n"); + return -ENOMEM; + } + } + + return 0; +} +#else /* DEBUG */ +static inline int brcmf_sdio_chip_cichk(struct chip_info *ci) +{ + return 0; +} +#endif + static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci, u32 regs) { u32 regdata; + int ret; /* Get CC core rev * Chipid is assume to be at offset 0 from regs arg @@ -439,6 +498,10 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev, return -ENODEV; } + ret = brcmf_sdio_chip_cichk(ci); + if (ret) + return ret; + switch (ci->socitype) { case SOCI_SB: ci->iscoreup = brcmf_sdio_sb_iscoreup; @@ -538,7 +601,7 @@ brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev, * Make sure any on-chip ARM is off (in case strapping is wrong), * or downloaded code was already running. 
*/ - ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3); + ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0); } int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev, @@ -701,7 +764,7 @@ static bool brcmf_sdio_chip_writenvram(struct brcmf_sdio_dev *sdiodev, u32 token; __le32 token_le; - nvram_addr = (ci->ramsize - 4) - nvram_sz; + nvram_addr = (ci->ramsize - 4) - nvram_sz + ci->rambase; /* Write the vars list */ err = brcmf_sdio_ramrw(sdiodev, true, nvram_addr, nvram_dat, nvram_sz); @@ -728,7 +791,7 @@ static bool brcmf_sdio_chip_writenvram(struct brcmf_sdio_dev *sdiodev, nvram_addr, nvram_sz, token); /* Write the length token to the last word */ - if (brcmf_sdio_ramrw(sdiodev, true, (ci->ramsize - 4), + if (brcmf_sdio_ramrw(sdiodev, true, (ci->ramsize - 4 + ci->rambase), (u8 *)&token_le, 4)) return false; @@ -741,8 +804,8 @@ brcmf_sdio_chip_cm3_enterdl(struct brcmf_sdio_dev *sdiodev, { u32 zeros = 0; - ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3); - ci->resetcore(sdiodev, ci, BCMA_CORE_INTERNAL_MEM); + ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0); + ci->resetcore(sdiodev, ci, BCMA_CORE_INTERNAL_MEM, 0); /* clear length token */ brcmf_sdio_ramrw(sdiodev, true, ci->ramsize - 4, (u8 *)&zeros, 4); @@ -769,7 +832,41 @@ brcmf_sdio_chip_cm3_exitdl(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci, reg_addr += offsetof(struct sdpcmd_regs, intstatus); brcmf_sdio_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL); - ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CM3); + ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CM3, 0); + + return true; +} + +static inline void +brcmf_sdio_chip_cr4_enterdl(struct brcmf_sdio_dev *sdiodev, + struct chip_info *ci) +{ + ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, + ARMCR4_BCMA_IOCTL_CPUHALT); +} + +static bool +brcmf_sdio_chip_cr4_exitdl(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci, + char *nvram_dat, uint nvram_sz) +{ + u8 core_idx; + u32 reg_addr; + + if (!brcmf_sdio_chip_writenvram(sdiodev, ci, nvram_dat, nvram_sz)) + return false; + + /* clear all interrupts */ + core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV); + reg_addr = ci->c_inf[core_idx].base; + reg_addr += offsetof(struct sdpcmd_regs, intstatus); + brcmf_sdio_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL); + + /* Write reset vector to address 0 */ + brcmf_sdio_ramrw(sdiodev, true, 0, (void *)&ci->rst_vec, + sizeof(ci->rst_vec)); + + /* restore ARM */ + ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, 0); return true; } @@ -777,12 +874,27 @@ brcmf_sdio_chip_cm3_exitdl(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci, void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci) { - brcmf_sdio_chip_cm3_enterdl(sdiodev, ci); + u8 arm_core_idx; + + arm_core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3); + if (BRCMF_MAX_CORENUM != arm_core_idx) { + brcmf_sdio_chip_cm3_enterdl(sdiodev, ci); + return; + } + + brcmf_sdio_chip_cr4_enterdl(sdiodev, ci); } bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci, char *nvram_dat, uint nvram_sz) { - return brcmf_sdio_chip_cm3_exitdl(sdiodev, ci, nvram_dat, nvram_sz); + u8 arm_core_idx; + + arm_core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3); + if (BRCMF_MAX_CORENUM != arm_core_idx) + return brcmf_sdio_chip_cm3_exitdl(sdiodev, ci, nvram_dat, + nvram_sz); + + return brcmf_sdio_chip_cr4_exitdl(sdiodev, ci, nvram_dat, nvram_sz); } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h index 
2123ea71d127..83c041f1bf4a 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h @@ -73,15 +73,17 @@ struct chip_info { u32 pmurev; u32 pmucaps; u32 ramsize; + u32 rambase; + u32 rst_vec; /* reset vertor for ARM CR4 core */ bool (*iscoreup)(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci, u16 coreid); u32 (*corerev)(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci, u16 coreid); void (*coredisable)(struct brcmf_sdio_dev *sdiodev, - struct chip_info *ci, u16 coreid); + struct chip_info *ci, u16 coreid, u32 core_bits); void (*resetcore)(struct brcmf_sdio_dev *sdiodev, - struct chip_info *ci, u16 coreid); + struct chip_info *ci, u16 coreid, u32 core_bits); }; struct sbconfig { diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 0ab6712fd76b..f14a98a79c9d 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -134,6 +134,7 @@ struct bcma_host_ops { #define BCMA_CORE_I2S 0x834 #define BCMA_CORE_SDR_DDR1_MEM_CTL 0x835 /* SDR/DDR1 memory controller core */ #define BCMA_CORE_SHIM 0x837 /* SHIM component in ubus/6362 */ +#define BCMA_CORE_ARM_CR4 0x83e #define BCMA_CORE_DEFAULT 0xFFF #define BCMA_MAX_NR_CORES 16 diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h index 7e8104bb7a7e..917dcd7965e7 100644 --- a/include/linux/bcma/bcma_regs.h +++ b/include/linux/bcma/bcma_regs.h @@ -37,6 +37,7 @@ #define BCMA_IOST_BIST_DONE 0x8000 #define BCMA_RESET_CTL 0x0800 #define BCMA_RESET_CTL_RESET 0x0001 +#define BCMA_RESET_ST 0x0804 /* BCMA PCI config space registers. */ #define BCMA_PCI_PMCSR 0x44 -- cgit v1.2.3-70-g09d2 From 1e9ab4dd258ecbb0f1c377fd4dbe227cdb93d9bd Mon Sep 17 00:00:00 2001 From: Piotr Haber Date: Thu, 11 Apr 2013 13:28:52 +0200 Subject: brcmfmac: setup SDIO reset behavior Set device in a manner that SDIO I/O card reset will lead to WLAN backplane and PMU state reset. Reviewed-by: Hante Meuleman Reviewed-by: Arend van Spriel Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Franky (Zhenhui) Lin Signed-off-by: Piotr Haber Signed-off-by: Arend van Spriel Signed-off-by: John W. 
Linville --- drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c | 38 ++++++++++++++++++---- .../net/wireless/brcm80211/brcmfmac/sdio_host.h | 2 ++ include/linux/bcma/bcma_driver_chipcommon.h | 3 ++ 3 files changed, 36 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index d24eb66d324d..c06bb083c459 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c @@ -3635,7 +3635,6 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva) int err = 0; int reg_addr; u32 reg_val; - u8 idx; bus->alp_only = true; @@ -3686,12 +3685,37 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva) goto fail; } - /* Set core control so an SDIO reset does a backplane reset */ - idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV); - reg_addr = bus->ci->c_inf[idx].base + - offsetof(struct sdpcmd_regs, corecontrol); - reg_val = brcmf_sdio_regrl(bus->sdiodev, reg_addr, NULL); - brcmf_sdio_regwl(bus->sdiodev, reg_addr, reg_val | CC_BPRESEN, NULL); + /* Set card control so an SDIO card reset does a WLAN backplane reset */ + reg_val = brcmf_sdio_regrb(bus->sdiodev, + SDIO_CCCR_BRCM_CARDCTRL, &err); + if (err) + goto fail; + + reg_val |= SDIO_CCCR_BRCM_CARDCTRL_WLANRESET; + + brcmf_sdio_regwb(bus->sdiodev, + SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err); + if (err) + goto fail; + + /* set PMUControl so a backplane reset does PMU state reload */ + reg_addr = CORE_CC_REG(bus->ci->c_inf[0].base, + pmucontrol); + reg_val = brcmf_sdio_regrl(bus->sdiodev, + reg_addr, + &err); + if (err) + goto fail; + + reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT); + + brcmf_sdio_regwl(bus->sdiodev, + reg_addr, + reg_val, + &err); + if (err) + goto fail; + sdio_release_host(bus->sdiodev->func[1]); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h index b9b397b59965..28ed3cc9f35a 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h @@ -52,6 +52,8 @@ #define SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT 0x02 #define SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT 0x04 #define SDIO_CCCR_BRCM_CARDCAP_CMD_NODEC 0x08 +#define SDIO_CCCR_BRCM_CARDCTRL 0xf1 +#define SDIO_CCCR_BRCM_CARDCTRL_WLANRESET 0x02 #define SDIO_CCCR_BRCM_SEPINT 0xf2 #define SDIO_SEPINT_MASK 0x01 diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index 453fcc914683..b8b09eac60a4 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -316,6 +316,9 @@ #define BCMA_CC_PMU_CTL 0x0600 /* PMU control */ #define BCMA_CC_PMU_CTL_ILP_DIV 0xFFFF0000 /* ILP div mask */ #define BCMA_CC_PMU_CTL_ILP_DIV_SHIFT 16 +#define BCMA_CC_PMU_CTL_RES 0x00006000 /* reset control mask */ +#define BCMA_CC_PMU_CTL_RES_SHIFT 13 +#define BCMA_CC_PMU_CTL_RES_RELOAD 0x2 /* reload POR values */ #define BCMA_CC_PMU_CTL_PLL_UPD 0x00000400 #define BCMA_CC_PMU_CTL_NOILPONW 0x00000200 /* No ILP on wait */ #define BCMA_CC_PMU_CTL_HTREQEN 0x00000100 /* HT req enable */ -- cgit v1.2.3-70-g09d2 From 668761ac01d6f5a36b8e5a24d4e154550e2c4c3b Mon Sep 17 00:00:00 2001 From: Hante Meuleman Date: Fri, 12 Apr 2013 10:55:55 +0200 Subject: brcmfmac: define and use platform specific data for SDIO. This patch adds support for platform specific data for SDIO fullmac devices. 
Currently OOB interrupts are configured by Kconfig BRCMFMAC_SDIO_OOB but that is now determined dynamically by checking availibility of platform data. Cc: Hauke Mehrtens Reviewed-by: Arend Van Spriel Reviewed-by: Franky (Zhenhui) Lin Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Piotr Haber Signed-off-by: Hante Meuleman Signed-off-by: Arend van Spriel Signed-off-by: John W. Linville --- drivers/net/wireless/brcm80211/Kconfig | 9 -- drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c | 155 +++++++++++---------- .../net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c | 114 +++++---------- drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c | 29 ++-- .../net/wireless/brcm80211/brcmfmac/sdio_host.h | 6 +- include/linux/platform_data/brcmfmac-sdio.h | 124 +++++++++++++++++ 6 files changed, 250 insertions(+), 187 deletions(-) create mode 100644 include/linux/platform_data/brcmfmac-sdio.h (limited to 'include/linux') diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig index 747e9317dabd..fc8a0fa6d3b2 100644 --- a/drivers/net/wireless/brcm80211/Kconfig +++ b/drivers/net/wireless/brcm80211/Kconfig @@ -37,15 +37,6 @@ config BRCMFMAC_SDIO IEEE802.11n embedded FullMAC WLAN driver. Say Y if you want to use the driver for a SDIO wireless card. -config BRCMFMAC_SDIO_OOB - bool "Out of band interrupt support for SDIO interface chipset" - depends on BRCMFMAC_SDIO - ---help--- - This option enables out-of-band interrupt support for Broadcom - SDIO Wifi chipset using fullmac in order to gain better - performance and deep sleep wake up capability on certain - platforms. Say N if you are unsure. - config BRCMFMAC_USB bool "USB bus interface support for FullMAC driver" depends on USB diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c index aa51f370be49..4891e3df2058 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -37,16 +38,15 @@ #define SDIOH_API_ACCESS_RETRY_LIMIT 2 -#ifdef CONFIG_BRCMFMAC_SDIO_OOB -static irqreturn_t brcmf_sdio_irqhandler(int irq, void *dev_id) + +static irqreturn_t brcmf_sdio_oob_irqhandler(int irq, void *dev_id) { struct brcmf_bus *bus_if = dev_get_drvdata(dev_id); struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; - brcmf_dbg(INTR, "oob intr triggered\n"); + brcmf_dbg(INTR, "OOB intr triggered\n"); - /* - * out-of-band interrupt is level-triggered which won't + /* out-of-band interrupt is level-triggered which won't * be cleared until dpc */ if (sdiodev->irq_en) { @@ -59,72 +59,12 @@ static irqreturn_t brcmf_sdio_irqhandler(int irq, void *dev_id) return IRQ_HANDLED; } -int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev) -{ - int ret = 0; - u8 data; - unsigned long flags; - - brcmf_dbg(SDIO, "Entering: irq %d\n", sdiodev->irq); - - ret = request_irq(sdiodev->irq, brcmf_sdio_irqhandler, - sdiodev->irq_flags, "brcmf_oob_intr", - &sdiodev->func[1]->dev); - if (ret != 0) - return ret; - spin_lock_init(&sdiodev->irq_en_lock); - spin_lock_irqsave(&sdiodev->irq_en_lock, flags); - sdiodev->irq_en = true; - spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags); - - ret = enable_irq_wake(sdiodev->irq); - if (ret != 0) - return ret; - sdiodev->irq_wake = true; - - sdio_claim_host(sdiodev->func[1]); - - /* must configure SDIO_CCCR_IENx to enable irq */ - data = brcmf_sdio_regrb(sdiodev, SDIO_CCCR_IENx, &ret); - data |= 1 << SDIO_FUNC_1 | 1 << 
SDIO_FUNC_2 | 1; - brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret); - - /* redirect, configure and enable io for interrupt signal */ - data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE; - if (sdiodev->irq_flags & IRQF_TRIGGER_HIGH) - data |= SDIO_SEPINT_ACT_HI; - brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret); - - sdio_release_host(sdiodev->func[1]); - - return 0; -} - -int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev) -{ - brcmf_dbg(SDIO, "Entering\n"); - - sdio_claim_host(sdiodev->func[1]); - brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL); - brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL); - sdio_release_host(sdiodev->func[1]); - - if (sdiodev->irq_wake) { - disable_irq_wake(sdiodev->irq); - sdiodev->irq_wake = false; - } - free_irq(sdiodev->irq, &sdiodev->func[1]->dev); - sdiodev->irq_en = false; - - return 0; -} -#else /* CONFIG_BRCMFMAC_SDIO_OOB */ -static void brcmf_sdio_irqhandler(struct sdio_func *func) +static void brcmf_sdio_ib_irqhandler(struct sdio_func *func) { struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev); struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; - brcmf_dbg(INTR, "ib intr triggered\n"); + brcmf_dbg(INTR, "IB intr triggered\n"); brcmf_sdbrcm_isr(sdiodev->bus); } @@ -136,12 +76,56 @@ static void brcmf_sdio_dummy_irqhandler(struct sdio_func *func) int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev) { - brcmf_dbg(SDIO, "Entering\n"); + int ret = 0; + u8 data; + unsigned long flags; - sdio_claim_host(sdiodev->func[1]); - sdio_claim_irq(sdiodev->func[1], brcmf_sdio_irqhandler); - sdio_claim_irq(sdiodev->func[2], brcmf_sdio_dummy_irqhandler); - sdio_release_host(sdiodev->func[1]); + if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) { + brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n", + sdiodev->pdata->oob_irq_nr); + ret = request_irq(sdiodev->pdata->oob_irq_nr, + brcmf_sdio_oob_irqhandler, + sdiodev->pdata->oob_irq_flags, + "brcmf_oob_intr", + &sdiodev->func[1]->dev); + if (ret != 0) { + brcmf_err("request_irq failed %d\n", ret); + return ret; + } + sdiodev->oob_irq_requested = true; + spin_lock_init(&sdiodev->irq_en_lock); + spin_lock_irqsave(&sdiodev->irq_en_lock, flags); + sdiodev->irq_en = true; + spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags); + + ret = enable_irq_wake(sdiodev->pdata->oob_irq_nr); + if (ret != 0) { + brcmf_err("enable_irq_wake failed %d\n", ret); + return ret; + } + sdiodev->irq_wake = true; + + sdio_claim_host(sdiodev->func[1]); + + /* must configure SDIO_CCCR_IENx to enable irq */ + data = brcmf_sdio_regrb(sdiodev, SDIO_CCCR_IENx, &ret); + data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1; + brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret); + + /* redirect, configure and enable io for interrupt signal */ + data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE; + if (sdiodev->pdata->oob_irq_flags & IRQF_TRIGGER_HIGH) + data |= SDIO_SEPINT_ACT_HI; + brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret); + + sdio_release_host(sdiodev->func[1]); + } else { + brcmf_dbg(SDIO, "Entering\n"); + sdio_claim_host(sdiodev->func[1]); + sdio_claim_irq(sdiodev->func[1], brcmf_sdio_ib_irqhandler); + sdio_claim_irq(sdiodev->func[2], brcmf_sdio_dummy_irqhandler); + sdio_release_host(sdiodev->func[1]); + } return 0; } @@ -150,14 +134,31 @@ int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev) { brcmf_dbg(SDIO, "Entering\n"); - sdio_claim_host(sdiodev->func[1]); - sdio_release_irq(sdiodev->func[2]); - sdio_release_irq(sdiodev->func[1]); - sdio_release_host(sdiodev->func[1]); + if 
((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) { + sdio_claim_host(sdiodev->func[1]); + brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL); + brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL); + sdio_release_host(sdiodev->func[1]); + + if (sdiodev->oob_irq_requested) { + sdiodev->oob_irq_requested = false; + if (sdiodev->irq_wake) { + disable_irq_wake(sdiodev->pdata->oob_irq_nr); + sdiodev->irq_wake = false; + } + free_irq(sdiodev->pdata->oob_irq_nr, + &sdiodev->func[1]->dev); + sdiodev->irq_en = false; + } + } else { + sdio_claim_host(sdiodev->func[1]); + sdio_release_irq(sdiodev->func[2]); + sdio_release_irq(sdiodev->func[1]); + sdio_release_host(sdiodev->func[1]); + } return 0; } -#endif /* CONFIG_BRCMFMAC_SDIO_OOB */ int brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address) diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c index b1ea1036e825..44fa0cdbf97b 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c @@ -26,6 +26,7 @@ #include /* request_irq() */ #include #include +#include #include #include @@ -62,14 +63,8 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = { }; MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids); -#ifdef CONFIG_BRCMFMAC_SDIO_OOB -static struct list_head oobirq_lh; -struct brcmf_sdio_oobirq { - unsigned int irq; - unsigned long flags; - struct list_head list; -}; -#endif /* CONFIG_BRCMFMAC_SDIO_OOB */ +static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata; + static bool brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev) @@ -428,33 +423,6 @@ void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev) } -#ifdef CONFIG_BRCMFMAC_SDIO_OOB -static int brcmf_sdio_getintrcfg(struct brcmf_sdio_dev *sdiodev) -{ - struct brcmf_sdio_oobirq *oobirq_entry; - - if (list_empty(&oobirq_lh)) { - brcmf_err("no valid oob irq resource\n"); - return -ENXIO; - } - - oobirq_entry = list_first_entry(&oobirq_lh, struct brcmf_sdio_oobirq, - list); - - sdiodev->irq = oobirq_entry->irq; - sdiodev->irq_flags = oobirq_entry->flags; - list_del(&oobirq_entry->list); - kfree(oobirq_entry); - - return 0; -} -#else -static inline int brcmf_sdio_getintrcfg(struct brcmf_sdio_dev *sdiodev) -{ - return 0; -} -#endif /* CONFIG_BRCMFMAC_SDIO_OOB */ - static int brcmf_ops_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) { @@ -495,15 +463,13 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, dev_set_drvdata(&func->dev, bus_if); dev_set_drvdata(&sdiodev->func[1]->dev, bus_if); sdiodev->dev = &sdiodev->func[1]->dev; + sdiodev->pdata = brcmfmac_sdio_pdata; atomic_set(&sdiodev->suspend, false); init_waitqueue_head(&sdiodev->request_byte_wait); init_waitqueue_head(&sdiodev->request_word_wait); init_waitqueue_head(&sdiodev->request_chain_wait); init_waitqueue_head(&sdiodev->request_buffer_wait); - err = brcmf_sdio_getintrcfg(sdiodev); - if (err) - goto fail; brcmf_dbg(SDIO, "F2 found, calling brcmf_sdio_probe...\n"); err = brcmf_sdio_probe(sdiodev); @@ -598,7 +564,7 @@ static const struct dev_pm_ops brcmf_sdio_pm_ops = { static struct sdio_driver brcmf_sdmmc_driver = { .probe = brcmf_ops_sdio_probe, .remove = brcmf_ops_sdio_remove, - .name = "brcmfmac", + .name = BRCMFMAC_SDIO_PDATA_NAME, .id_table = brcmf_sdmmc_ids, #ifdef CONFIG_PM_SLEEP .drv = { @@ -607,72 +573,51 @@ static struct sdio_driver brcmf_sdmmc_driver = { #endif /* CONFIG_PM_SLEEP */ }; -#ifdef CONFIG_BRCMFMAC_SDIO_OOB static 
int brcmf_sdio_pd_probe(struct platform_device *pdev) { - struct resource *res; - struct brcmf_sdio_oobirq *oobirq_entry; - int i, ret; + int ret; - INIT_LIST_HEAD(&oobirq_lh); + brcmf_dbg(SDIO, "Enter\n"); - for (i = 0; ; i++) { - res = platform_get_resource(pdev, IORESOURCE_IRQ, i); - if (!res) - break; + brcmfmac_sdio_pdata = pdev->dev.platform_data; - oobirq_entry = kzalloc(sizeof(struct brcmf_sdio_oobirq), - GFP_KERNEL); - if (!oobirq_entry) - return -ENOMEM; - oobirq_entry->irq = res->start; - oobirq_entry->flags = res->flags & IRQF_TRIGGER_MASK; - list_add_tail(&oobirq_entry->list, &oobirq_lh); - } - if (i == 0) - return -ENXIO; + if (brcmfmac_sdio_pdata->power_on) + brcmfmac_sdio_pdata->power_on(); ret = sdio_register_driver(&brcmf_sdmmc_driver); - if (ret) brcmf_err("sdio_register_driver failed: %d\n", ret); return ret; } -static struct platform_driver brcmf_sdio_pd = { - .probe = brcmf_sdio_pd_probe, - .driver = { - .name = "brcmf_sdio_pd" - } -}; - -void brcmf_sdio_exit(void) +static int brcmf_sdio_pd_remove(struct platform_device *pdev) { brcmf_dbg(SDIO, "Enter\n"); + if (brcmfmac_sdio_pdata->power_off) + brcmfmac_sdio_pdata->power_off(); + sdio_unregister_driver(&brcmf_sdmmc_driver); - platform_driver_unregister(&brcmf_sdio_pd); + return 0; } -void brcmf_sdio_init(void) -{ - int ret; - - brcmf_dbg(SDIO, "Enter\n"); - - ret = platform_driver_register(&brcmf_sdio_pd); +static struct platform_driver brcmf_sdio_pd = { + .remove = brcmf_sdio_pd_remove, + .driver = { + .name = BRCMFMAC_SDIO_PDATA_NAME + } +}; - if (ret) - brcmf_err("platform_driver_register failed: %d\n", ret); -} -#else void brcmf_sdio_exit(void) { brcmf_dbg(SDIO, "Enter\n"); - sdio_unregister_driver(&brcmf_sdmmc_driver); + if (brcmfmac_sdio_pdata) + platform_driver_unregister(&brcmf_sdio_pd); + else + sdio_unregister_driver(&brcmf_sdmmc_driver); } void brcmf_sdio_init(void) @@ -681,9 +626,12 @@ void brcmf_sdio_init(void) brcmf_dbg(SDIO, "Enter\n"); - ret = sdio_register_driver(&brcmf_sdmmc_driver); + ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe); + if (ret == -ENODEV) { + brcmf_dbg(SDIO, "No platform data available, registering without.\n"); + ret = sdio_register_driver(&brcmf_sdmmc_driver); + } if (ret) - brcmf_err("sdio_register_driver failed: %d\n", ret); + brcmf_err("driver registration failed: %d\n", ret); } -#endif /* CONFIG_BRCMFMAC_SDIO_OOB */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index fd697ce3683c..d2487518bd2a 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -517,7 +518,7 @@ static int qcount[NUMPRIO]; static int tx_packets[NUMPRIO]; #endif /* DEBUG */ -#define SDIO_DRIVE_STRENGTH 6 /* in milliamps */ +#define DEFAULT_SDIO_DRIVE_STRENGTH 6 /* in milliamps */ #define RETRYCHAN(chan) ((chan) == SDPCM_EVENT_CHANNEL) @@ -2046,23 +2047,19 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev) bus->tx_seq = bus->rx_seq = 0; } -#ifdef CONFIG_BRCMFMAC_SDIO_OOB static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus) { unsigned long flags; - spin_lock_irqsave(&bus->sdiodev->irq_en_lock, flags); - if (!bus->sdiodev->irq_en && !atomic_read(&bus->ipend)) { - enable_irq(bus->sdiodev->irq); - bus->sdiodev->irq_en = true; + if (bus->sdiodev->oob_irq_requested) { + spin_lock_irqsave(&bus->sdiodev->irq_en_lock, flags); + if (!bus->sdiodev->irq_en && 
!atomic_read(&bus->ipend)) { + enable_irq(bus->sdiodev->pdata->oob_irq_nr); + bus->sdiodev->irq_en = true; + } + spin_unlock_irqrestore(&bus->sdiodev->irq_en_lock, flags); } - spin_unlock_irqrestore(&bus->sdiodev->irq_en_lock, flags); -} -#else -static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus) -{ } -#endif /* CONFIG_BRCMFMAC_SDIO_OOB */ static inline void brcmf_sdbrcm_adddpctsk(struct brcmf_sdio *bus) { @@ -3639,6 +3636,7 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva) int err = 0; int reg_addr; u32 reg_val; + u32 drivestrength; bus->alp_only = true; @@ -3679,8 +3677,11 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva) goto fail; } - brcmf_sdio_chip_drivestrengthinit(bus->sdiodev, bus->ci, - SDIO_DRIVE_STRENGTH); + if ((bus->sdiodev->pdata) && (bus->sdiodev->pdata->drive_strength)) + drivestrength = bus->sdiodev->pdata->drive_strength; + else + drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH; + brcmf_sdio_chip_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength); /* Get info on the SOCRAM cores... */ bus->ramsize = bus->ci->ramsize; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h index 28ed3cc9f35a..7c1b6332747e 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h @@ -174,13 +174,11 @@ struct brcmf_sdio_dev { wait_queue_head_t request_buffer_wait; struct device *dev; struct brcmf_bus *bus_if; -#ifdef CONFIG_BRCMFMAC_SDIO_OOB - unsigned int irq; /* oob interrupt number */ - unsigned long irq_flags; /* board specific oob flags */ + struct brcmfmac_sdio_platform_data *pdata; + bool oob_irq_requested; bool irq_en; /* irq enable flags */ spinlock_t irq_en_lock; bool irq_wake; /* irq wake enable flags */ -#endif /* CONFIG_BRCMFMAC_SDIO_OOB */ }; /* Register/deregister interrupt handler. */ diff --git a/include/linux/platform_data/brcmfmac-sdio.h b/include/linux/platform_data/brcmfmac-sdio.h new file mode 100644 index 000000000000..1ade657d5fc1 --- /dev/null +++ b/include/linux/platform_data/brcmfmac-sdio.h @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2013 Broadcom Corporation + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _LINUX_BRCMFMAC_PLATFORM_H +#define _LINUX_BRCMFMAC_PLATFORM_H + +/* + * Platform specific driver functions and data. Through the platform specific + * device data functions can be provided to help the brcmfmac driver to + * operate with the device in combination with the used platform. 
+ * + * Use the platform data in the following (similar) way: + * + * +#include + + +static void brcmfmac_power_on(void) +{ +} + +static void brcmfmac_power_off(void) +{ +} + +static void brcmfmac_reset(void) +{ +} + +static struct brcmfmac_sdio_platform_data brcmfmac_sdio_pdata = { + .power_on = brcmfmac_power_on, + .power_off = brcmfmac_power_off, + .reset = brcmfmac_reset +}; + +static struct platform_device brcmfmac_device = { + .name = BRCMFMAC_SDIO_PDATA_NAME, + .id = PLATFORM_DEVID_NONE, + .dev.platform_data = &brcmfmac_sdio_pdata +}; + +void __init brcmfmac_init_pdata(void) +{ + brcmfmac_sdio_pdata.oob_irq_supported = true; + brcmfmac_sdio_pdata.oob_irq_nr = gpio_to_irq(GPIO_BRCMF_SDIO_OOB); + brcmfmac_sdio_pdata.oob_irq_flags = IORESOURCE_IRQ | + IORESOURCE_IRQ_HIGHLEVEL; + platform_device_register(&brcmfmac_device); +} + * + * + * Note: the brcmfmac can be loaded as module or be statically built-in into + * the kernel. If built-in then do note that it uses module_init (and + * module_exit) routines which equal device_initcall. So if you intend to + * create a module with the platform specific data for the brcmfmac and have + * it built-in to the kernel then use a higher initcall then device_initcall + * (see init.h). If this is not done then brcmfmac will load without problems + * but will not pickup the platform data. + * + * When the driver does not "detect" platform driver data then it will continue + * without reporting anything and just assume there is no data needed. Which is + * probably true for most platforms. + * + * Explanation of the platform_data fields: + * + * drive_strength: is the preferred drive_strength to be used for the SDIO + * pins. If 0 then a default value will be used. This is the target drive + * strength, the exact drive strength which will be used depends on the + * capabilities of the device. + * + * oob_irq_supported: does the board have support for OOB interrupts. SDIO + * in-band interrupts are relatively slow and for having less overhead on + * interrupt processing an out of band interrupt can be used. If the HW + * supports this then enable this by setting this field to true and configure + * the oob related fields. + * + * oob_irq_nr, oob_irq_flags: the OOB interrupt information. The values are + * used for registering the irq using request_irq function. + * + * power_on: This function is called by the brcmfmac when the module gets + * loaded. This can be particularly useful for low power devices. The platform + * spcific routine may for example decide to power up the complete device. + * If there is no use-case for this function then provide NULL. + * + * power_off: This function is called by the brcmfmac when the module gets + * unloaded. At this point the device can be powered down or otherwise be reset. + * So if an actual power_off is not supported but reset is then reset the device + * when this function gets called. This can be particularly useful for low power + * devices. If there is no use-case for this function (either power-down or + * reset) then provide NULL. + * + * reset: This function can get called if the device communication broke down. + * This functionality is particularly useful in case of SDIO type devices. It is + * possible to reset a dongle via sdio data interface, but it requires that + * this is fully functional. This function is chip/module specific and this + * function should return only after the complete reset has completed. 
+ */ + +#define BRCMFMAC_SDIO_PDATA_NAME "brcmfmac_sdio" + +struct brcmfmac_sdio_platform_data { + unsigned int drive_strength; + bool oob_irq_supported; + unsigned int oob_irq_nr; + unsigned long oob_irq_flags; + void (*power_on)(void); + void (*power_off)(void); + void (*reset)(void); +}; + +#endif /* _LINUX_BRCMFMAC_PLATFORM_H */ -- cgit v1.2.3-70-g09d2 From 4cd729b04285b7330edaf5a7080aa795d6d15ff3 Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Mon, 15 Apr 2013 09:54:25 +0000 Subject: net: add dev_uc_sync_multiple() and dev_mc_sync_multiple() api The current implementation of dev_uc_sync/unsync() assumes that there is a strict 1-to-1 relationship between the source and destination of the sync. In other words, once an address has been synced to a destination device, it will not be synced to any other device through the sync API. However, there are some virtual devices that aggreate a number of lower devices and need to sync addresses to all of them. The current API falls short there. This patch introduces a new dev_uc_sync_multiple() api that can be called in the above circumstances and allows sync to work for every invocation. CC: Jiri Pirko Signed-off-by: Vlad Yasevich Signed-off-by: David S. Miller --- include/linux/netdevice.h | 3 + net/core/dev_addr_lists.c | 210 ++++++++++++++++++++++++++++++++++++---------- 2 files changed, 171 insertions(+), 42 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 53d3939358a7..623b57b52195 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -209,6 +209,7 @@ struct netdev_hw_addr { #define NETDEV_HW_ADDR_T_UNICAST 4 #define NETDEV_HW_ADDR_T_MULTICAST 5 bool global_use; + int sync_cnt; int refcount; int synced; struct rcu_head rcu_head; @@ -2627,6 +2628,7 @@ extern int dev_uc_add(struct net_device *dev, const unsigned char *addr); extern int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr); extern int dev_uc_del(struct net_device *dev, const unsigned char *addr); extern int dev_uc_sync(struct net_device *to, struct net_device *from); +extern int dev_uc_sync_multiple(struct net_device *to, struct net_device *from); extern void dev_uc_unsync(struct net_device *to, struct net_device *from); extern void dev_uc_flush(struct net_device *dev); extern void dev_uc_init(struct net_device *dev); @@ -2638,6 +2640,7 @@ extern int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr); extern int dev_mc_del(struct net_device *dev, const unsigned char *addr); extern int dev_mc_del_global(struct net_device *dev, const unsigned char *addr); extern int dev_mc_sync(struct net_device *to, struct net_device *from); +extern int dev_mc_sync_multiple(struct net_device *to, struct net_device *from); extern void dev_mc_unsync(struct net_device *to, struct net_device *from); extern void dev_mc_flush(struct net_device *dev); extern void dev_mc_init(struct net_device *dev); diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index abdc9e6ef33e..c013f38482a1 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c @@ -22,7 +22,8 @@ static int __hw_addr_create_ex(struct netdev_hw_addr_list *list, const unsigned char *addr, int addr_len, - unsigned char addr_type, bool global) + unsigned char addr_type, bool global, + bool sync) { struct netdev_hw_addr *ha; int alloc_size; @@ -37,7 +38,7 @@ static int __hw_addr_create_ex(struct netdev_hw_addr_list *list, ha->type = addr_type; ha->refcount = 1; ha->global_use = global; - ha->synced = 0; 
+ ha->synced = sync; list_add_tail_rcu(&ha->list, &list->list); list->count++; @@ -46,7 +47,7 @@ static int __hw_addr_create_ex(struct netdev_hw_addr_list *list, static int __hw_addr_add_ex(struct netdev_hw_addr_list *list, const unsigned char *addr, int addr_len, - unsigned char addr_type, bool global) + unsigned char addr_type, bool global, bool sync) { struct netdev_hw_addr *ha; @@ -63,43 +64,62 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list, else ha->global_use = true; } + if (sync) { + if (ha->synced) + return 0; + else + ha->synced = true; + } ha->refcount++; return 0; } } - return __hw_addr_create_ex(list, addr, addr_len, addr_type, global); + return __hw_addr_create_ex(list, addr, addr_len, addr_type, global, + sync); } static int __hw_addr_add(struct netdev_hw_addr_list *list, const unsigned char *addr, int addr_len, unsigned char addr_type) { - return __hw_addr_add_ex(list, addr, addr_len, addr_type, false); + return __hw_addr_add_ex(list, addr, addr_len, addr_type, false, false); +} + +static int __hw_addr_del_entry(struct netdev_hw_addr_list *list, + struct netdev_hw_addr *ha, bool global, + bool sync) +{ + if (global && !ha->global_use) + return -ENOENT; + + if (sync && !ha->synced) + return -ENOENT; + + if (global) + ha->global_use = false; + + if (sync) + ha->synced = false; + + if (--ha->refcount) + return 0; + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); + list->count--; + return 0; } static int __hw_addr_del_ex(struct netdev_hw_addr_list *list, const unsigned char *addr, int addr_len, - unsigned char addr_type, bool global) + unsigned char addr_type, bool global, bool sync) { struct netdev_hw_addr *ha; list_for_each_entry(ha, &list->list, list) { if (!memcmp(ha->addr, addr, addr_len) && - (ha->type == addr_type || !addr_type)) { - if (global) { - if (!ha->global_use) - break; - else - ha->global_use = false; - } - if (--ha->refcount) - return 0; - list_del_rcu(&ha->list); - kfree_rcu(ha, rcu_head); - list->count--; - return 0; - } + (ha->type == addr_type || !addr_type)) + return __hw_addr_del_entry(list, ha, global, sync); } return -ENOENT; } @@ -108,7 +128,57 @@ static int __hw_addr_del(struct netdev_hw_addr_list *list, const unsigned char *addr, int addr_len, unsigned char addr_type) { - return __hw_addr_del_ex(list, addr, addr_len, addr_type, false); + return __hw_addr_del_ex(list, addr, addr_len, addr_type, false, false); +} + +static int __hw_addr_sync_one(struct netdev_hw_addr_list *to_list, + struct netdev_hw_addr *ha, + int addr_len) +{ + int err; + + err = __hw_addr_add_ex(to_list, ha->addr, addr_len, ha->type, + false, true); + if (err) + return err; + ha->sync_cnt++; + ha->refcount++; + + return 0; +} + +static void __hw_addr_unsync_one(struct netdev_hw_addr_list *to_list, + struct netdev_hw_addr_list *from_list, + struct netdev_hw_addr *ha, + int addr_len) +{ + int err; + + err = __hw_addr_del_ex(to_list, ha->addr, addr_len, ha->type, + false, true); + if (err) + return; + ha->sync_cnt--; + __hw_addr_del_entry(from_list, ha, false, true); +} + +static int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list, + struct netdev_hw_addr_list *from_list, + int addr_len) +{ + int err = 0; + struct netdev_hw_addr *ha, *tmp; + + list_for_each_entry_safe(ha, tmp, &from_list->list, list) { + if (ha->sync_cnt == ha->refcount) { + __hw_addr_unsync_one(to_list, from_list, ha, addr_len); + } else { + err = __hw_addr_sync_one(to_list, ha, addr_len); + if (err) + break; + } + } + return err; } int __hw_addr_add_multiple(struct 
netdev_hw_addr_list *to_list, @@ -152,6 +222,11 @@ void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list, } EXPORT_SYMBOL(__hw_addr_del_multiple); +/* This function only works where there is a strict 1-1 relationship + * between source and destionation of they synch. If you ever need to + * sync addresses to more then 1 destination, you need to use + * __hw_addr_sync_multiple(). + */ int __hw_addr_sync(struct netdev_hw_addr_list *to_list, struct netdev_hw_addr_list *from_list, int addr_len) @@ -160,17 +235,12 @@ int __hw_addr_sync(struct netdev_hw_addr_list *to_list, struct netdev_hw_addr *ha, *tmp; list_for_each_entry_safe(ha, tmp, &from_list->list, list) { - if (!ha->synced) { - err = __hw_addr_add(to_list, ha->addr, - addr_len, ha->type); + if (!ha->sync_cnt) { + err = __hw_addr_sync_one(to_list, ha, addr_len); if (err) break; - ha->synced++; - ha->refcount++; - } else if (ha->refcount == 1) { - __hw_addr_del(to_list, ha->addr, addr_len, ha->type); - __hw_addr_del(from_list, ha->addr, addr_len, ha->type); - } + } else if (ha->refcount == 1) + __hw_addr_unsync_one(to_list, from_list, ha, addr_len); } return err; } @@ -183,13 +253,8 @@ void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, struct netdev_hw_addr *ha, *tmp; list_for_each_entry_safe(ha, tmp, &from_list->list, list) { - if (ha->synced) { - __hw_addr_del(to_list, ha->addr, - addr_len, ha->type); - ha->synced--; - __hw_addr_del(from_list, ha->addr, - addr_len, ha->type); - } + if (ha->sync_cnt) + __hw_addr_unsync_one(to_list, from_list, ha, addr_len); } } EXPORT_SYMBOL(__hw_addr_unsync); @@ -406,7 +471,7 @@ int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr) } } err = __hw_addr_create_ex(&dev->uc, addr, dev->addr_len, - NETDEV_HW_ADDR_T_UNICAST, true); + NETDEV_HW_ADDR_T_UNICAST, true, false); if (!err) __dev_set_rx_mode(dev); out: @@ -469,7 +534,8 @@ EXPORT_SYMBOL(dev_uc_del); * locked by netif_addr_lock_bh. * * This function is intended to be called from the dev->set_rx_mode - * function of layered software devices. + * function of layered software devices. This function assumes that + * addresses will only ever be synced to the @to devices and no other. */ int dev_uc_sync(struct net_device *to, struct net_device *from) { @@ -487,6 +553,36 @@ int dev_uc_sync(struct net_device *to, struct net_device *from) } EXPORT_SYMBOL(dev_uc_sync); +/** + * dev_uc_sync_multiple - Synchronize device's unicast list to another + * device, but allow for multiple calls to sync to multiple devices. + * @to: destination device + * @from: source device + * + * Add newly added addresses to the destination device and release + * addresses that have been deleted from the source. The source device + * must be locked by netif_addr_lock_bh. + * + * This function is intended to be called from the dev->set_rx_mode + * function of layered software devices. It allows for a single source + * device to be synced to multiple destination devices. 
+ */ +int dev_uc_sync_multiple(struct net_device *to, struct net_device *from) +{ + int err = 0; + + if (to->addr_len != from->addr_len) + return -EINVAL; + + netif_addr_lock_nested(to); + err = __hw_addr_sync_multiple(&to->uc, &from->uc, to->addr_len); + if (!err) + __dev_set_rx_mode(to); + netif_addr_unlock(to); + return err; +} +EXPORT_SYMBOL(dev_uc_sync_multiple); + /** * dev_uc_unsync - Remove synchronized addresses from the destination device * @to: destination device @@ -559,7 +655,7 @@ int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr) } } err = __hw_addr_create_ex(&dev->mc, addr, dev->addr_len, - NETDEV_HW_ADDR_T_MULTICAST, true); + NETDEV_HW_ADDR_T_MULTICAST, true, false); if (!err) __dev_set_rx_mode(dev); out: @@ -575,7 +671,7 @@ static int __dev_mc_add(struct net_device *dev, const unsigned char *addr, netif_addr_lock_bh(dev); err = __hw_addr_add_ex(&dev->mc, addr, dev->addr_len, - NETDEV_HW_ADDR_T_MULTICAST, global); + NETDEV_HW_ADDR_T_MULTICAST, global, false); if (!err) __dev_set_rx_mode(dev); netif_addr_unlock_bh(dev); @@ -615,7 +711,7 @@ static int __dev_mc_del(struct net_device *dev, const unsigned char *addr, netif_addr_lock_bh(dev); err = __hw_addr_del_ex(&dev->mc, addr, dev->addr_len, - NETDEV_HW_ADDR_T_MULTICAST, global); + NETDEV_HW_ADDR_T_MULTICAST, global, false); if (!err) __dev_set_rx_mode(dev); netif_addr_unlock_bh(dev); @@ -678,6 +774,36 @@ int dev_mc_sync(struct net_device *to, struct net_device *from) } EXPORT_SYMBOL(dev_mc_sync); +/** + * dev_mc_sync_multiple - Synchronize device's unicast list to another + * device, but allow for multiple calls to sync to multiple devices. + * @to: destination device + * @from: source device + * + * Add newly added addresses to the destination device and release + * addresses that have no users left. The source device must be + * locked by netif_addr_lock_bh. + * + * This function is intended to be called from the ndo_set_rx_mode + * function of layered software devices. It allows for a single + * source device to be synced to multiple destination devices. + */ +int dev_mc_sync_multiple(struct net_device *to, struct net_device *from) +{ + int err = 0; + + if (to->addr_len != from->addr_len) + return -EINVAL; + + netif_addr_lock_nested(to); + err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len); + if (!err) + __dev_set_rx_mode(to); + netif_addr_unlock(to); + return err; +} +EXPORT_SYMBOL(dev_mc_sync_multiple); + /** * dev_mc_unsync - Remove synchronized addresses from the destination device * @to: destination device -- cgit v1.2.3-70-g09d2 From 37799e52a29af2268d1fbe18908a0d6b9f68af88 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 26 Mar 2013 14:02:26 +0100 Subject: mac80211: unify CSA action frame/beacon processing CSA action frame content should be processed as variable IEs rather than fixed to make it extensible. Unify the code and process them just like CSA in beacons to make it easier to extend for HT/VHT. 
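The on-air layout explains why this helps: after the category and action_code octets, the body of a spectrum management channel switch action frame is nothing but information elements. A hypothetical transmit-side helper, not part of this receive-path patch, makes the point; only the category/action codes and element ID below come from include/linux/ieee80211.h, the helper itself is illustrative.

#include <linux/ieee80211.h>

/* Illustrative only: fill the action frame body (category onwards) into buf
 * and return its length.  The 802.11 header and skb handling that real
 * mac80211 code needs are omitted.
 */
static int fill_csa_action_body(u8 *buf, u8 mode, u8 new_ch_num, u8 count)
{
        u8 *pos = buf;

        *pos++ = WLAN_CATEGORY_SPECTRUM_MGMT;
        *pos++ = WLAN_ACTION_SPCT_CHL_SWITCH;

        /* Channel Switch Announcement element: fixed 3-octet payload */
        *pos++ = WLAN_EID_CHANNEL_SWITCH;
        *pos++ = 3;
        *pos++ = mode;
        *pos++ = new_ch_num;
        *pos++ = count;

        /* Anything appended from here on is simply another element, e.g. a
         * Secondary Channel Offset element for HT or a Wide Bandwidth Channel
         * Switch element for VHT.  The receive side below parses this whole
         * region with ieee802_11_parse_elems(), so such additions need no
         * structural change.
         */
        return pos - buf;
}
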
Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 4 +-- net/mac80211/ieee80211_i.h | 4 --- net/mac80211/mlme.c | 71 ++++++++++++++++++++++++++++------------------ net/mac80211/rx.c | 4 --- 4 files changed, 44 insertions(+), 39 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index e46fea8b972e..8f80b3a93501 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -840,9 +840,7 @@ struct ieee80211_mgmt { } __packed wme_action; struct{ u8 action_code; - u8 element_id; - u8 length; - struct ieee80211_channel_sw_ie sw_elem; + u8 variable[0]; } __packed chan_switch; struct{ u8 action_code; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 8d5dcbf17bbc..373460f9c069 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1252,10 +1252,6 @@ void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata); int ieee80211_max_network_latency(struct notifier_block *nb, unsigned long data, void *dummy); int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata); -void -ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, - const struct ieee80211_channel_sw_ie *sw_elem, - struct ieee80211_bss *bss, u64 timestamp); void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata); void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 2a2c45354498..ade3cd6c337d 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1020,33 +1020,37 @@ static void ieee80211_chswitch_timer(unsigned long data) ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.chswitch_work); } -void +static void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, - const struct ieee80211_channel_sw_ie *sw_elem, - struct ieee80211_bss *bss, u64 timestamp) + u64 timestamp, struct ieee802_11_elems *elems) { - struct cfg80211_bss *cbss = - container_of((void *)bss, struct cfg80211_bss, priv); - struct ieee80211_channel *new_ch; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num, - cbss->channel->band); + struct cfg80211_bss *cbss = ifmgd->associated; + struct ieee80211_bss *bss; + struct ieee80211_channel *new_ch; + int new_freq; struct ieee80211_chanctx *chanctx; ASSERT_MGD_MTX(ifmgd); - if (!ifmgd->associated) + if (!cbss) return; if (sdata->local->scanning) return; - /* Disregard subsequent beacons if we are already running a timer - processing a CSA */ - + /* disregard subsequent announcements if we are already processing */ if (ifmgd->flags & IEEE80211_STA_CSA_RECEIVED) return; + if (!elems->ch_switch_ie) + return; + + bss = (void *)cbss->priv; + + new_freq = ieee80211_channel_to_frequency( + elems->ch_switch_ie->new_ch_num, + cbss->channel->band); new_ch = ieee80211_get_channel(sdata->local->hw.wiphy, new_freq); if (!new_ch || new_ch->flags & IEEE80211_CHAN_DISABLED) { sdata_info(sdata, @@ -1086,7 +1090,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, sdata->local->csa_channel = new_ch; - if (sw_elem->mode) + if (elems->ch_switch_ie->mode) ieee80211_stop_queues_by_reason(&sdata->local->hw, IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_CSA); @@ -1095,9 +1099,9 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, /* use driver's channel switch callback */ struct ieee80211_channel_switch ch_switch = { .timestamp = timestamp, - .block_tx = 
sw_elem->mode, + .block_tx = elems->ch_switch_ie->mode, .channel = new_ch, - .count = sw_elem->count, + .count = elems->ch_switch_ie->count, }; drv_channel_switch(sdata->local, &ch_switch); @@ -1105,11 +1109,11 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, } /* channel switch handled in software */ - if (sw_elem->count <= 1) + if (elems->ch_switch_ie->count <= 1) ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work); else mod_timer(&ifmgd->chswitch_timer, - TU_TO_EXP_TIME(sw_elem->count * + TU_TO_EXP_TIME(elems->ch_switch_ie->count * cbss->beacon_interval)); } @@ -2655,7 +2659,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, if (bss) ieee80211_rx_bss_put(local, bss); - if (!sdata->u.mgd.associated) + if (!sdata->u.mgd.associated || + !ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) return; if (need_ps) { @@ -2664,10 +2669,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, mutex_unlock(&local->iflist_mtx); } - if (elems->ch_switch_ie && - memcmp(mgmt->bssid, sdata->u.mgd.associated->bssid, ETH_ALEN) == 0) - ieee80211_sta_process_chanswitch(sdata, elems->ch_switch_ie, - bss, rx_status->mactime); + ieee80211_sta_process_chanswitch(sdata, rx_status->mactime, elems); } @@ -3061,14 +3063,27 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, rma = ieee80211_rx_mgmt_assoc_resp(sdata, mgmt, skb->len, &bss); break; case IEEE80211_STYPE_ACTION: - switch (mgmt->u.action.category) { - case WLAN_CATEGORY_SPECTRUM_MGMT: + if (mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT) { + struct ieee802_11_elems elems; + int ies_len = skb->len - + offsetof(struct ieee80211_mgmt, + u.action.u.chan_switch.variable); + + if (ies_len < 0) + break; + + ieee802_11_parse_elems( + mgmt->u.action.u.chan_switch.variable, + ies_len, &elems); + + if (elems.parse_error) + break; + ieee80211_sta_process_chanswitch(sdata, - &mgmt->u.action.u.chan_switch.sw_elem, - (void *)ifmgd->associated->priv, - rx_status->mactime); - break; + rx_status->mactime, + &elems); } + break; } mutex_unlock(&ifmgd->mtx); diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 5168f89c754d..e9825f15c14c 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2507,10 +2507,6 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) ieee80211_process_measurement_req(sdata, mgmt, len); goto handled; case WLAN_ACTION_SPCT_CHL_SWITCH: - if (len < (IEEE80211_MIN_ACTION_SIZE + - sizeof(mgmt->u.action.u.chan_switch))) - break; - if (sdata->vif.type != NL80211_IFTYPE_STATION) break; -- cgit v1.2.3-70-g09d2 From b4f286a1c0ad0b84c2d502b354d4d98d5a86c64b Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 26 Mar 2013 14:13:58 +0100 Subject: mac80211: support extended channel switch Support extended channel switch when the operating class is one of the global operating classes as defined in Annex E of 802.11-2012. If it isn't, disconnect from the AP instead. 
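The core of the new selection logic, condensed from the mlme.c hunk in this patch (sketch only, error handling trimmed):

    if (elems->ext_chansw_ie) {
        /* ECSA: operating class must be a global one, else disconnect */
        if (!ieee80211_operating_class_to_band(
                elems->ext_chansw_ie->new_operating_class, &new_band))
            ieee80211_queue_work(&local->hw,
                                 &ifmgd->csa_connection_drop_work);
        new_chan_no = elems->ext_chansw_ie->new_ch_num;
        count = elems->ext_chansw_ie->count;
        mode = elems->ext_chansw_ie->mode;
    } else if (elems->ch_switch_ie) {
        /* plain CSA: stay on the current band */
        new_band = cbss->channel->band;
        new_chan_no = elems->ch_switch_ie->new_ch_num;
        count = elems->ch_switch_ie->count;
        mode = elems->ch_switch_ie->mode;
    } else {
        return;
    }

After this, both paths share the existing frequency lookup and channel switch/timer handling.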
Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 12 ++++++++ net/mac80211/ieee80211_i.h | 1 + net/mac80211/mlme.c | 77 ++++++++++++++++++++++++++++++---------------- net/mac80211/util.c | 7 +++++ 4 files changed, 71 insertions(+), 26 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 8f80b3a93501..2a10acc65a54 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -672,6 +672,18 @@ struct ieee80211_channel_sw_ie { u8 count; } __packed; +/** + * struct ieee80211_ext_chansw_ie + * + * This structure represents the "Extended Channel Switch Announcement element" + */ +struct ieee80211_ext_chansw_ie { + u8 mode; + u8 new_operating_class; + u8 new_ch_num; + u8 count; +} __packed; + /** * struct ieee80211_tim * diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 373460f9c069..10c3180b165e 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1178,6 +1178,7 @@ struct ieee802_11_elems { const u8 *perr; const struct ieee80211_rann_ie *rann; const struct ieee80211_channel_sw_ie *ch_switch_ie; + const struct ieee80211_ext_chansw_ie *ext_chansw_ie; const u8 *country_elem; const u8 *pwr_constr_elem; const struct ieee80211_timeout_interval_ie *timeout_int; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index ade3cd6c337d..bc6f87edc624 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1024,56 +1024,79 @@ static void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, u64 timestamp, struct ieee802_11_elems *elems) { + struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct cfg80211_bss *cbss = ifmgd->associated; struct ieee80211_bss *bss; struct ieee80211_channel *new_ch; - int new_freq; struct ieee80211_chanctx *chanctx; + enum ieee80211_band new_band; + int new_freq; + u8 new_chan_no; + u8 count; + u8 mode; ASSERT_MGD_MTX(ifmgd); if (!cbss) return; - if (sdata->local->scanning) + if (local->scanning) return; /* disregard subsequent announcements if we are already processing */ if (ifmgd->flags & IEEE80211_STA_CSA_RECEIVED) return; - if (!elems->ch_switch_ie) + if (elems->ext_chansw_ie) { + if (!ieee80211_operating_class_to_band( + elems->ext_chansw_ie->new_operating_class, + &new_band)) { + sdata_info(sdata, + "cannot understand ECSA IE operating class %d, disconnecting\n", + elems->ext_chansw_ie->new_operating_class); + ieee80211_queue_work(&local->hw, + &ifmgd->csa_connection_drop_work); + } + new_chan_no = elems->ext_chansw_ie->new_ch_num; + count = elems->ext_chansw_ie->count; + mode = elems->ext_chansw_ie->mode; + } else if (elems->ch_switch_ie) { + new_band = cbss->channel->band; + new_chan_no = elems->ch_switch_ie->new_ch_num; + count = elems->ch_switch_ie->count; + mode = elems->ch_switch_ie->mode; + } else { + /* nothing here we understand */ return; + } bss = (void *)cbss->priv; - new_freq = ieee80211_channel_to_frequency( - elems->ch_switch_ie->new_ch_num, - cbss->channel->band); - new_ch = ieee80211_get_channel(sdata->local->hw.wiphy, new_freq); + new_freq = ieee80211_channel_to_frequency(new_chan_no, new_band); + new_ch = ieee80211_get_channel(local->hw.wiphy, new_freq); if (!new_ch || new_ch->flags & IEEE80211_CHAN_DISABLED) { sdata_info(sdata, "AP %pM switches to unsupported channel (%d MHz), disconnecting\n", ifmgd->associated->bssid, new_freq); - ieee80211_queue_work(&sdata->local->hw, + ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work); return; 
} ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED; - if (sdata->local->use_chanctx) { + if (local->use_chanctx) { sdata_info(sdata, "not handling channel switch with channel contexts\n"); - ieee80211_queue_work(&sdata->local->hw, + ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work); return; } - mutex_lock(&sdata->local->chanctx_mtx); + mutex_lock(&local->chanctx_mtx); if (WARN_ON(!rcu_access_pointer(sdata->vif.chanctx_conf))) { - mutex_unlock(&sdata->local->chanctx_mtx); + mutex_unlock(&local->chanctx_mtx); return; } chanctx = container_of(rcu_access_pointer(sdata->vif.chanctx_conf), @@ -1081,40 +1104,39 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, if (chanctx->refcount > 1) { sdata_info(sdata, "channel switch with multiple interfaces on the same channel, disconnecting\n"); - ieee80211_queue_work(&sdata->local->hw, + ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work); - mutex_unlock(&sdata->local->chanctx_mtx); + mutex_unlock(&local->chanctx_mtx); return; } - mutex_unlock(&sdata->local->chanctx_mtx); + mutex_unlock(&local->chanctx_mtx); - sdata->local->csa_channel = new_ch; + local->csa_channel = new_ch; - if (elems->ch_switch_ie->mode) - ieee80211_stop_queues_by_reason(&sdata->local->hw, + if (mode) + ieee80211_stop_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_CSA); - if (sdata->local->ops->channel_switch) { + if (local->ops->channel_switch) { /* use driver's channel switch callback */ struct ieee80211_channel_switch ch_switch = { .timestamp = timestamp, - .block_tx = elems->ch_switch_ie->mode, + .block_tx = mode, .channel = new_ch, - .count = elems->ch_switch_ie->count, + .count = count, }; - drv_channel_switch(sdata->local, &ch_switch); + drv_channel_switch(local, &ch_switch); return; } /* channel switch handled in software */ - if (elems->ch_switch_ie->count <= 1) - ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work); + if (count <= 1) + ieee80211_queue_work(&local->hw, &ifmgd->chswitch_work); else mod_timer(&ifmgd->chswitch_timer, - TU_TO_EXP_TIME(elems->ch_switch_ie->count * - cbss->beacon_interval)); + TU_TO_EXP_TIME(count * cbss->beacon_interval)); } static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata, @@ -2629,6 +2651,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel *channel; bool need_ps = false; + lockdep_assert_held(&sdata->u.mgd.mtx); + if ((sdata->u.mgd.associated && ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) || (sdata->u.mgd.assoc_data && @@ -2670,6 +2694,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, } ieee80211_sta_process_chanswitch(sdata, rx_status->mactime, elems); + } diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 1d6217ac3ba3..e4a6d559372d 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -863,6 +863,13 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, } elems->ch_switch_ie = (void *)pos; break; + case WLAN_EID_EXT_CHANSWITCH_ANN: + if (elen != sizeof(struct ieee80211_ext_chansw_ie)) { + elem_parse_failed = true; + break; + } + elems->ext_chansw_ie = (void *)pos; + break; case WLAN_EID_COUNTRY: elems->country_elem = pos; elems->country_elem_len = elen; -- cgit v1.2.3-70-g09d2 From 85220d71bf3ca1ba9129e0744247ae5f61bec559 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 25 Mar 2013 18:29:27 +0100 Subject: mac80211: support secondary channel offset in CSA Add support for the secondary channel offset IE in 
channel switch announcements. This is necessary for proper handling of CSA on HT access points. For this to work it is also necessary to convert everything here to use chandef structs instead of just channels. The driver updates aren't really correct though. In particular, the TI wl18xx driver update can't possibly be right since it just ignores the new channel width for lack of firmware API. Signed-off-by: Johannes Berg --- drivers/net/wireless/iwlegacy/4965-mac.c | 32 ++++++------- drivers/net/wireless/iwlegacy/4965.c | 2 +- drivers/net/wireless/iwlwifi/dvm/devices.c | 10 ++-- drivers/net/wireless/iwlwifi/dvm/mac80211.c | 20 ++++++-- drivers/net/wireless/iwlwifi/dvm/rxon.c | 2 +- drivers/net/wireless/ti/wl12xx/cmd.c | 2 +- drivers/net/wireless/ti/wl18xx/cmd.c | 6 +-- include/linux/ieee80211.h | 11 +++++ include/net/mac80211.h | 4 +- net/mac80211/ieee80211_i.h | 3 +- net/mac80211/mlme.c | 71 +++++++++++++++++++++++------ net/mac80211/trace.h | 8 ++-- net/mac80211/util.c | 8 ++++ 13 files changed, 125 insertions(+), 54 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c index c092fcbbe965..cb5882ea5f3a 100644 --- a/drivers/net/wireless/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/iwlegacy/4965-mac.c @@ -6057,7 +6057,7 @@ il4965_mac_channel_switch(struct ieee80211_hw *hw, struct il_priv *il = hw->priv; const struct il_channel_info *ch_info; struct ieee80211_conf *conf = &hw->conf; - struct ieee80211_channel *channel = ch_switch->channel; + struct ieee80211_channel *channel = ch_switch->chandef.chan; struct il_ht_config *ht_conf = &il->current_ht_config; u16 ch; @@ -6094,23 +6094,21 @@ il4965_mac_channel_switch(struct ieee80211_hw *hw, il->current_ht_config.smps = conf->smps_mode; /* Configure HT40 channels */ - il->ht.enabled = conf_is_ht(conf); - if (il->ht.enabled) { - if (conf_is_ht40_minus(conf)) { - il->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_BELOW; - il->ht.is_40mhz = true; - } else if (conf_is_ht40_plus(conf)) { - il->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_ABOVE; - il->ht.is_40mhz = true; - } else { - il->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_NONE; - il->ht.is_40mhz = false; - } - } else + switch (cfg80211_get_chandef_type(&ch_switch->chandef)) { + case NL80211_CHAN_NO_HT: + case NL80211_CHAN_HT20: il->ht.is_40mhz = false; + il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE; + break; + case NL80211_CHAN_HT40MINUS: + il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW; + il->ht.is_40mhz = true; + break; + case NL80211_CHAN_HT40PLUS: + il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; + il->ht.is_40mhz = true; + break; + } if ((le16_to_cpu(il->staging.channel) != ch)) il->staging.flags = 0; diff --git a/drivers/net/wireless/iwlegacy/4965.c b/drivers/net/wireless/iwlegacy/4965.c index 91eb2d07fdb8..777a578294bd 100644 --- a/drivers/net/wireless/iwlegacy/4965.c +++ b/drivers/net/wireless/iwlegacy/4965.c @@ -1493,7 +1493,7 @@ il4965_hw_channel_switch(struct il_priv *il, cmd.band = band; cmd.expect_beacon = 0; - ch = ch_switch->channel->hw_value; + ch = ch_switch->chandef.chan->hw_value; cmd.channel = cpu_to_le16(ch); cmd.rxon_flags = il->staging.flags; cmd.rxon_filter_flags = il->staging.filter_flags; diff --git a/drivers/net/wireless/iwlwifi/dvm/devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c index 15cca2ef9294..c48907c8ab43 100644 --- a/drivers/net/wireless/iwlwifi/dvm/devices.c +++ 
b/drivers/net/wireless/iwlwifi/dvm/devices.c @@ -379,7 +379,7 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv, }; cmd.band = priv->band == IEEE80211_BAND_2GHZ; - ch = ch_switch->channel->hw_value; + ch = ch_switch->chandef.chan->hw_value; IWL_DEBUG_11H(priv, "channel switch from %d to %d\n", ctx->active.channel, ch); cmd.channel = cpu_to_le16(ch); @@ -414,7 +414,8 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv, } IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", cmd.switch_time); - cmd.expect_beacon = ch_switch->channel->flags & IEEE80211_CHAN_RADAR; + cmd.expect_beacon = + ch_switch->chandef.chan->flags & IEEE80211_CHAN_RADAR; return iwl_dvm_send_cmd(priv, &hcmd); } @@ -540,7 +541,7 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv, hcmd.data[0] = cmd; cmd->band = priv->band == IEEE80211_BAND_2GHZ; - ch = ch_switch->channel->hw_value; + ch = ch_switch->chandef.chan->hw_value; IWL_DEBUG_11H(priv, "channel switch from %u to %u\n", ctx->active.channel, ch); cmd->channel = cpu_to_le16(ch); @@ -575,7 +576,8 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv, } IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", cmd->switch_time); - cmd->expect_beacon = ch_switch->channel->flags & IEEE80211_CHAN_RADAR; + cmd->expect_beacon = + ch_switch->chandef.chan->flags & IEEE80211_CHAN_RADAR; err = iwl_dvm_send_cmd(priv, &hcmd); kfree(cmd); diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c index a7294fa4d7e5..2dc101fe0d24 100644 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c @@ -967,7 +967,7 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw, { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); struct ieee80211_conf *conf = &hw->conf; - struct ieee80211_channel *channel = ch_switch->channel; + struct ieee80211_channel *channel = ch_switch->chandef.chan; struct iwl_ht_config *ht_conf = &priv->current_ht_config; /* * MULTI-FIXME @@ -1005,11 +1005,21 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw, priv->current_ht_config.smps = conf->smps_mode; /* Configure HT40 channels */ - ctx->ht.enabled = conf_is_ht(conf); - if (ctx->ht.enabled) - iwlagn_config_ht40(conf, ctx); - else + switch (cfg80211_get_chandef_type(&ch_switch->chandef)) { + case NL80211_CHAN_NO_HT: + case NL80211_CHAN_HT20: ctx->ht.is_40mhz = false; + ctx->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE; + break; + case NL80211_CHAN_HT40MINUS: + ctx->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW; + ctx->ht.is_40mhz = true; + break; + case NL80211_CHAN_HT40PLUS: + ctx->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; + ctx->ht.is_40mhz = true; + break; + } if ((le16_to_cpu(ctx->staging.channel) != ch)) ctx->staging.flags = 0; diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c index 085c589e7149..acbb50b5f1e8 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rxon.c +++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c @@ -1160,7 +1160,7 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) } void iwlagn_config_ht40(struct ieee80211_conf *conf, - struct iwl_rxon_context *ctx) + struct iwl_rxon_context *ctx) { if (conf_is_ht40_minus(conf)) { ctx->ht.extension_chan_offset = diff --git a/drivers/net/wireless/ti/wl12xx/cmd.c b/drivers/net/wireless/ti/wl12xx/cmd.c index 7dc9f965037d..7485dbae8c4b 100644 --- a/drivers/net/wireless/ti/wl12xx/cmd.c +++ 
b/drivers/net/wireless/ti/wl12xx/cmd.c @@ -301,7 +301,7 @@ int wl12xx_cmd_channel_switch(struct wl1271 *wl, } cmd->role_id = wlvif->role_id; - cmd->channel = ch_switch->channel->hw_value; + cmd->channel = ch_switch->chandef.chan->hw_value; cmd->switch_time = ch_switch->count; cmd->stop_tx = ch_switch->block_tx; diff --git a/drivers/net/wireless/ti/wl18xx/cmd.c b/drivers/net/wireless/ti/wl18xx/cmd.c index 1d1f6cc7a50a..7649c75cd68d 100644 --- a/drivers/net/wireless/ti/wl18xx/cmd.c +++ b/drivers/net/wireless/ti/wl18xx/cmd.c @@ -42,11 +42,11 @@ int wl18xx_cmd_channel_switch(struct wl1271 *wl, } cmd->role_id = wlvif->role_id; - cmd->channel = ch_switch->channel->hw_value; + cmd->channel = ch_switch->chandef.chan->hw_value; cmd->switch_time = ch_switch->count; cmd->stop_tx = ch_switch->block_tx; - switch (ch_switch->channel->band) { + switch (ch_switch->chandef.chan->band) { case IEEE80211_BAND_2GHZ: cmd->band = WLCORE_BAND_2_4GHZ; break; @@ -55,7 +55,7 @@ int wl18xx_cmd_channel_switch(struct wl1271 *wl, break; default: wl1271_error("invalid channel switch band: %d", - ch_switch->channel->band); + ch_switch->chandef.chan->band); ret = -EINVAL; goto out_free; } diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 2a10acc65a54..95621528436c 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -684,6 +684,16 @@ struct ieee80211_ext_chansw_ie { u8 count; } __packed; +/** + * struct ieee80211_sec_chan_offs_ie - secondary channel offset IE + * @sec_chan_offs: secondary channel offset, uses IEEE80211_HT_PARAM_CHA_SEC_* + * values here + * This structure represents the "Secondary Channel Offset element" + */ +struct ieee80211_sec_chan_offs_ie { + u8 sec_chan_offs; +} __packed; + /** * struct ieee80211_tim * @@ -1648,6 +1658,7 @@ enum ieee80211_eid { WLAN_EID_HT_CAPABILITY = 45, WLAN_EID_HT_OPERATION = 61, + WLAN_EID_SECONDARY_CHANNEL_OFFSET = 62, WLAN_EID_RSN = 48, WLAN_EID_MMIE = 76, diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 0dde213dd3b6..9ff10b33b711 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1017,13 +1017,13 @@ struct ieee80211_conf { * the driver passed into mac80211. * @block_tx: Indicates whether transmission must be blocked before the * scheduled channel switch, as indicated by the AP. 
- * @channel: the new channel to switch to + * @chandef: the new channel to switch to * @count: the number of TBTT's until the channel switch event */ struct ieee80211_channel_switch { u64 timestamp; bool block_tx; - struct ieee80211_channel *channel; + struct cfg80211_chan_def chandef; u8 count; }; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 10c3180b165e..8f240c0ec304 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1019,7 +1019,7 @@ struct ieee80211_local { enum mac80211_scan_state next_scan_state; struct delayed_work scan_work; struct ieee80211_sub_if_data __rcu *scan_sdata; - struct ieee80211_channel *csa_channel; + struct cfg80211_chan_def csa_chandef; /* For backward compatibility only -- do not use */ struct cfg80211_chan_def _oper_chandef; @@ -1183,6 +1183,7 @@ struct ieee802_11_elems { const u8 *pwr_constr_elem; const struct ieee80211_timeout_interval_ie *timeout_int; const u8 *opmode_notif; + const struct ieee80211_sec_chan_offs_ie *sec_chan_offs; /* length of them, respectively */ u8 ssid_len; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index bc6f87edc624..bd581a80e4b7 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -289,6 +289,8 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, } else { /* 40 MHz (and 80 MHz) must be supported for VHT */ ret = IEEE80211_STA_DISABLE_VHT; + /* also mark 40 MHz disabled */ + ret |= IEEE80211_STA_DISABLE_40MHZ; goto out; } @@ -964,16 +966,7 @@ static void ieee80211_chswitch_work(struct work_struct *work) if (!ifmgd->associated) goto out; - /* - * FIXME: Here we are downgrading to NL80211_CHAN_WIDTH_20_NOHT - * and don't adjust our ht/vht settings - * This is wrong - we should behave according to the CSA params - */ - local->_oper_chandef.chan = local->csa_channel; - local->_oper_chandef.width = NL80211_CHAN_WIDTH_20_NOHT; - local->_oper_chandef.center_freq1 = - local->_oper_chandef.chan->center_freq; - local->_oper_chandef.center_freq2 = 0; + local->_oper_chandef = local->csa_chandef; if (!local->ops->channel_switch) { /* call "hw_config" only if doing sw channel switch */ @@ -1028,13 +1021,14 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct cfg80211_bss *cbss = ifmgd->associated; struct ieee80211_bss *bss; - struct ieee80211_channel *new_ch; struct ieee80211_chanctx *chanctx; enum ieee80211_band new_band; int new_freq; u8 new_chan_no; u8 count; u8 mode; + struct cfg80211_chan_def new_chandef = {}; + int secondary_channel_offset = -1; ASSERT_MGD_MTX(ifmgd); @@ -1048,6 +1042,19 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, if (ifmgd->flags & IEEE80211_STA_CSA_RECEIVED) return; + if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) { + /* if HT is enabled and the IE not present, it's still HT */ + secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE; + if (elems->sec_chan_offs) + secondary_channel_offset = + elems->sec_chan_offs->sec_chan_offs; + } + + if (ifmgd->flags & IEEE80211_STA_DISABLE_40MHZ && + (secondary_channel_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE || + secondary_channel_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)) + secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE; + if (elems->ext_chansw_ie) { if (!ieee80211_operating_class_to_band( elems->ext_chansw_ie->new_operating_class, @@ -1074,8 +1081,9 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, bss = (void *)cbss->priv; new_freq = 
ieee80211_channel_to_frequency(new_chan_no, new_band); - new_ch = ieee80211_get_channel(local->hw.wiphy, new_freq); - if (!new_ch || new_ch->flags & IEEE80211_CHAN_DISABLED) { + new_chandef.chan = ieee80211_get_channel(sdata->local->hw.wiphy, new_freq); + if (!new_chandef.chan || + new_chandef.chan->flags & IEEE80211_CHAN_DISABLED) { sdata_info(sdata, "AP %pM switches to unsupported channel (%d MHz), disconnecting\n", ifmgd->associated->bssid, new_freq); @@ -1084,6 +1092,39 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, return; } + switch (secondary_channel_offset) { + default: + /* secondary_channel_offset was present but is invalid */ + case IEEE80211_HT_PARAM_CHA_SEC_NONE: + cfg80211_chandef_create(&new_chandef, new_chandef.chan, + NL80211_CHAN_HT20); + break; + case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: + cfg80211_chandef_create(&new_chandef, new_chandef.chan, + NL80211_CHAN_HT40PLUS); + break; + case IEEE80211_HT_PARAM_CHA_SEC_BELOW: + cfg80211_chandef_create(&new_chandef, new_chandef.chan, + NL80211_CHAN_HT40MINUS); + break; + case -1: + cfg80211_chandef_create(&new_chandef, new_chandef.chan, + NL80211_CHAN_NO_HT); + break; + } + + if (!cfg80211_chandef_usable(local->hw.wiphy, &new_chandef, + IEEE80211_CHAN_DISABLED)) { + sdata_info(sdata, + "AP %pM switches to unsupported channel (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n", + ifmgd->associated->bssid, new_freq, + new_chandef.width, new_chandef.center_freq1, + new_chandef.center_freq2); + ieee80211_queue_work(&local->hw, + &ifmgd->csa_connection_drop_work); + return; + } + ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED; if (local->use_chanctx) { @@ -1111,7 +1152,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, } mutex_unlock(&local->chanctx_mtx); - local->csa_channel = new_ch; + local->csa_chandef = new_chandef; if (mode) ieee80211_stop_queues_by_reason(&local->hw, @@ -1123,7 +1164,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel_switch ch_switch = { .timestamp = timestamp, .block_tx = mode, - .channel = new_ch, + .chandef = new_chandef, .count = count, }; diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h index 8286dcef228b..c215fafd7a2f 100644 --- a/net/mac80211/trace.h +++ b/net/mac80211/trace.h @@ -990,23 +990,23 @@ TRACE_EVENT(drv_channel_switch, TP_STRUCT__entry( LOCAL_ENTRY + CHANDEF_ENTRY __field(u64, timestamp) __field(bool, block_tx) - __field(u16, freq) __field(u8, count) ), TP_fast_assign( LOCAL_ASSIGN; + CHANDEF_ASSIGN(&ch_switch->chandef) __entry->timestamp = ch_switch->timestamp; __entry->block_tx = ch_switch->block_tx; - __entry->freq = ch_switch->channel->center_freq; __entry->count = ch_switch->count; ), TP_printk( - LOCAL_PR_FMT " new freq:%u count:%d", - LOCAL_PR_ARG, __entry->freq, __entry->count + LOCAL_PR_FMT " new " CHANDEF_PR_FMT " count:%d", + LOCAL_PR_ARG, CHANDEF_PR_ARG, __entry->count ) ); diff --git a/net/mac80211/util.c b/net/mac80211/util.c index e4a6d559372d..155056c90edf 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -716,6 +716,7 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, case WLAN_EID_COUNTRY: case WLAN_EID_PWR_CONSTRAINT: case WLAN_EID_TIMEOUT_INTERVAL: + case WLAN_EID_SECONDARY_CHANNEL_OFFSET: if (test_bit(id, seen_elems)) { elems->parse_error = true; left -= elen; @@ -870,6 +871,13 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, } elems->ext_chansw_ie = (void *)pos; break; + case WLAN_EID_SECONDARY_CHANNEL_OFFSET: + if (elen != sizeof(struct 
ieee80211_sec_chan_offs_ie)) { + elem_parse_failed = true; + break; + } + elems->sec_chan_offs = (void *)pos; + break; case WLAN_EID_COUNTRY: elems->country_elem = pos; elems->country_elem_len = elen; -- cgit v1.2.3-70-g09d2 From 1b3a2e494bc793445f576c5476e9767cf7621684 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 26 Mar 2013 15:17:18 +0100 Subject: mac80211: handle extended channel switch announcement Handle the (public) extended channel switch announcement action frames. Parts of the data in these frames isn't really in IEs, but put it into the elems struct anyway to simplify the handling. Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 6 ++++++ net/mac80211/mlme.c | 31 +++++++++++++++++++++++++++---- net/mac80211/rx.c | 16 ++++++++++++++++ 3 files changed, 49 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 95621528436c..ce07161c8735 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -864,6 +864,11 @@ struct ieee80211_mgmt { u8 action_code; u8 variable[0]; } __packed chan_switch; + struct{ + u8 action_code; + struct ieee80211_ext_chansw_ie data; + u8 variable[0]; + } __packed ext_chan_switch; struct{ u8 action_code; u8 dialog_token; @@ -1816,6 +1821,7 @@ enum ieee80211_key_len { /* Public action codes */ enum ieee80211_pub_actioncode { + WLAN_PUB_ACTION_EXT_CHANSW_ANN = 4, WLAN_PUB_ACTION_TDLS_DISCOVER_RES = 14, }; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index bd581a80e4b7..c53aedb47a6a 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -3100,6 +3100,8 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, enum rx_mgmt_action rma = RX_MGMT_NONE; u8 deauth_buf[IEEE80211_DEAUTH_FRAME_LEN]; u16 fc; + struct ieee802_11_elems elems; + int ies_len; rx_status = (struct ieee80211_rx_status *) skb->cb; mgmt = (struct ieee80211_mgmt *) skb->data; @@ -3130,10 +3132,9 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, break; case IEEE80211_STYPE_ACTION: if (mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT) { - struct ieee802_11_elems elems; - int ies_len = skb->len - - offsetof(struct ieee80211_mgmt, - u.action.u.chan_switch.variable); + ies_len = skb->len - + offsetof(struct ieee80211_mgmt, + u.action.u.chan_switch.variable); if (ies_len < 0) break; @@ -3145,6 +3146,28 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, if (elems.parse_error) break; + ieee80211_sta_process_chanswitch(sdata, + rx_status->mactime, + &elems); + } else if (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC) { + ies_len = skb->len - + offsetof(struct ieee80211_mgmt, + u.action.u.ext_chan_switch.variable); + + if (ies_len < 0) + break; + + ieee802_11_parse_elems( + mgmt->u.action.u.ext_chan_switch.variable, + ies_len, &elems); + + if (elems.parse_error) + break; + + /* for the handling code pretend this was also an IE */ + elems.ext_chansw_ie = + &mgmt->u.action.u.ext_chan_switch.data; + ieee80211_sta_process_chanswitch(sdata, rx_status->mactime, &elems); diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index e9825f15c14c..643fcf7c9dcd 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2424,6 +2424,22 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) } break; + case WLAN_CATEGORY_PUBLIC: + if (len < IEEE80211_MIN_ACTION_SIZE + 1) + goto invalid; + if (sdata->vif.type != NL80211_IFTYPE_STATION) + break; + if (!rx->sta) + break; + if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) + 
break; + if (mgmt->u.action.u.ext_chan_switch.action_code != + WLAN_PUB_ACTION_EXT_CHANSW_ANN) + break; + if (len < offsetof(struct ieee80211_mgmt, + u.action.u.ext_chan_switch.variable)) + goto invalid; + goto queue; case WLAN_CATEGORY_VHT: if (sdata->vif.type != NL80211_IFTYPE_STATION && sdata->vif.type != NL80211_IFTYPE_MESH_POINT && -- cgit v1.2.3-70-g09d2 From b2e506bfc4d752b68a0ccaae1e977898263eba4c Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 26 Mar 2013 14:54:16 +0100 Subject: mac80211: parse VHT channel switch IEs VHT introduces multiple IEs that need to be parsed for a wide bandwidth channel switch. Two are (currently) needed in mac80211: * wide bandwidth channel switch element * channel switch wrapper element The former is contained in the latter for beacons and probe responses, but not for the spectrum management action frames so the IE parser needs a new argument to differentiate them. Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 10 ++++++++++ net/mac80211/ibss.c | 2 +- net/mac80211/ieee80211_i.h | 7 ++++--- net/mac80211/mesh.c | 4 ++-- net/mac80211/mesh_hwmp.c | 2 +- net/mac80211/mesh_plink.c | 2 +- net/mac80211/mlme.c | 16 ++++++++-------- net/mac80211/scan.c | 2 +- net/mac80211/util.c | 36 +++++++++++++++++++++++++++++++++++- 9 files changed, 63 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index ce07161c8735..06b0ed0154a4 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -694,6 +694,14 @@ struct ieee80211_sec_chan_offs_ie { u8 sec_chan_offs; } __packed; +/** + * struct ieee80211_wide_bw_chansw_ie - wide bandwidth channel switch IE + */ +struct ieee80211_wide_bw_chansw_ie { + u8 new_channel_width; + u8 new_center_freq_seg0, new_center_freq_seg1; +} __packed; + /** * struct ieee80211_tim * @@ -1698,6 +1706,8 @@ enum ieee80211_eid { WLAN_EID_VHT_CAPABILITY = 191, WLAN_EID_VHT_OPERATION = 192, WLAN_EID_OPMODE_NOTIF = 199, + WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194, + WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196, /* 802.11ad */ WLAN_EID_NON_TX_BSSID_CAP = 83, diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index b7bf6d76f1d9..170f9a7fa319 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -914,7 +914,7 @@ void ieee80211_rx_mgmt_probe_beacon(struct ieee80211_sub_if_data *sdata, return; ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, - &elems); + false, &elems); ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems); } diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 8f240c0ec304..f4a65a340a52 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1179,6 +1179,7 @@ struct ieee802_11_elems { const struct ieee80211_rann_ie *rann; const struct ieee80211_channel_sw_ie *ch_switch_ie; const struct ieee80211_ext_chansw_ie *ext_chansw_ie; + const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie; const u8 *country_elem; const u8 *pwr_constr_elem; const struct ieee80211_timeout_interval_ie *timeout_int; @@ -1490,13 +1491,13 @@ static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, ieee80211_tx_skb_tid(sdata, skb, 7); } -u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, +u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, bool action, struct ieee802_11_elems *elems, u64 filter, u32 crc); -static inline void ieee802_11_parse_elems(u8 *start, size_t len, +static inline void ieee802_11_parse_elems(u8 *start, size_t len, bool action, struct ieee802_11_elems 
*elems) { - ieee802_11_parse_elems_crc(start, len, elems, 0, 0); + ieee802_11_parse_elems_crc(start, len, action, elems, 0, 0); } u32 ieee80211_mandatory_rates(struct ieee80211_local *local, diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 0acc2874d294..4b984765d62d 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -838,7 +838,7 @@ ieee80211_mesh_rx_probe_req(struct ieee80211_sub_if_data *sdata, if (baselen > len) return; - ieee802_11_parse_elems(pos, len - baselen, &elems); + ieee802_11_parse_elems(pos, len - baselen, false, &elems); /* 802.11-2012 10.1.4.3.2 */ if ((!ether_addr_equal(mgmt->da, sdata->vif.addr) && @@ -899,7 +899,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, return; ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, - &elems); + false, &elems); /* ignore non-mesh or secure / unsecure mismatch */ if ((!elems.mesh_id || !elems.mesh_config) || diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index c82d5e6a24c0..486819cd02cd 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -880,7 +880,7 @@ void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt; ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable, - len - baselen, &elems); + len - baselen, false, &elems); if (elems.preq) { if (elems.preq_len != 37) diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index cdd41835334d..09bebed99416 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -687,7 +687,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, baseaddr += 4; baselen += 4; } - ieee802_11_parse_elems(baseaddr, len - baselen, &elems); + ieee802_11_parse_elems(baseaddr, len - baselen, true, &elems); if (!elems.peering) { mpl_dbg(sdata, diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index c53aedb47a6a..3e0421265bfe 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2203,7 +2203,7 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata, u32 tx_flags = 0; pos = mgmt->u.auth.variable; - ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems); + ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), false, &elems); if (!elems.challenge) return; auth_data->expected_transaction = 4; @@ -2468,7 +2468,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, } pos = mgmt->u.assoc_resp.variable; - ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems); + ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), false, &elems); if (!elems.supp_rates) { sdata_info(sdata, "no SuppRates element in AssocResp\n"); @@ -2637,7 +2637,7 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14)))); pos = mgmt->u.assoc_resp.variable; - ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems); + ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), false, &elems); if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY && elems.timeout_int && @@ -2760,7 +2760,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, return; ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, - &elems); + false, &elems); ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems); @@ -2843,7 +2843,7 @@ ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, if (ifmgd->assoc_data && ifmgd->assoc_data->need_beacon && 
ether_addr_equal(mgmt->bssid, ifmgd->assoc_data->bss->bssid)) { ieee802_11_parse_elems(mgmt->u.beacon.variable, - len - baselen, &elems); + len - baselen, false, &elems); ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems); ifmgd->assoc_data->have_beacon = true; @@ -2953,7 +2953,7 @@ ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4); ncrc = ieee802_11_parse_elems_crc(mgmt->u.beacon.variable, - len - baselen, &elems, + len - baselen, false, &elems, care_about_ies, ncrc); if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) { @@ -3141,7 +3141,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, ieee802_11_parse_elems( mgmt->u.action.u.chan_switch.variable, - ies_len, &elems); + ies_len, true, &elems); if (elems.parse_error) break; @@ -3159,7 +3159,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, ieee802_11_parse_elems( mgmt->u.action.u.ext_chan_switch.variable, - ies_len, &elems); + ies_len, true, &elems); if (elems.parse_error) break; diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 33fbf1045690..99b103921a4b 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -181,7 +181,7 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb) if (baselen > skb->len) return; - ieee802_11_parse_elems(elements, skb->len - baselen, &elems); + ieee802_11_parse_elems(elements, skb->len - baselen, false, &elems); channel = ieee80211_get_channel(local->hw.wiphy, rx_status->freq); diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 155056c90edf..3f87fa468b1f 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -661,7 +661,7 @@ void ieee80211_queue_delayed_work(struct ieee80211_hw *hw, } EXPORT_SYMBOL(ieee80211_queue_delayed_work); -u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, +u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, bool action, struct ieee802_11_elems *elems, u64 filter, u32 crc) { @@ -669,6 +669,7 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, u8 *pos = start; bool calc_crc = filter != 0; DECLARE_BITMAP(seen_elems, 256); + const u8 *ie; bitmap_zero(seen_elems, 256); memset(elems, 0, sizeof(*elems)); @@ -717,6 +718,11 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, case WLAN_EID_PWR_CONSTRAINT: case WLAN_EID_TIMEOUT_INTERVAL: case WLAN_EID_SECONDARY_CHANNEL_OFFSET: + case WLAN_EID_WIDE_BW_CHANNEL_SWITCH: + /* + * not listing WLAN_EID_CHANNEL_SWITCH_WRAPPER -- it seems possible + * that if the content gets bigger it might be needed more than once + */ if (test_bit(id, seen_elems)) { elems->parse_error = true; left -= elen; @@ -878,6 +884,34 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, } elems->sec_chan_offs = (void *)pos; break; + case WLAN_EID_WIDE_BW_CHANNEL_SWITCH: + if (!action || + elen != sizeof(*elems->wide_bw_chansw_ie)) { + elem_parse_failed = true; + break; + } + elems->wide_bw_chansw_ie = (void *)pos; + break; + case WLAN_EID_CHANNEL_SWITCH_WRAPPER: + if (action) { + elem_parse_failed = true; + break; + } + /* + * This is a bit tricky, but as we only care about + * the wide bandwidth channel switch element, so + * just parse it out manually. 
+ */ + ie = cfg80211_find_ie(WLAN_EID_WIDE_BW_CHANNEL_SWITCH, + pos, elen); + if (ie) { + if (ie[1] == sizeof(*elems->wide_bw_chansw_ie)) + elems->wide_bw_chansw_ie = + (void *)(ie + 2); + else + elem_parse_failed = true; + } + break; case WLAN_EID_COUNTRY: elems->country_elem = pos; elems->country_elem_len = elen; -- cgit v1.2.3-70-g09d2 From 43b5abe0640100a9e9424c91298c7993d443ffb7 Mon Sep 17 00:00:00 2001 From: Sascha Herrmann Date: Sun, 14 Apr 2013 22:33:28 +0000 Subject: at86rf230: add irq type configuration option Add option to at86rf230 platform data to configure the type of the interrupt used by the driver. The irq polarity of the device will be configured accordingly. Signed-off-by: Sascha Herrmann Signed-off-by: David S. Miller --- drivers/net/ieee802154/at86rf230.c | 47 +++++++++++++++++++++++++------------- include/linux/spi/at86rf230.h | 14 ++++++++++++ 2 files changed, 45 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index fc315ddea61c..cf098889ba64 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -219,6 +219,9 @@ struct at86rf230_local { #define IRQ_PLL_UNL (1 << 1) #define IRQ_PLL_LOCK (1 << 0) +#define IRQ_ACTIVE_HIGH 0 +#define IRQ_ACTIVE_LOW 1 + #define STATE_P_ON 0x00 /* BUSY */ #define STATE_BUSY_RX 0x01 #define STATE_BUSY_TX 0x02 @@ -726,11 +729,16 @@ static irqreturn_t at86rf230_isr(int irq, void *data) return IRQ_HANDLED; } +static int at86rf230_irq_polarity(struct at86rf230_local *lp, int pol) +{ + return at86rf230_write_subreg(lp, SR_IRQ_POLARITY, pol); +} static int at86rf230_hw_init(struct at86rf230_local *lp) { + struct at86rf230_platform_data *pdata = lp->spi->dev.platform_data; + int rc, irq_pol; u8 status; - int rc; rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status); if (rc) @@ -748,6 +756,16 @@ static int at86rf230_hw_init(struct at86rf230_local *lp) dev_info(&lp->spi->dev, "Status: %02x\n", status); } + /* configure irq polarity, defaults to high active */ + if (pdata->irq_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW)) + irq_pol = IRQ_ACTIVE_LOW; + else + irq_pol = IRQ_ACTIVE_HIGH; + + rc = at86rf230_irq_polarity(lp, irq_pol); + if (rc) + return rc; + rc = at86rf230_write_subreg(lp, SR_IRQ_MASK, 0xff); /* IRQ_TRX_UR | * IRQ_CCA_ED | * IRQ_TRX_END | @@ -798,37 +816,36 @@ static int at86rf230_hw_init(struct at86rf230_local *lp) return 0; } -static int at86rf230_fill_data(struct spi_device *spi) +static void at86rf230_fill_data(struct spi_device *spi) { struct at86rf230_local *lp = spi_get_drvdata(spi); struct at86rf230_platform_data *pdata = spi->dev.platform_data; - if (!pdata) { - dev_err(&spi->dev, "no platform_data\n"); - return -EINVAL; - } - lp->rstn = pdata->rstn; lp->slp_tr = pdata->slp_tr; lp->dig2 = pdata->dig2; - - return 0; } static int at86rf230_probe(struct spi_device *spi) { + struct at86rf230_platform_data *pdata; struct ieee802154_dev *dev; struct at86rf230_local *lp; u8 man_id_0, man_id_1; - int rc; + int rc, supported = 0; const char *chip; - int supported = 0; if (!spi->irq) { dev_err(&spi->dev, "no IRQ specified\n"); return -EINVAL; } + pdata = spi->dev.platform_data; + if (!pdata) { + dev_err(&spi->dev, "no platform_data\n"); + return -EINVAL; + } + dev = ieee802154_alloc_device(sizeof(*lp), &at86rf230_ops); if (!dev) return -ENOMEM; @@ -851,9 +868,7 @@ static int at86rf230_probe(struct spi_device *spi) spi_set_drvdata(spi, lp); - rc = at86rf230_fill_data(spi); - if (rc) - goto err_fill; + 
at86rf230_fill_data(spi); rc = gpio_request(lp->rstn, "rstn"); if (rc) @@ -928,7 +943,8 @@ static int at86rf230_probe(struct spi_device *spi) if (rc) goto err_gpio_dir; - rc = request_irq(spi->irq, at86rf230_isr, IRQF_SHARED, + rc = request_irq(spi->irq, at86rf230_isr, + IRQF_SHARED | pdata->irq_type, dev_name(&spi->dev), lp); if (rc) goto err_gpio_dir; @@ -948,7 +964,6 @@ err_gpio_dir: err_slp_tr: gpio_free(lp->rstn); err_rstn: -err_fill: spi_set_drvdata(spi, NULL); mutex_destroy(&lp->bmux); ieee802154_free_device(lp->dev); diff --git a/include/linux/spi/at86rf230.h b/include/linux/spi/at86rf230.h index b2b1afbb3202..aa327a8105ad 100644 --- a/include/linux/spi/at86rf230.h +++ b/include/linux/spi/at86rf230.h @@ -26,6 +26,20 @@ struct at86rf230_platform_data { int rstn; int slp_tr; int dig2; + + /* Setting the irq_type will configure the driver to request + * the platform irq trigger type according to the given value + * and configure the interrupt polarity of the device to the + * corresponding polarity. + * + * Allowed values are: IRQF_TRIGGER_RISING, IRQF_TRIGGER_FALLING, + * IRQF_TRIGGER_HIGH and IRQF_TRIGGER_LOW + * + * Setting it to 0, the driver does not touch the trigger type + * configuration of the interrupt and sets the interrupt polarity + * of the device to high active (the default value). + */ + int irq_type; }; #endif -- cgit v1.2.3-70-g09d2 From f646968f8f7c624587de729115d802372b9063dd Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Fri, 19 Apr 2013 02:04:27 +0000 Subject: net: vlan: rename NETIF_F_HW_VLAN_* feature flags to NETIF_F_HW_VLAN_CTAG_* Rename the hardware VLAN acceleration features to include "CTAG" to indicate that they only support CTAGs. Follow up patches will introduce 802.1ad server provider tagging (STAGs) and require the distinction for hardware not supporting acclerating both. Signed-off-by: Patrick McHardy Signed-off-by: David S. 
Miller --- drivers/infiniband/hw/nes/nes_nic.c | 14 ++++++------ drivers/net/bonding/bond_main.c | 6 +++--- drivers/net/ethernet/3com/typhoon.c | 4 ++-- drivers/net/ethernet/adaptec/starfire.c | 2 +- drivers/net/ethernet/alteon/acenic.c | 2 +- drivers/net/ethernet/amd/amd8111e.c | 4 ++-- drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 22 +++++++++---------- drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 14 ++++++------ drivers/net/ethernet/atheros/atlx/atl1.c | 4 ++-- drivers/net/ethernet/atheros/atlx/atl2.c | 16 +++++++------- drivers/net/ethernet/atheros/atlx/atlx.c | 10 ++++----- drivers/net/ethernet/broadcom/bnx2.c | 10 ++++----- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 4 ++-- drivers/net/ethernet/broadcom/tg3.c | 2 +- drivers/net/ethernet/brocade/bna/bnad.c | 4 ++-- drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 13 +++++------ drivers/net/ethernet/chelsio/cxgb/sge.c | 2 +- drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 20 +++++++++-------- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 10 ++++----- .../net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 15 +++++++------ drivers/net/ethernet/cisco/enic/enic_main.c | 4 ++-- drivers/net/ethernet/emulex/benet/be_main.c | 4 ++-- drivers/net/ethernet/freescale/gianfar.c | 15 +++++++------ drivers/net/ethernet/freescale/gianfar_ethtool.c | 2 +- drivers/net/ethernet/ibm/ehea/ehea_main.c | 8 +++---- drivers/net/ethernet/intel/e1000/e1000_main.c | 16 +++++++------- drivers/net/ethernet/intel/e1000e/netdev.c | 10 ++++----- drivers/net/ethernet/intel/igb/igb_main.c | 18 ++++++++-------- drivers/net/ethernet/intel/igbvf/netdev.c | 6 +++--- drivers/net/ethernet/intel/ixgb/ixgb_main.c | 14 ++++++------ drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 12 +++++------ drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 2 +- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 6 +++--- drivers/net/ethernet/jme.c | 4 ++-- drivers/net/ethernet/marvell/sky2.c | 9 ++++---- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 4 ++-- drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 7 +++--- drivers/net/ethernet/natsemi/ns83820.c | 2 +- drivers/net/ethernet/neterion/s2io.c | 2 +- drivers/net/ethernet/neterion/vxge/vxge-main.c | 4 ++-- drivers/net/ethernet/nvidia/forcedeth.c | 20 +++++++++-------- .../net/ethernet/qlogic/netxen/netxen_nic_main.c | 2 +- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 4 ++-- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 16 +++++++------- drivers/net/ethernet/realtek/8139cp.c | 6 +++--- drivers/net/ethernet/realtek/r8169.c | 14 ++++++------ drivers/net/ethernet/renesas/sh_eth.c | 2 +- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2 +- drivers/net/ethernet/tehuti/tehuti.c | 6 +++--- drivers/net/ethernet/ti/cpsw.c | 4 ++-- drivers/net/ethernet/toshiba/spider_net.c | 4 ++-- drivers/net/ethernet/via/via-rhine.c | 5 +++-- drivers/net/ethernet/via/via-velocity.c | 7 +++--- drivers/net/ethernet/xilinx/ll_temac_main.c | 6 +++--- drivers/net/hyperv/netvsc_drv.c | 2 +- drivers/net/ifb.c | 2 +- drivers/net/macvlan.c | 2 +- drivers/net/team/team.c | 6 +++--- drivers/net/usb/cdc_mbim.c | 2 +- drivers/net/veth.c | 2 +- drivers/net/virtio_net.c | 2 +- drivers/net/vmxnet3/vmxnet3_drv.c | 11 +++++----- drivers/net/vmxnet3/vmxnet3_ethtool.c | 5 +++-- drivers/s390/net/qeth_l2_main.c | 2 +- drivers/s390/net/qeth_l3_main.c | 6 +++--- drivers/scsi/fcoe/fcoe.c | 2 +- include/linux/if_vlan.h | 4 ++-- include/linux/netdev_features.h | 12 +++++------ include/linux/netdevice.h | 6 ++++-- net/8021q/vlan.c | 6 +++--- net/8021q/vlan_core.c | 4 
++-- net/8021q/vlan_dev.c | 2 +- net/bridge/br_device.c | 4 ++-- net/bridge/br_vlan.c | 6 +++--- net/core/dev.c | 9 ++++---- net/core/ethtool.c | 25 +++++++++++----------- net/core/netpoll.c | 2 +- net/openvswitch/vport-internal_dev.c | 2 +- 78 files changed, 285 insertions(+), 266 deletions(-) (limited to 'include/linux') diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 85cf4d1ac442..49eb5111d2cd 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@ -1599,7 +1599,7 @@ static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, /* Enable/Disable VLAN Stripping */ u32temp = nes_read_indexed(nesdev, NES_IDX_PCIX_DIAG); - if (features & NETIF_F_HW_VLAN_RX) + if (features & NETIF_F_HW_VLAN_CTAG_RX) u32temp &= 0xfdffffff; else u32temp |= 0x02000000; @@ -1614,10 +1614,10 @@ static netdev_features_t nes_fix_features(struct net_device *netdev, netdev_feat * Since there is no support for separate rx/tx vlan accel * enable/disable make sure tx flag is always in same state as rx. */ - if (features & NETIF_F_HW_VLAN_RX) - features |= NETIF_F_HW_VLAN_TX; + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; else - features &= ~NETIF_F_HW_VLAN_TX; + features &= ~NETIF_F_HW_VLAN_CTAG_TX; return features; } @@ -1628,7 +1628,7 @@ static int nes_set_features(struct net_device *netdev, netdev_features_t feature struct nes_device *nesdev = nesvnic->nesdev; u32 changed = netdev->features ^ features; - if (changed & NETIF_F_HW_VLAN_RX) + if (changed & NETIF_F_HW_VLAN_CTAG_RX) nes_vlan_mode(netdev, nesdev, features); return 0; @@ -1706,11 +1706,11 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev, netdev->dev_addr[4] = (u8)(u64temp>>8); netdev->dev_addr[5] = (u8)u64temp; - netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX; + netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX; if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) netdev->hw_features |= NETIF_F_TSO; - netdev->features = netdev->hw_features | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX; + netdev->features = netdev->hw_features | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX; netdev->hw_features |= NETIF_F_LRO; nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d," diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2aac890320cb..8d324f8a1757 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4322,9 +4322,9 @@ static void bond_setup(struct net_device *bond_dev) */ bond_dev->hw_features = BOND_VLAN_FEATURES | - NETIF_F_HW_VLAN_TX | - NETIF_F_HW_VLAN_RX | - NETIF_F_HW_VLAN_FILTER; + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM); bond_dev->features |= bond_dev->hw_features; diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c index 27aaaf99e73e..839a96213742 100644 --- a/drivers/net/ethernet/3com/typhoon.c +++ b/drivers/net/ethernet/3com/typhoon.c @@ -2445,9 +2445,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) * settings -- so we only allow the user to toggle the TX processing. 
*/ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | - NETIF_F_HW_VLAN_TX; + NETIF_F_HW_VLAN_CTAG_TX; dev->features = dev->hw_features | - NETIF_F_HW_VLAN_RX | NETIF_F_RXCSUM; + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM; if(register_netdev(dev) < 0) { err_msg = "unable to register netdev"; diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c index 549b77500579..365865130f7c 100644 --- a/drivers/net/ethernet/adaptec/starfire.c +++ b/drivers/net/ethernet/adaptec/starfire.c @@ -702,7 +702,7 @@ static int starfire_init_one(struct pci_dev *pdev, #endif /* ZEROCOPY */ #ifdef VLAN_SUPPORT - dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; + dev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; #endif /* VLAN_RX_KILL_VID */ #ifdef ADDR_64BITS dev->features |= NETIF_F_HIGHDMA; diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c index c0bc41a784ca..a7689d931053 100644 --- a/drivers/net/ethernet/alteon/acenic.c +++ b/drivers/net/ethernet/alteon/acenic.c @@ -472,7 +472,7 @@ static int acenic_probe_one(struct pci_dev *pdev, ap->name = pci_name(pdev); dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; - dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; dev->watchdog_timeo = 5*HZ; diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index 42d4e6ad58a5..6bad84d6e2c4 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -1869,7 +1869,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev, SET_NETDEV_DEV(dev, &pdev->dev); #if AMD8111E_VLAN_TAG_USED - dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX ; + dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX ; #endif lp = netdev_priv(dev); @@ -1907,7 +1907,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev, netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32); #if AMD8111E_VLAN_TAG_USED - dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; #endif /* Probe the external PHY */ amd8111e_probe_ext_phy(dev); diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 1f07fc633ab9..3565255cdd81 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -417,7 +417,7 @@ static void atl1c_set_multi(struct net_device *netdev) static void __atl1c_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data) { - if (features & NETIF_F_HW_VLAN_RX) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) { /* enable VLAN tag insert/strip */ *mac_ctrl_data |= MAC_CTRL_RMV_VLAN; } else { @@ -494,10 +494,10 @@ static netdev_features_t atl1c_fix_features(struct net_device *netdev, * Since there is no support for separate rx/tx vlan accel * enable/disable make sure tx flag is always in same state as rx. 
*/ - if (features & NETIF_F_HW_VLAN_RX) - features |= NETIF_F_HW_VLAN_TX; + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; else - features &= ~NETIF_F_HW_VLAN_TX; + features &= ~NETIF_F_HW_VLAN_CTAG_TX; if (netdev->mtu > MAX_TSO_FRAME_SIZE) features &= ~(NETIF_F_TSO | NETIF_F_TSO6); @@ -510,7 +510,7 @@ static int atl1c_set_features(struct net_device *netdev, { netdev_features_t changed = netdev->features ^ features; - if (changed & NETIF_F_HW_VLAN_RX) + if (changed & NETIF_F_HW_VLAN_CTAG_RX) atl1c_vlan_mode(netdev, features); return 0; @@ -2475,13 +2475,13 @@ static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev) atl1c_set_ethtool_ops(netdev); /* TODO: add when ready */ - netdev->hw_features = NETIF_F_SG | - NETIF_F_HW_CSUM | - NETIF_F_HW_VLAN_RX | - NETIF_F_TSO | + netdev->hw_features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_TSO | NETIF_F_TSO6; - netdev->features = netdev->hw_features | - NETIF_F_HW_VLAN_TX; + netdev->features = netdev->hw_features | + NETIF_F_HW_VLAN_CTAG_TX; return 0; } diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index d058d0061ed0..598a61151668 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -315,7 +315,7 @@ static void atl1e_set_multi(struct net_device *netdev) static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data) { - if (features & NETIF_F_HW_VLAN_RX) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) { /* enable VLAN tag insert/strip */ *mac_ctrl_data |= MAC_CTRL_RMV_VLAN; } else { @@ -378,10 +378,10 @@ static netdev_features_t atl1e_fix_features(struct net_device *netdev, * Since there is no support for separate rx/tx vlan accel * enable/disable make sure tx flag is always in same state as rx. */ - if (features & NETIF_F_HW_VLAN_RX) - features |= NETIF_F_HW_VLAN_TX; + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; else - features &= ~NETIF_F_HW_VLAN_TX; + features &= ~NETIF_F_HW_VLAN_CTAG_TX; return features; } @@ -391,7 +391,7 @@ static int atl1e_set_features(struct net_device *netdev, { netdev_features_t changed = netdev->features ^ features; - if (changed & NETIF_F_HW_VLAN_RX) + if (changed & NETIF_F_HW_VLAN_CTAG_RX) atl1e_vlan_mode(netdev, features); return 0; @@ -2198,9 +2198,9 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev) atl1e_set_ethtool_ops(netdev); netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO | - NETIF_F_HW_VLAN_RX; + NETIF_F_HW_VLAN_CTAG_RX; netdev->features = netdev->hw_features | NETIF_F_LLTX | - NETIF_F_HW_VLAN_TX; + NETIF_F_HW_VLAN_CTAG_TX; return 0; } diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index 8338013ab33d..fd7d85044e4a 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -3018,10 +3018,10 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->features = NETIF_F_HW_CSUM; netdev->features |= NETIF_F_SG; - netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); + netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); netdev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_TSO | - NETIF_F_HW_VLAN_RX; + NETIF_F_HW_VLAN_CTAG_RX; /* is this valid? 
see atl1_setup_mac_ctrl() */ netdev->features |= NETIF_F_RXCSUM; diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index a046b6ff847c..6b2c08a89b7e 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -363,7 +363,7 @@ static inline void atl2_irq_disable(struct atl2_adapter *adapter) static void __atl2_vlan_mode(netdev_features_t features, u32 *ctrl) { - if (features & NETIF_F_HW_VLAN_RX) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) { /* enable VLAN tag insert/strip */ *ctrl |= MAC_CTRL_RMV_VLAN; } else { @@ -399,10 +399,10 @@ static netdev_features_t atl2_fix_features(struct net_device *netdev, * Since there is no support for separate rx/tx vlan accel * enable/disable make sure tx flag is always in same state as rx. */ - if (features & NETIF_F_HW_VLAN_RX) - features |= NETIF_F_HW_VLAN_TX; + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; else - features &= ~NETIF_F_HW_VLAN_TX; + features &= ~NETIF_F_HW_VLAN_CTAG_TX; return features; } @@ -412,7 +412,7 @@ static int atl2_set_features(struct net_device *netdev, { netdev_features_t changed = netdev->features ^ features; - if (changed & NETIF_F_HW_VLAN_RX) + if (changed & NETIF_F_HW_VLAN_CTAG_RX) atl2_vlan_mode(netdev, features); return 0; @@ -887,7 +887,7 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb, skb->len-copy_len); offset = ((u32)(skb->len-copy_len + 3) & ~3); } -#ifdef NETIF_F_HW_VLAN_TX +#ifdef NETIF_F_HW_VLAN_CTAG_TX if (vlan_tx_tag_present(skb)) { u16 vlan_tag = vlan_tx_tag_get(skb); vlan_tag = (vlan_tag << 4) | @@ -1413,8 +1413,8 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = -EIO; - netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_RX; - netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); + netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX; + netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); /* Init PHY as early as possible due to power saving issue */ atl2_phy_init(&adapter->hw); diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c index f82eb1699464..46a622cceee4 100644 --- a/drivers/net/ethernet/atheros/atlx/atlx.c +++ b/drivers/net/ethernet/atheros/atlx/atlx.c @@ -220,7 +220,7 @@ static void atlx_link_chg_task(struct work_struct *work) static void __atlx_vlan_mode(netdev_features_t features, u32 *ctrl) { - if (features & NETIF_F_HW_VLAN_RX) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) { /* enable VLAN tag insert/strip */ *ctrl |= MAC_CTRL_RMV_VLAN; } else { @@ -257,10 +257,10 @@ static netdev_features_t atlx_fix_features(struct net_device *netdev, * Since there is no support for separate rx/tx vlan accel * enable/disable make sure tx flag is always in same state as rx. 
*/ - if (features & NETIF_F_HW_VLAN_RX) - features |= NETIF_F_HW_VLAN_TX; + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; else - features &= ~NETIF_F_HW_VLAN_TX; + features &= ~NETIF_F_HW_VLAN_CTAG_TX; return features; } @@ -270,7 +270,7 @@ static int atlx_set_features(struct net_device *netdev, { netdev_features_t changed = netdev->features ^ features; - if (changed & NETIF_F_HW_VLAN_RX) + if (changed & NETIF_F_HW_VLAN_CTAG_RX) atlx_vlan_mode(netdev, features); return 0; diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index f27b549b692d..42a8bc8df5dd 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -3553,7 +3553,7 @@ bnx2_set_rx_mode(struct net_device *dev) rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS | BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG); sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN; - if (!(dev->features & NETIF_F_HW_VLAN_RX) && + if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG; if (dev->flags & IFF_PROMISC) { @@ -7695,7 +7695,7 @@ bnx2_fix_features(struct net_device *dev, netdev_features_t features) struct bnx2 *bp = netdev_priv(dev); if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) - features |= NETIF_F_HW_VLAN_RX; + features |= NETIF_F_HW_VLAN_CTAG_RX; return features; } @@ -7706,12 +7706,12 @@ bnx2_set_features(struct net_device *dev, netdev_features_t features) struct bnx2 *bp = netdev_priv(dev); /* TSO with VLAN tag won't work with current firmware */ - if (features & NETIF_F_HW_VLAN_TX) + if (features & NETIF_F_HW_VLAN_CTAG_TX) dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO); else dev->vlan_features &= ~NETIF_F_ALL_TSO; - if ((!!(features & NETIF_F_HW_VLAN_RX) != + if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) != !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) && netif_running(dev)) { bnx2_netif_stop(bp, false); @@ -8551,7 +8551,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; dev->vlan_features = dev->hw_features; - dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; dev->features |= dev->hw_features; dev->priv_flags |= IFF_UNICAST_FLT; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index fdfe33bc097b..1e60c5d139d1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -12027,7 +12027,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | - NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX; + NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX; if (!CHIP_IS_E1x(bp)) { dev->hw_features |= NETIF_F_GSO_GRE; dev->hw_enc_features = @@ -12039,7 +12039,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA; - dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX; + dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX; if (bp->flags & USING_DAC_FLAG) dev->features |= NETIF_F_HIGHDMA; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 
45719ddfde74..0c22c9a059c4 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -17197,7 +17197,7 @@ static int tg3_init_one(struct pci_dev *pdev, tg3_init_bufmgr_config(tp); - features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; /* 5700 B0 chips do not support checksumming correctly due * to hardware bugs. diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index d588f842d557..1d9d0371a743 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -3170,14 +3170,14 @@ bnad_netdev_init(struct bnad *bnad, bool using_dac) netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX; + NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX; netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6; netdev->features |= netdev->hw_features | - NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; if (using_dac) netdev->features |= NETIF_F_HIGHDMA; diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c index 20d2085f61c5..9624cfe7df57 100644 --- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c +++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c @@ -856,10 +856,10 @@ static netdev_features_t t1_fix_features(struct net_device *dev, * Since there is no support for separate rx/tx vlan accel * enable/disable make sure tx flag is always in same state as rx. */ - if (features & NETIF_F_HW_VLAN_RX) - features |= NETIF_F_HW_VLAN_TX; + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; else - features &= ~NETIF_F_HW_VLAN_TX; + features &= ~NETIF_F_HW_VLAN_CTAG_TX; return features; } @@ -869,7 +869,7 @@ static int t1_set_features(struct net_device *dev, netdev_features_t features) netdev_features_t changed = dev->features ^ features; struct adapter *adapter = dev->ml_priv; - if (changed & NETIF_F_HW_VLAN_RX) + if (changed & NETIF_F_HW_VLAN_CTAG_RX) t1_vlan_mode(adapter, features); return 0; @@ -1085,8 +1085,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->features |= NETIF_F_HIGHDMA; if (vlan_tso_capable(adapter)) { netdev->features |= - NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; - netdev->hw_features |= NETIF_F_HW_VLAN_RX; + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; /* T204: disable TSO */ if (!(is_T2(adapter)) || bi->port_number != 4) { diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c index 55fe8c9f0484..f85e0659432b 100644 --- a/drivers/net/ethernet/chelsio/cxgb/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb/sge.c @@ -734,7 +734,7 @@ void t1_vlan_mode(struct adapter *adapter, netdev_features_t features) { struct sge *sge = adapter->sge; - if (features & NETIF_F_HW_VLAN_RX) + if (features & NETIF_F_HW_VLAN_CTAG_RX) sge->sge_control |= F_VLAN_XTRACT; else sge->sge_control &= ~F_VLAN_XTRACT; diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 2b5e62193cea..71497e835f42 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -1181,14 +1181,15 @@ static void cxgb_vlan_mode(struct net_device *dev, 
netdev_features_t features) if (adapter->params.rev > 0) { t3_set_vlan_accel(adapter, 1 << pi->port_id, - features & NETIF_F_HW_VLAN_RX); + features & NETIF_F_HW_VLAN_CTAG_RX); } else { /* single control for all ports */ - unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_RX; + unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX; for_each_port(adapter, i) have_vlans |= - adapter->port[i]->features & NETIF_F_HW_VLAN_RX; + adapter->port[i]->features & + NETIF_F_HW_VLAN_CTAG_RX; t3_set_vlan_accel(adapter, 1, have_vlans); } @@ -2563,10 +2564,10 @@ static netdev_features_t cxgb_fix_features(struct net_device *dev, * Since there is no support for separate rx/tx vlan accel * enable/disable make sure tx flag is always in same state as rx. */ - if (features & NETIF_F_HW_VLAN_RX) - features |= NETIF_F_HW_VLAN_TX; + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; else - features &= ~NETIF_F_HW_VLAN_TX; + features &= ~NETIF_F_HW_VLAN_CTAG_TX; return features; } @@ -2575,7 +2576,7 @@ static int cxgb_set_features(struct net_device *dev, netdev_features_t features) { netdev_features_t changed = dev->features ^ features; - if (changed & NETIF_F_HW_VLAN_RX) + if (changed & NETIF_F_HW_VLAN_CTAG_RX) cxgb_vlan_mode(dev, features); return 0; @@ -3288,8 +3289,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->mem_start = mmio_start; netdev->mem_end = mmio_start + mmio_len - 1; netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX; - netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_TX; + NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX; + netdev->features |= netdev->hw_features | + NETIF_F_HW_VLAN_CTAG_TX; netdev->vlan_features |= netdev->features & VLAN_FEAT; if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index e76cf035100b..6a6a01af75fd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -559,7 +559,7 @@ static int link_start(struct net_device *dev) * that step explicitly. 
*/ ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1, - !!(dev->features & NETIF_F_HW_VLAN_RX), true); + !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true); if (ret == 0) { ret = t4_change_mac(pi->adapter, mb, pi->viid, pi->xact_addr_filt, dev->dev_addr, true, @@ -2722,14 +2722,14 @@ static int cxgb_set_features(struct net_device *dev, netdev_features_t features) netdev_features_t changed = dev->features ^ features; int err; - if (!(changed & NETIF_F_HW_VLAN_RX)) + if (!(changed & NETIF_F_HW_VLAN_CTAG_RX)) return 0; err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, -1, -1, -1, - !!(features & NETIF_F_HW_VLAN_RX), true); + !!(features & NETIF_F_HW_VLAN_CTAG_RX), true); if (unlikely(err)) - dev->features = features ^ NETIF_F_HW_VLAN_RX; + dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX; return err; } @@ -5628,7 +5628,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->hw_features = NETIF_F_SG | TSO_FLAGS | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_RXHASH | - NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; if (highdma) netdev->hw_features |= NETIF_F_HIGHDMA; netdev->features |= netdev->hw_features; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 7fcac2003769..73aef76a526c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -1100,10 +1100,10 @@ static netdev_features_t cxgb4vf_fix_features(struct net_device *dev, * Since there is no support for separate rx/tx vlan accel * enable/disable make sure tx flag is always in same state as rx. */ - if (features & NETIF_F_HW_VLAN_RX) - features |= NETIF_F_HW_VLAN_TX; + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; else - features &= ~NETIF_F_HW_VLAN_TX; + features &= ~NETIF_F_HW_VLAN_CTAG_TX; return features; } @@ -1114,9 +1114,9 @@ static int cxgb4vf_set_features(struct net_device *dev, struct port_info *pi = netdev_priv(dev); netdev_features_t changed = dev->features ^ features; - if (changed & NETIF_F_HW_VLAN_RX) + if (changed & NETIF_F_HW_VLAN_CTAG_RX) t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1, - features & NETIF_F_HW_VLAN_TX, 0); + features & NETIF_F_HW_VLAN_CTAG_TX, 0); return 0; } @@ -2623,11 +2623,12 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, netdev->hw_features = NETIF_F_SG | TSO_FLAGS | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_HW_VLAN_RX | NETIF_F_RXCSUM; + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM; netdev->vlan_features = NETIF_F_SG | TSO_FLAGS | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA; - netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_TX; + netdev->features = netdev->hw_features | + NETIF_F_HW_VLAN_CTAG_TX; if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index ec1a233622c6..05c1e59b6bff 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -2496,9 +2496,9 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->watchdog_timeo = 2 * HZ; netdev->ethtool_ops = &enic_ethtool_ops; - netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; if (ENIC_SETTING(enic, LOOP)) { - netdev->features &= 
~NETIF_F_HW_VLAN_TX; + netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX; enic->loop_enable = 1; enic->loop_tag = enic->config.loop_tag; dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 536afa2fb94c..bde26d4d52ec 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -3663,12 +3663,12 @@ static void be_netdev_init(struct net_device *netdev) netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | - NETIF_F_HW_VLAN_TX; + NETIF_F_HW_VLAN_CTAG_TX; if (be_multi_rxq(adapter)) netdev->hw_features |= NETIF_F_RXHASH; netdev->features |= netdev->hw_features | - NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 96fbe3548243..51555445ce2f 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -386,7 +386,7 @@ static void gfar_init_mac(struct net_device *ndev) priv->uses_rxfcb = 1; } - if (ndev->features & NETIF_F_HW_VLAN_RX) { + if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) { rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; priv->uses_rxfcb = 1; } @@ -1050,8 +1050,9 @@ static int gfar_probe(struct platform_device *ofdev) } if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { - dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; - dev->features |= NETIF_F_HW_VLAN_RX; + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + dev->features |= NETIF_F_HW_VLAN_CTAG_RX; } if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { @@ -2348,7 +2349,7 @@ void gfar_vlan_mode(struct net_device *dev, netdev_features_t features) local_irq_save(flags); lock_rx_qs(priv); - if (features & NETIF_F_HW_VLAN_TX) { + if (features & NETIF_F_HW_VLAN_CTAG_TX) { /* Enable VLAN tag insertion */ tempval = gfar_read(&regs->tctrl); tempval |= TCTRL_VLINS; @@ -2360,7 +2361,7 @@ void gfar_vlan_mode(struct net_device *dev, netdev_features_t features) gfar_write(&regs->tctrl, tempval); } - if (features & NETIF_F_HW_VLAN_RX) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) { /* Enable VLAN tag extraction */ tempval = gfar_read(&regs->rctrl); tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT); @@ -2724,11 +2725,11 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, /* Tell the skb what kind of packet this is */ skb->protocol = eth_type_trans(skb, dev); - /* There's need to check for NETIF_F_HW_VLAN_RX here. + /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here. * Even if vlan rx accel is disabled, on some chips * RXFCB_VLN is pseudo randomly set.
*/ - if (dev->features & NETIF_F_HW_VLAN_RX && + if (dev->features & NETIF_F_HW_VLAN_CTAG_RX && fcb->flags & RXFCB_VLN) __vlan_hwaccel_put_tag(skb, fcb->vlctl); diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 083603f6bec0..21cd88124ca9 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -542,7 +542,7 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features) int err = 0, i = 0; netdev_features_t changed = dev->features ^ features; - if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)) + if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX)) gfar_vlan_mode(dev, features); if (!(changed & NETIF_F_RXCSUM)) diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 029633434474..9c9fa745ff82 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -3021,11 +3021,11 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, ehea_set_ethtool_ops(dev); dev->hw_features = NETIF_F_SG | NETIF_F_TSO - | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX; + | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX; dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO - | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX - | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER - | NETIF_F_RXCSUM; + | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | + | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | + | NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM; dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM; dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index d98e1d0996d4..8d0d0d420c21 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -809,10 +809,10 @@ static netdev_features_t e1000_fix_features(struct net_device *netdev, /* Since there is no support for separate Rx/Tx vlan accel * enable/disable make sure Tx flag is always in same state as Rx. 
*/ - if (features & NETIF_F_HW_VLAN_RX) - features |= NETIF_F_HW_VLAN_TX; + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; else - features &= ~NETIF_F_HW_VLAN_TX; + features &= ~NETIF_F_HW_VLAN_CTAG_TX; return features; } @@ -823,7 +823,7 @@ static int e1000_set_features(struct net_device *netdev, struct e1000_adapter *adapter = netdev_priv(netdev); netdev_features_t changed = features ^ netdev->features; - if (changed & NETIF_F_HW_VLAN_RX) + if (changed & NETIF_F_HW_VLAN_CTAG_RX) e1000_vlan_mode(netdev, features); if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL))) @@ -1058,9 +1058,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (hw->mac_type >= e1000_82543) { netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | - NETIF_F_HW_VLAN_RX; - netdev->features = NETIF_F_HW_VLAN_TX | - NETIF_F_HW_VLAN_FILTER; + NETIF_F_HW_VLAN_CTAG_RX; + netdev->features = NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_FILTER; } if ((hw->mac_type >= e1000_82544) && @@ -4785,7 +4785,7 @@ static void __e1000_vlan_mode(struct e1000_adapter *adapter, u32 ctrl; ctrl = er32(CTRL); - if (features & NETIF_F_HW_VLAN_RX) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) { /* enable VLAN tag insert/strip */ ctrl |= E1000_CTRL_VME; } else { diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index b18fad5b579e..a2e7db33bf9d 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3373,7 +3373,7 @@ static void e1000e_set_rx_mode(struct net_device *netdev) ew32(RCTL, rctl); - if (netdev->features & NETIF_F_HW_VLAN_RX) + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) e1000e_vlan_strip_enable(adapter); else e1000e_vlan_strip_disable(adapter); @@ -6418,7 +6418,7 @@ static int e1000_set_features(struct net_device *netdev, if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) adapter->flags |= FLAG_TSO_FORCE; - if (!(changed & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX | + if (!(changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS | NETIF_F_RXALL))) return 0; @@ -6629,8 +6629,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Set initial default active device features */ netdev->features = (NETIF_F_SG | - NETIF_F_HW_VLAN_RX | - NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXHASH | @@ -6644,7 +6644,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->hw_features |= NETIF_F_RXALL; if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) - netdev->features |= NETIF_F_HW_VLAN_FILTER; + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; netdev->vlan_features |= (NETIF_F_SG | NETIF_F_TSO | diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 38590252be64..b0b1777c0af6 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1860,10 +1860,10 @@ static netdev_features_t igb_fix_features(struct net_device *netdev, /* Since there is no support for separate Rx/Tx vlan accel * enable/disable make sure Tx flag is always in same state as Rx. 
*/ - if (features & NETIF_F_HW_VLAN_RX) - features |= NETIF_F_HW_VLAN_TX; + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; else - features &= ~NETIF_F_HW_VLAN_TX; + features &= ~NETIF_F_HW_VLAN_CTAG_TX; return features; } @@ -1874,7 +1874,7 @@ static int igb_set_features(struct net_device *netdev, netdev_features_t changed = netdev->features ^ features; struct igb_adapter *adapter = netdev_priv(netdev); - if (changed & NETIF_F_HW_VLAN_RX) + if (changed & NETIF_F_HW_VLAN_CTAG_RX) igb_vlan_mode(netdev, features); if (!(changed & NETIF_F_RXALL)) @@ -2127,15 +2127,15 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_TSO6 | NETIF_F_RXHASH | NETIF_F_RXCSUM | - NETIF_F_HW_VLAN_RX | - NETIF_F_HW_VLAN_TX; + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; /* copy netdev features into list of user selectable features */ netdev->hw_features |= netdev->features; netdev->hw_features |= NETIF_F_RXALL; /* set this bit last since it cannot be part of hw_features */ - netdev->features |= NETIF_F_HW_VLAN_FILTER; + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; netdev->vlan_features |= NETIF_F_TSO | NETIF_F_TSO6 | @@ -6674,7 +6674,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring, igb_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb); - if ((dev->features & NETIF_F_HW_VLAN_RX) && + if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { u16 vid; if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) && @@ -6954,7 +6954,7 @@ static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features) struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 ctrl, rctl; - bool enable = !!(features & NETIF_F_HW_VLAN_RX); + bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); if (enable) { /* enable VLAN tag insert/strip */ diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index bea46bb26061..33e7b3069fb6 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -2722,9 +2722,9 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_RXCSUM; netdev->features = netdev->hw_features | - NETIF_F_HW_VLAN_TX | - NETIF_F_HW_VLAN_RX | - NETIF_F_HW_VLAN_FILTER; + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 5dc119fd95a8..e65d9e910227 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -332,8 +332,8 @@ ixgb_fix_features(struct net_device *netdev, netdev_features_t features) * Tx VLAN insertion does not work per HW design when Rx stripping is * disabled. 
*/ - if (!(features & NETIF_F_HW_VLAN_RX)) - features &= ~NETIF_F_HW_VLAN_TX; + if (!(features & NETIF_F_HW_VLAN_CTAG_RX)) + features &= ~NETIF_F_HW_VLAN_CTAG_TX; return features; } @@ -344,7 +344,7 @@ ixgb_set_features(struct net_device *netdev, netdev_features_t features) struct ixgb_adapter *adapter = netdev_priv(netdev); netdev_features_t changed = features ^ netdev->features; - if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_RX))) + if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_CTAG_RX))) return 0; adapter->rx_csum = !!(features & NETIF_F_RXCSUM); @@ -479,10 +479,10 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->hw_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM | - NETIF_F_HW_VLAN_TX | - NETIF_F_HW_VLAN_RX; + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; netdev->features = netdev->hw_features | - NETIF_F_HW_VLAN_FILTER; + NETIF_F_HW_VLAN_CTAG_FILTER; netdev->hw_features |= NETIF_F_RXCSUM; if (pci_using_dac) { @@ -1140,7 +1140,7 @@ ixgb_set_multi(struct net_device *netdev) } alloc_failed: - if (netdev->features & NETIF_F_HW_VLAN_RX) + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ixgb_vlan_strip_enable(adapter); else ixgb_vlan_strip_disable(adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index c022f9c417a6..0316b65dfe06 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1488,7 +1488,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); - if ((dev->features & NETIF_F_HW_VLAN_RX) && + if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); __vlan_hwaccel_put_tag(skb, vid); @@ -3722,7 +3722,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev) IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); - if (netdev->features & NETIF_F_HW_VLAN_RX) + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ixgbe_vlan_strip_enable(adapter); else ixgbe_vlan_strip_disable(adapter); @@ -7024,7 +7024,7 @@ static int ixgbe_set_features(struct net_device *netdev, break; } - if (features & NETIF_F_HW_VLAN_RX) + if (features & NETIF_F_HW_VLAN_CTAG_RX) ixgbe_vlan_strip_enable(adapter); else ixgbe_vlan_strip_disable(adapter); @@ -7431,9 +7431,9 @@ skip_sriov: netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_HW_VLAN_TX | - NETIF_F_HW_VLAN_RX | - NETIF_F_HW_VLAN_FILTER | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXHASH | diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index b3e6530637e3..2d4bdcc4fdbe 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -35,7 +35,7 @@ #include #include #include -#ifdef NETIF_F_HW_VLAN_TX +#ifdef NETIF_F_HW_VLAN_CTAG_TX #include #endif diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index eeae9349f78b..8f907b7af319 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -3410,9 +3410,9 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_RXCSUM; netdev->features = netdev->hw_features | - NETIF_F_HW_VLAN_TX | - NETIF_F_HW_VLAN_RX | - NETIF_F_HW_VLAN_FILTER; + 
NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; netdev->vlan_features |= NETIF_F_TSO; netdev->vlan_features |= NETIF_F_TSO6; diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 0519afa413d2..d28ce6f97172 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -3030,8 +3030,8 @@ jme_init_one(struct pci_dev *pdev, NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | - NETIF_F_HW_VLAN_TX | - NETIF_F_HW_VLAN_RX; + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; if (using_dac) netdev->features |= NETIF_F_HIGHDMA; diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 6a0e671fcecd..bf9da1b7b090 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -1421,14 +1421,14 @@ static void sky2_vlan_mode(struct net_device *dev, netdev_features_t features) struct sky2_hw *hw = sky2->hw; u16 port = sky2->port; - if (features & NETIF_F_HW_VLAN_RX) + if (features & NETIF_F_HW_VLAN_CTAG_RX) sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_ON); else sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF); - if (features & NETIF_F_HW_VLAN_TX) { + if (features & NETIF_F_HW_VLAN_CTAG_TX) { sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_ON); @@ -4406,7 +4406,7 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features) if (changed & NETIF_F_RXHASH) rx_set_rss(dev, features); - if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)) + if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX)) sky2_vlan_mode(dev, features); return 0; @@ -4793,7 +4793,8 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port, dev->hw_features |= NETIF_F_RXHASH; if (!(hw->flags & SKY2_HW_VLAN_BROKEN)) { - dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; dev->vlan_features |= SKY2_VLAN_OFFLOADS; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index d2a4f919bf1f..b2ba39c7143a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2082,8 +2082,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH; dev->features = dev->hw_features | NETIF_F_HIGHDMA | - NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | - NETIF_F_HW_VLAN_FILTER; + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; dev->hw_features |= NETIF_F_LOOPBACK; if (mdev->dev->caps.steering_mode == diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index d5ffdc8264eb..46262ea610fd 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -1281,7 +1281,8 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb) va = addr; va += MXGEFW_PAD; veh = (struct vlan_ethhdr *)va; - if ((dev->features & NETIF_F_HW_VLAN_RX) == NETIF_F_HW_VLAN_RX && + if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) == + NETIF_F_HW_VLAN_CTAG_RX && veh->h_vlan_proto == htons(ETH_P_8021Q)) { /* fixup csum if needed */ if (skb->ip_summed == CHECKSUM_COMPLETE) { @@ -3887,8 +3888,8 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->mtu = myri10ge_initial_mtu; netdev->hw_features = mgp->features | NETIF_F_RXCSUM; - /* 
fake NETIF_F_HW_VLAN_RX for good GRO performance */ - netdev->hw_features |= NETIF_F_HW_VLAN_RX; + /* fake NETIF_F_HW_VLAN_CTAG_RX for good GRO performance */ + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; netdev->features = netdev->hw_features; diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c index 77c070de621e..60267e91cbda 100644 --- a/drivers/net/ethernet/natsemi/ns83820.c +++ b/drivers/net/ethernet/natsemi/ns83820.c @@ -2193,7 +2193,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev, #ifdef NS83820_VLAN_ACCEL_SUPPORT /* We also support hardware vlan acceleration */ - ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + ndev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; #endif if (using_dac) { diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 3371ff41bb34..ec82d59b03ed 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -7920,7 +7920,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM | NETIF_F_LRO; dev->features |= dev->hw_features | - NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; if (sp->device_type & XFRAME_II_DEVICE) { dev->hw_features |= NETIF_F_UFO; if (ufo) diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 794444e09492..e9e58aadf87e 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -3415,12 +3415,12 @@ static int vxge_device_register(struct __vxge_hw_device *hldev, ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | - NETIF_F_HW_VLAN_TX; + NETIF_F_HW_VLAN_CTAG_TX; if (vdev->config.rth_steering != NO_STEERING) ndev->hw_features |= NETIF_F_RXHASH; ndev->features |= ndev->hw_features | - NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; ndev->netdev_ops = &vxge_netdev_ops; diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 5ae124719790..fcad64081d74 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -2961,11 +2961,11 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit) vlanflags = le32_to_cpu(np->get_rx.ex->buflow); /* - * There's need to check for NETIF_F_HW_VLAN_RX here. - * Even if vlan rx accel is disabled, + * There's need to check for NETIF_F_HW_VLAN_CTAG_RX + * here. Even if vlan rx accel is disabled, * NV_RX3_VLAN_TAG_PRESENT is pseudo randomly set. 
*/ - if (dev->features & NETIF_F_HW_VLAN_RX && + if (dev->features & NETIF_F_HW_VLAN_CTAG_RX && vlanflags & NV_RX3_VLAN_TAG_PRESENT) { u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK; @@ -4816,7 +4816,7 @@ static netdev_features_t nv_fix_features(struct net_device *dev, netdev_features_t features) { /* vlan is dependent on rx checksum offload */ - if (features & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)) + if (features & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX)) features |= NETIF_F_RXCSUM; return features; @@ -4828,12 +4828,12 @@ static void nv_vlan_mode(struct net_device *dev, netdev_features_t features) spin_lock_irq(&np->lock); - if (features & NETIF_F_HW_VLAN_RX) + if (features & NETIF_F_HW_VLAN_CTAG_RX) np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP; else np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; - if (features & NETIF_F_HW_VLAN_TX) + if (features & NETIF_F_HW_VLAN_CTAG_TX) np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS; else np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; @@ -4870,7 +4870,7 @@ static int nv_set_features(struct net_device *dev, netdev_features_t features) spin_unlock_irq(&np->lock); } - if (changed & (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX)) + if (changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX)) nv_vlan_mode(dev, features); return 0; @@ -5705,7 +5705,8 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) np->vlanctl_bits = 0; if (id->driver_data & DEV_HAS_VLAN) { np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; - dev->hw_features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX; + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; } dev->features |= dev->hw_features; @@ -5996,7 +5997,8 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) dev->features & NETIF_F_HIGHDMA ? "highdma " : "", dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ? "csum " : "", - dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ? + dev->features & (NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX) ? "vlan " : "", dev->features & (NETIF_F_LOOPBACK) ? 
"loopback " : "", diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 7867aebc05f2..af951f343ff6 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -1345,7 +1345,7 @@ netxen_setup_netdev(struct netxen_adapter *adapter, } if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX) - netdev->hw_features |= NETIF_F_HW_VLAN_TX; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO) netdev->hw_features |= NETIF_F_LRO; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 0d00b2bd2c81..845ba1d1c3c9 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -1714,7 +1714,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO | - NETIF_F_HW_VLAN_RX); + NETIF_F_HW_VLAN_CTAG_RX); netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); @@ -1729,7 +1729,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, } if (qlcnic_vlan_tx_check(adapter)) - netdev->features |= (NETIF_F_HW_VLAN_TX); + netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX); if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO) netdev->features |= NETIF_F_LRO; diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 1dd778a6f01e..8e3f43c75665 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -409,7 +409,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type, (qdev-> func << CAM_OUT_FUNC_SHIFT) | (0 << CAM_OUT_CQ_ID_SHIFT)); - if (qdev->ndev->features & NETIF_F_HW_VLAN_RX) + if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) cam_output |= CAM_OUT_RV; /* route to NIC core */ ql_write32(qdev, MAC_ADDR_DATA, cam_output); @@ -2279,7 +2279,7 @@ static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features) { struct ql_adapter *qdev = netdev_priv(ndev); - if (features & NETIF_F_HW_VLAN_RX) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) { ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK | NIC_RCV_CFG_VLAN_MATCH_AND_NON); } else { @@ -2294,10 +2294,10 @@ static netdev_features_t qlge_fix_features(struct net_device *ndev, * Since there is no support for separate rx/tx vlan accel * enable/disable make sure tx flag is always in same state as rx. 
*/ - if (features & NETIF_F_HW_VLAN_RX) - features |= NETIF_F_HW_VLAN_TX; + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; else - features &= ~NETIF_F_HW_VLAN_TX; + features &= ~NETIF_F_HW_VLAN_CTAG_TX; return features; } @@ -2307,7 +2307,7 @@ static int qlge_set_features(struct net_device *ndev, { netdev_features_t changed = ndev->features ^ features; - if (changed & NETIF_F_HW_VLAN_RX) + if (changed & NETIF_F_HW_VLAN_CTAG_RX) qlge_vlan_mode(ndev, features); return 0; @@ -4665,9 +4665,9 @@ static int qlge_probe(struct pci_dev *pdev, SET_NETDEV_DEV(ndev, &pdev->dev); ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_TSO_ECN | - NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM; + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXCSUM; ndev->features = ndev->hw_features | - NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; ndev->vlan_features = ndev->hw_features; if (test_bit(QL_DMA64, &qdev->flags)) diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index b62a32484f6a..6d03b52e56f1 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -1438,7 +1438,7 @@ static int cp_set_features(struct net_device *dev, netdev_features_t features) else cp->cpcmd &= ~RxChkSum; - if (features & NETIF_F_HW_VLAN_RX) + if (features & NETIF_F_HW_VLAN_CTAG_RX) cp->cpcmd |= RxVlanOn; else cp->cpcmd &= ~RxVlanOn; @@ -1955,14 +1955,14 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) dev->ethtool_ops = &cp_ethtool_ops; dev->watchdog_timeo = TX_TIMEOUT; - dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; if (pci_using_dac) dev->features |= NETIF_F_HIGHDMA; /* disabled by default until verified */ dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | - NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_HIGHDMA; diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 9a1bc1a23854..86d5d7909d10 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1793,16 +1793,17 @@ static void __rtl8169_set_features(struct net_device *dev, netdev_features_t changed = features ^ dev->features; void __iomem *ioaddr = tp->mmio_addr; - if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX))) + if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_RX))) return; - if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)) { + if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) { if (features & NETIF_F_RXCSUM) tp->cp_cmd |= RxChkSum; else tp->cp_cmd &= ~RxChkSum; - if (dev->features & NETIF_F_HW_VLAN_RX) + if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) tp->cp_cmd |= RxVlan; else tp->cp_cmd &= ~RxVlan; @@ -7036,16 +7037,17 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* don't enable SG, IP_CSUM and TSO by default - it might not work * properly for all devices */ dev->features |= NETIF_F_RXCSUM | - NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | - NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; dev->vlan_features = 
NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_HIGHDMA; if (tp->mac_version == RTL_GIGA_MAC_VER_05) /* 8110SCd requires hardware Rx VLAN - disallow toggling */ - dev->hw_features &= ~NETIF_F_HW_VLAN_RX; + dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; dev->hw_features |= NETIF_F_RXALL; dev->hw_features |= NETIF_F_RXFCS; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index a7499cbf4503..3d4a1ed0a7ab 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2749,7 +2749,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) goto out_release; } mdp->port = devno % 2; - ndev->features = NETIF_F_HW_VLAN_FILTER; + ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER; } /* initialize first or needed device */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 71b64857e3a6..618446ae1ec1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2679,7 +2679,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, ndev->watchdog_timeo = msecs_to_jiffies(watchdog); #ifdef STMMAC_VLAN_TAG_USED /* Both mac100 and gmac support receive VLAN tag detection */ - ndev->features |= NETIF_F_HW_VLAN_RX; + ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; #endif priv->msg_enable = netif_msg_init(debug, default_msg_level); diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index e8824cea093b..5ca4b33fc4c1 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -2017,12 +2017,12 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * so we can have them same for all ports of the board */ ndev->if_port = port; ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO - | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | - NETIF_F_HW_VLAN_FILTER | NETIF_F_RXCSUM + | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM /*| NETIF_F_FRAGLIST */ ; ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | - NETIF_F_TSO | NETIF_F_HW_VLAN_TX; + NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX; if (pci_using_dac) ndev->features |= NETIF_F_HIGHDMA; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 1d740423a053..084992981cef 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1599,7 +1599,7 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev, priv_sl2->num_irqs = priv->num_irqs; } - ndev->features |= NETIF_F_HW_VLAN_FILTER; + ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; ndev->netdev_ops = &cpsw_netdev_ops; SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops); @@ -1837,7 +1837,7 @@ static int cpsw_probe(struct platform_device *pdev) k++; } - ndev->features |= NETIF_F_HW_VLAN_FILTER; + ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; ndev->netdev_ops = &cpsw_netdev_ops; SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops); diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index fef6b59e69c9..c655fe60121e 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -2329,8 +2329,8 @@ spider_net_setup_netdev(struct spider_net_card *card) if (SPIDER_NET_RX_CSUM_DEFAULT) netdev->features |= NETIF_F_RXCSUM; netdev->features |= NETIF_F_IP_CSUM | NETIF_F_LLTX; - /* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | - * NETIF_F_HW_VLAN_FILTER */ + /* some 
time: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | + * NETIF_F_HW_VLAN_CTAG_FILTER */ netdev->irq = card->pdev->irq; card->num_rx_ints = 0; diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index 185c721c52d7..37b02c3768be 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -1026,8 +1026,9 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; if (pdev->revision >= VT6105M) - dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | - NETIF_F_HW_VLAN_FILTER; + dev->features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; /* dev->name not defined before register_netdev()! */ rc = register_netdev(dev); diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 1bc7f9fd2583..c1c55a7da941 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -2810,9 +2810,10 @@ static int velocity_found1(struct pci_dev *pdev, dev->ethtool_ops = &velocity_ethtool_ops; netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT); - dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HW_VLAN_TX; - dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | - NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM; + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | + NETIF_F_HW_VLAN_CTAG_TX; + dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_IP_CSUM; ret = register_netdev(dev); if (ret < 0) diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 4a7c60f4c83d..57c2e5ef2804 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1018,9 +1018,9 @@ static int temac_of_probe(struct platform_device *op) ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */ ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */ ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */ - ndev->features |= NETIF_F_HW_VLAN_TX; /* Transmit VLAN hw accel */ - ndev->features |= NETIF_F_HW_VLAN_RX; /* Receive VLAN hw acceleration */ - ndev->features |= NETIF_F_HW_VLAN_FILTER; /* Receive VLAN filtering */ + ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; /* Transmit VLAN hw accel */ + ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; /* Receive VLAN hw acceleration */ + ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; /* Receive VLAN filtering */ ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */ ndev->features |= NETIF_F_GSO; /* Enable software GSO. 
*/ ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */ diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 5f85205cd12b..4559bb8115bf 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -431,7 +431,7 @@ static int netvsc_probe(struct hv_device *dev, /* TODO: Add GSO and Checksum offload */ net->hw_features = NETIF_F_SG; - net->features = NETIF_F_SG | NETIF_F_HW_VLAN_TX; + net->features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_TX; SET_ETHTOOL_OPS(net, &ethtool_ops); SET_NETDEV_DEV(net, &dev->device); diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 82164381f778..724ce7a36c9b 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -166,7 +166,7 @@ static const struct net_device_ops ifb_netdev_ops = { #define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \ NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6 | \ - NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX) + NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX) static void ifb_setup(struct net_device *dev) { diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 70af6dc07d40..fedd34cff91c 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -471,7 +471,7 @@ static struct lock_class_key macvlan_netdev_addr_lock_key; (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \ - NETIF_F_HW_VLAN_FILTER) + NETIF_F_HW_VLAN_CTAG_FILTER) #define MACVLAN_STATE_MASK \ ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 9a31e8e50fac..9290eb23d664 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1841,9 +1841,9 @@ static void team_setup(struct net_device *dev) dev->features |= NETIF_F_LLTX; dev->features |= NETIF_F_GRO; dev->hw_features = TEAM_VLAN_FEATURES | - NETIF_F_HW_VLAN_TX | - NETIF_F_HW_VLAN_RX | - NETIF_F_HW_VLAN_FILTER; + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM); dev->features |= dev->hw_features; diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 16c842997291..b7e9cd7ca6d5 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -101,7 +101,7 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->flags |= IFF_NOARP; /* no need to put the VLAN tci in the packet headers */ - dev->net->features |= NETIF_F_HW_VLAN_TX; + dev->net->features |= NETIF_F_HW_VLAN_CTAG_TX; err: return ret; } diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 07a4af0aa3dc..f116c593d879 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -255,7 +255,7 @@ static const struct net_device_ops veth_netdev_ops = { #define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_HIGHDMA | \ - NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX) + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX) static void veth_setup(struct net_device *dev) { diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 8fdfde6832ab..b61d57ab3c63 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1376,7 +1376,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi) if (vi->has_cvq) { vi->cvq = vqs[total_vqs - 1]; if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) - vi->dev->features |=
NETIF_F_HW_VLAN_FILTER; + vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; } for (i = 0; i < vi->max_queue_pairs; i++) { diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index eae7a03d4f9b..ba9bdad39986 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -2107,7 +2107,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) devRead->misc.uptFeatures |= UPT1_F_LRO; devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS); } - if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) + if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) devRead->misc.uptFeatures |= UPT1_F_RXVLAN; devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu); @@ -2669,14 +2669,15 @@ vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64) struct net_device *netdev = adapter->netdev; netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | - NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX | - NETIF_F_HW_VLAN_RX | NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_LRO; if (dma64) netdev->hw_features |= NETIF_F_HIGHDMA; netdev->vlan_features = netdev->hw_features & - ~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); - netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_FILTER; + ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX); + netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; } diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 63a124340cbe..600ab56c0008 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -263,7 +263,8 @@ int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features) unsigned long flags; netdev_features_t changed = features ^ netdev->features; - if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_RX)) { + if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO | + NETIF_F_HW_VLAN_CTAG_RX)) { if (features & NETIF_F_RXCSUM) adapter->shared->devRead.misc.uptFeatures |= UPT1_F_RXCSUM; @@ -279,7 +280,7 @@ int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features) adapter->shared->devRead.misc.uptFeatures &= ~UPT1_F_LRO; - if (features & NETIF_F_HW_VLAN_RX) + if (features & NETIF_F_HW_VLAN_CTAG_RX) adapter->shared->devRead.misc.uptFeatures |= UPT1_F_RXVLAN; else diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index d690166efeaf..90ddd823605c 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -959,7 +959,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) SET_ETHTOOL_OPS(card->dev, &qeth_l2_ethtool_ops); else SET_ETHTOOL_OPS(card->dev, &qeth_l2_osn_ops); - card->dev->features |= NETIF_F_HW_VLAN_FILTER; + card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; card->info.broadcast_capable = 1; qeth_l2_request_initial_mac(card); SET_NETDEV_DEV(card->dev, &card->gdev->dev); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 8710337dab3e..04261bc08f20 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -3294,9 +3294,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) card->dev->watchdog_timeo = QETH_TX_TIMEOUT; card->dev->mtu = card->info.initial_mtu; SET_ETHTOOL_OPS(card->dev, &qeth_l3_ethtool_ops); - card->dev->features |= NETIF_F_HW_VLAN_TX | - NETIF_F_HW_VLAN_RX | - NETIF_F_HW_VLAN_FILTER; + card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX | + 
NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; card->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; card->dev->gso_max_size = 15 * PAGE_SIZE; diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 9bfdc9a3f897..292b24f9bf93 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1655,7 +1655,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) skb->priority = fcoe->priority; if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN && - fcoe->realdev->features & NETIF_F_HW_VLAN_TX) { + fcoe->realdev->features & NETIF_F_HW_VLAN_CTAG_TX) { skb->vlan_tci = VLAN_TAG_PRESENT | vlan_dev_vlan_id(fcoe->netdev); skb->dev = fcoe->realdev; diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 70962f3fdb79..fee28291a824 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -238,7 +238,7 @@ static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb, */ static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb, u16 vlan_tci) { - if (skb->dev->features & NETIF_F_HW_VLAN_TX) { + if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX) { return __vlan_hwaccel_put_tag(skb, vlan_tci); } else { return __vlan_put_tag(skb, vlan_tci); @@ -294,7 +294,7 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb, */ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) { - if (skb->dev->features & NETIF_F_HW_VLAN_TX) { + if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX) { return __vlan_hwaccel_get_tag(skb, vlan_tci); } else { return __vlan_get_tag(skb, vlan_tci); diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index d6ee2d008ee4..785913b8983d 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -22,9 +22,9 @@ enum { NETIF_F_IPV6_CSUM_BIT, /* Can checksum TCP/UDP over IPV6 */ NETIF_F_HIGHDMA_BIT, /* Can DMA to high memory. */ NETIF_F_FRAGLIST_BIT, /* Scatter/gather IO. */ - NETIF_F_HW_VLAN_TX_BIT, /* Transmit VLAN hw acceleration */ - NETIF_F_HW_VLAN_RX_BIT, /* Receive VLAN hw acceleration */ - NETIF_F_HW_VLAN_FILTER_BIT, /* Receive filtering on VLAN */ + NETIF_F_HW_VLAN_CTAG_TX_BIT, /* Transmit VLAN CTAG HW acceleration */ + NETIF_F_HW_VLAN_CTAG_RX_BIT, /* Receive VLAN CTAG HW acceleration */ + NETIF_F_HW_VLAN_CTAG_FILTER_BIT,/* Receive filtering on VLAN CTAGs */ NETIF_F_VLAN_CHALLENGED_BIT, /* Device cannot handle VLAN packets */ NETIF_F_GSO_BIT, /* Enable software GSO. */ NETIF_F_LLTX_BIT, /* LockLess TX - deprecated. Please */ @@ -80,9 +80,9 @@ enum { #define NETIF_F_GSO_ROBUST __NETIF_F(GSO_ROBUST) #define NETIF_F_HIGHDMA __NETIF_F(HIGHDMA) #define NETIF_F_HW_CSUM __NETIF_F(HW_CSUM) -#define NETIF_F_HW_VLAN_FILTER __NETIF_F(HW_VLAN_FILTER) -#define NETIF_F_HW_VLAN_RX __NETIF_F(HW_VLAN_RX) -#define NETIF_F_HW_VLAN_TX __NETIF_F(HW_VLAN_TX) +#define NETIF_F_HW_VLAN_CTAG_FILTER __NETIF_F(HW_VLAN_CTAG_FILTER) +#define NETIF_F_HW_VLAN_CTAG_RX __NETIF_F(HW_VLAN_CTAG_RX) +#define NETIF_F_HW_VLAN_CTAG_TX __NETIF_F(HW_VLAN_CTAG_TX) #define NETIF_F_IP_CSUM __NETIF_F(IP_CSUM) #define NETIF_F_IPV6_CSUM __NETIF_F(IPV6_CSUM) #define NETIF_F_LLTX __NETIF_F(LLTX) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 623b57b52195..7eb7e03ee417 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -785,11 +785,13 @@ struct netdev_fcoe_hbainfo { * neither operation. 
* * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid); - * If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER) + * If device support VLAN filtering (dev->features & + * NETIF_F_HW_VLAN_CTAG_FILTER) * this function is called when a VLAN id is registered. * * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid); - * If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER) + * If device support VLAN filtering (dev->features & + * NETIF_F_HW_VLAN_CTAG_FILTER) * this function is called when a VLAN id is unregistered. * * void (*ndo_poll_controller)(struct net_device *dev); diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 85addcd9372b..d913feed0757 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -301,7 +301,7 @@ static void vlan_transfer_features(struct net_device *dev, { vlandev->gso_max_size = dev->gso_max_size; - if (dev->features & NETIF_F_HW_VLAN_TX) + if (dev->features & NETIF_F_HW_VLAN_CTAG_TX) vlandev->hard_header_len = dev->hard_header_len; else vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN; @@ -347,7 +347,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, __vlan_device_event(dev, event); if ((event == NETDEV_UP) && - (dev->features & NETIF_F_HW_VLAN_FILTER)) { + (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) { pr_info("adding VLAN 0 to HW filter on device %s\n", dev->name); vlan_vid_add(dev, 0); @@ -415,7 +415,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, break; case NETDEV_DOWN: - if (dev->features & NETIF_F_HW_VLAN_FILTER) + if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) vlan_vid_del(dev, 0); /* Put all VLANs for this dev in the down state too. */ diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index f3b6f515eba6..3df29d344704 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c @@ -225,7 +225,7 @@ static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid, if (!vid_info) return -ENOMEM; - if (dev->features & NETIF_F_HW_VLAN_FILTER) { + if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { err = ops->ndo_vlan_rx_add_vid(dev, vid); if (err) { kfree(vid_info); @@ -282,7 +282,7 @@ static void __vlan_vid_del(struct vlan_info *vlan_info, unsigned short vid = vid_info->vid; int err; - if (dev->features & NETIF_F_HW_VLAN_FILTER) { + if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { err = ops->ndo_vlan_rx_kill_vid(dev, vid); if (err) { pr_warn("failed to kill vid %d for device %s\n", diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 19cf81bf9f69..5c4892a86410 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -583,7 +583,7 @@ static int vlan_dev_init(struct net_device *dev) #endif dev->needed_headroom = real_dev->needed_headroom; - if (real_dev->features & NETIF_F_HW_VLAN_TX) { + if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) { dev->header_ops = real_dev->header_ops; dev->hard_header_len = real_dev->hard_header_len; } else { diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 314c73ed418f..967312803e41 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -348,10 +348,10 @@ void br_dev_setup(struct net_device *dev) dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | NETIF_F_GSO_MASK | NETIF_F_HW_CSUM | NETIF_F_LLTX | - NETIF_F_NETNS_LOCAL | NETIF_F_HW_VLAN_TX; + NETIF_F_NETNS_LOCAL | NETIF_F_HW_VLAN_CTAG_TX; dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | NETIF_F_GSO_MASK | 
NETIF_F_HW_CSUM | - NETIF_F_HW_VLAN_TX; + NETIF_F_HW_VLAN_CTAG_TX; br->dev = dev; spin_lock_init(&br->lock); diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 93dde75923f0..0b3dbbec80d0 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -54,7 +54,7 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags) dev = br->dev; } - if (p && (dev->features & NETIF_F_HW_VLAN_FILTER)) { + if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) { /* Add VLAN to the device filter if it is supported. * Stricly speaking, this is not necessary now, since * devices are made promiscuous by the bridge, but if @@ -82,7 +82,7 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags) return 0; out_filt: - if (p && (dev->features & NETIF_F_HW_VLAN_FILTER)) + if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, vid); return err; } @@ -98,7 +98,7 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid) if (v->port_idx && vid) { struct net_device *dev = v->parent.port->dev; - if (dev->features & NETIF_F_HW_VLAN_FILTER) + if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, vid); } diff --git a/net/core/dev.c b/net/core/dev.c index 3655ff927315..07a8e9dc43fc 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2435,13 +2435,13 @@ netdev_features_t netif_skb_features(struct sk_buff *skb) return harmonize_features(skb, protocol, features); } - features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX); + features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX); if (protocol != htons(ETH_P_8021Q)) { return harmonize_features(skb, protocol, features); } else { features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | - NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX; + NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX; return harmonize_features(skb, protocol, features); } } @@ -2482,7 +2482,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, features = netif_skb_features(skb); if (vlan_tx_tag_present(skb) && - !(features & NETIF_F_HW_VLAN_TX)) { + !(features & NETIF_F_HW_VLAN_CTAG_TX)) { skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); if (unlikely(!skb)) goto out; @@ -5180,7 +5180,8 @@ int register_netdevice(struct net_device *dev) } } - if (((dev->hw_features | dev->features) & NETIF_F_HW_VLAN_FILTER) && + if (((dev->hw_features | dev->features) & + NETIF_F_HW_VLAN_CTAG_FILTER) && (!dev->netdev_ops->ndo_vlan_rx_add_vid || !dev->netdev_ops->ndo_vlan_rx_kill_vid)) { netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n"); diff --git a/net/core/ethtool.c b/net/core/ethtool.c index adc1351e6873..b87712cfd26c 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -60,10 +60,10 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] [NETIF_F_IPV6_CSUM_BIT] = "tx-checksum-ipv6", [NETIF_F_HIGHDMA_BIT] = "highdma", [NETIF_F_FRAGLIST_BIT] = "tx-scatter-gather-fraglist", - [NETIF_F_HW_VLAN_TX_BIT] = "tx-vlan-hw-insert", + [NETIF_F_HW_VLAN_CTAG_TX_BIT] = "tx-vlan-ctag-hw-insert", - [NETIF_F_HW_VLAN_RX_BIT] = "rx-vlan-hw-parse", - [NETIF_F_HW_VLAN_FILTER_BIT] = "rx-vlan-filter", + [NETIF_F_HW_VLAN_CTAG_RX_BIT] = "rx-vlan-ctag-hw-parse", + [NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = "rx-vlan-ctag-filter", [NETIF_F_VLAN_CHALLENGED_BIT] = "vlan-challenged", [NETIF_F_GSO_BIT] = "tx-generic-segmentation", [NETIF_F_LLTX_BIT] = "tx-lockless", @@ -267,18 +267,19 @@ static int ethtool_set_one_feature(struct net_device *dev, #define ETH_ALL_FLAGS (ETH_FLAG_LRO | 
ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | \ ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH) -#define ETH_ALL_FEATURES (NETIF_F_LRO | NETIF_F_HW_VLAN_RX | \ - NETIF_F_HW_VLAN_TX | NETIF_F_NTUPLE | NETIF_F_RXHASH) +#define ETH_ALL_FEATURES (NETIF_F_LRO | NETIF_F_HW_VLAN_CTAG_RX | \ + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_NTUPLE | \ + NETIF_F_RXHASH) static u32 __ethtool_get_flags(struct net_device *dev) { u32 flags = 0; - if (dev->features & NETIF_F_LRO) flags |= ETH_FLAG_LRO; - if (dev->features & NETIF_F_HW_VLAN_RX) flags |= ETH_FLAG_RXVLAN; - if (dev->features & NETIF_F_HW_VLAN_TX) flags |= ETH_FLAG_TXVLAN; - if (dev->features & NETIF_F_NTUPLE) flags |= ETH_FLAG_NTUPLE; - if (dev->features & NETIF_F_RXHASH) flags |= ETH_FLAG_RXHASH; + if (dev->features & NETIF_F_LRO) flags |= ETH_FLAG_LRO; + if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) flags |= ETH_FLAG_RXVLAN; + if (dev->features & NETIF_F_HW_VLAN_CTAG_TX) flags |= ETH_FLAG_TXVLAN; + if (dev->features & NETIF_F_NTUPLE) flags |= ETH_FLAG_NTUPLE; + if (dev->features & NETIF_F_RXHASH) flags |= ETH_FLAG_RXHASH; return flags; } @@ -291,8 +292,8 @@ static int __ethtool_set_flags(struct net_device *dev, u32 data) return -EINVAL; if (data & ETH_FLAG_LRO) features |= NETIF_F_LRO; - if (data & ETH_FLAG_RXVLAN) features |= NETIF_F_HW_VLAN_RX; - if (data & ETH_FLAG_TXVLAN) features |= NETIF_F_HW_VLAN_TX; + if (data & ETH_FLAG_RXVLAN) features |= NETIF_F_HW_VLAN_CTAG_RX; + if (data & ETH_FLAG_TXVLAN) features |= NETIF_F_HW_VLAN_CTAG_TX; if (data & ETH_FLAG_NTUPLE) features |= NETIF_F_NTUPLE; if (data & ETH_FLAG_RXHASH) features |= NETIF_F_RXHASH; diff --git a/net/core/netpoll.c b/net/core/netpoll.c index a3a17aed3639..8de961e67cf7 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -383,7 +383,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, if (__netif_tx_trylock(txq)) { if (!netif_xmit_stopped(txq)) { if (vlan_tx_tag_present(skb) && - !(netif_skb_features(skb) & NETIF_F_HW_VLAN_TX)) { + !(netif_skb_features(skb) & NETIF_F_HW_VLAN_CTAG_TX)) { skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); if (unlikely(!skb)) break; diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c index 9604760494b1..73682de8dc69 100644 --- a/net/openvswitch/vport-internal_dev.c +++ b/net/openvswitch/vport-internal_dev.c @@ -137,7 +137,7 @@ static void do_setup(struct net_device *netdev) NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_TSO; netdev->vlan_features = netdev->features; - netdev->features |= NETIF_F_HW_VLAN_TX; + netdev->features |= NETIF_F_HW_VLAN_CTAG_TX; netdev->hw_features = netdev->features & ~NETIF_F_LLTX; eth_hw_addr_random(netdev); } -- cgit v1.2.3-70-g09d2 From 80d5c3689b886308247da295a228a54df49a44f6 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Fri, 19 Apr 2013 02:04:28 +0000 Subject: net: vlan: prepare for 802.1ad VLAN filtering offload Change the rx_{add,kill}_vid callbacks to take a protocol argument in preparation of 802.1ad support. The protocol argument used so far is always htons(ETH_P_8021Q). Signed-off-by: Patrick McHardy Signed-off-by: David S. 
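As a hedged illustration of the new callback shape, a driver-side handler under this signature might look roughly like the sketch below; foo_adapter, active_vlans and foo_hw_set_vid_filter are invented placeholders rather than code from any in-tree driver, and only 802.1Q (CTAG) filtering is assumed, matching the note above that the core still passes htons(ETH_P_8021Q) everywhere.

#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

struct foo_adapter {
	/* Bitmap of offloaded VLAN ids, kept so filters can be restored after a reset. */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
};

/* Stub standing in for whatever programs the device's VLAN filter table. */
static int foo_hw_set_vid_filter(struct foo_adapter *adapter, u16 vid, bool on)
{
	return 0;
}

static int foo_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct foo_adapter *adapter = netdev_priv(netdev);

	/* Only CTAG (802.1Q) filtering is handled in this sketch. */
	if (proto != htons(ETH_P_8021Q))
		return -EOPNOTSUPP;

	set_bit(vid, adapter->active_vlans);
	return foo_hw_set_vid_filter(adapter, vid, true);
}

A matching foo_vlan_rx_kill_vid would clear the bit and call the same helper with on == false; both would then be wired into struct net_device_ops as ndo_vlan_rx_add_vid and ndo_vlan_rx_kill_vid.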
Miller --- drivers/net/bonding/bond_main.c | 17 ++++---- drivers/net/ethernet/adaptec/starfire.c | 6 ++- drivers/net/ethernet/brocade/bna/bnad.c | 6 +-- drivers/net/ethernet/cisco/enic/enic_dev.c | 4 +- drivers/net/ethernet/cisco/enic/enic_dev.h | 4 +- drivers/net/ethernet/emulex/benet/be_main.c | 4 +- drivers/net/ethernet/ibm/ehea/ehea_main.c | 4 +- drivers/net/ethernet/intel/e1000/e1000_main.c | 22 +++++++---- drivers/net/ethernet/intel/e1000e/netdev.c | 20 ++++++---- drivers/net/ethernet/intel/igb/igb_main.c | 12 +++--- drivers/net/ethernet/intel/igbvf/netdev.c | 8 ++-- drivers/net/ethernet/intel/ixgb/ixgb_main.c | 12 +++--- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 10 +++-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 9 +++-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 6 ++- drivers/net/ethernet/neterion/vxge/vxge-main.c | 8 ++-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 8 ++-- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 4 +- drivers/net/ethernet/renesas/sh_eth.c | 6 ++- drivers/net/ethernet/tehuti/tehuti.c | 4 +- drivers/net/ethernet/ti/cpsw.c | 4 +- drivers/net/ethernet/via/via-rhine.c | 10 +++-- drivers/net/ethernet/via/via-velocity.c | 6 ++- drivers/net/macvlan.c | 8 ++-- drivers/net/team/team.c | 10 ++--- drivers/net/virtio_net.c | 6 ++- drivers/net/vmxnet3/vmxnet3_drv.c | 4 +- drivers/s390/net/qeth_l2_main.c | 6 ++- drivers/s390/net/qeth_l3_main.c | 6 ++- include/linux/if_vlan.h | 4 +- include/linux/netdevice.h | 16 ++++---- net/8021q/vlan.c | 10 ++--- net/8021q/vlan_core.c | 47 +++++++++++++---------- net/bridge/br_vlan.c | 10 +++-- 34 files changed, 184 insertions(+), 137 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 8d324f8a1757..35e89e12a1f7 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -428,14 +428,15 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, * @bond_dev: bonding net device that got called * @vid: vlan id being added */ -static int bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid) +static int bond_vlan_rx_add_vid(struct net_device *bond_dev, + __be16 proto, u16 vid) { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave, *stop_at; int i, res; bond_for_each_slave(bond, slave, i) { - res = vlan_vid_add(slave->dev, vid); + res = vlan_vid_add(slave->dev, proto, vid); if (res) goto unwind; } @@ -453,7 +454,7 @@ unwind: /* unwind from head to the slave that failed */ stop_at = slave; bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) - vlan_vid_del(slave->dev, vid); + vlan_vid_del(slave->dev, proto, vid); return res; } @@ -463,14 +464,15 @@ unwind: * @bond_dev: bonding net device that got called * @vid: vlan id being removed */ -static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid) +static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, + __be16 proto, u16 vid) { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave; int i, res; bond_for_each_slave(bond, slave, i) - vlan_vid_del(slave->dev, vid); + vlan_vid_del(slave->dev, proto, vid); res = bond_del_vlan(bond, vid); if (res) { @@ -488,7 +490,8 @@ static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *sla int res; list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { - res = vlan_vid_add(slave_dev, vlan->vlan_id); + res = vlan_vid_add(slave_dev, htons(ETH_P_8021Q), + vlan->vlan_id); if (res) pr_warning("%s: Failed to add vlan id %d to device 
%s\n", bond->dev->name, vlan->vlan_id, @@ -504,7 +507,7 @@ static void bond_del_vlans_from_slave(struct bonding *bond, list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { if (!vlan->vlan_id) continue; - vlan_vid_del(slave_dev, vlan->vlan_id); + vlan_vid_del(slave_dev, htons(ETH_P_8021Q), vlan->vlan_id); } } diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c index 365865130f7c..cdbc5443ae3b 100644 --- a/drivers/net/ethernet/adaptec/starfire.c +++ b/drivers/net/ethernet/adaptec/starfire.c @@ -594,7 +594,8 @@ static const struct ethtool_ops ethtool_ops; #ifdef VLAN_SUPPORT -static int netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +static int netdev_vlan_rx_add_vid(struct net_device *dev, + __be16 proto, u16 vid) { struct netdev_private *np = netdev_priv(dev); @@ -608,7 +609,8 @@ static int netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) return 0; } -static int netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +static int netdev_vlan_rx_kill_vid(struct net_device *dev, + __be16 proto, u16 vid) { struct netdev_private *np = netdev_priv(dev); diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 1d9d0371a743..c0bc44e51fa9 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -3068,8 +3068,7 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu) } static int -bnad_vlan_rx_add_vid(struct net_device *netdev, - unsigned short vid) +bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct bnad *bnad = netdev_priv(netdev); unsigned long flags; @@ -3090,8 +3089,7 @@ bnad_vlan_rx_add_vid(struct net_device *netdev, } static int -bnad_vlan_rx_kill_vid(struct net_device *netdev, - unsigned short vid) +bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct bnad *bnad = netdev_priv(netdev); unsigned long flags; diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.c b/drivers/net/ethernet/cisco/enic/enic_dev.c index bf0fc56dba19..4b6e5695b263 100644 --- a/drivers/net/ethernet/cisco/enic/enic_dev.c +++ b/drivers/net/ethernet/cisco/enic/enic_dev.c @@ -212,7 +212,7 @@ int enic_dev_deinit_done(struct enic *enic, int *status) } /* rtnl lock is held */ -int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct enic *enic = netdev_priv(netdev); int err; @@ -225,7 +225,7 @@ int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid) } /* rtnl lock is held */ -int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct enic *enic = netdev_priv(netdev); int err; diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h index da1cba3c410e..08bded051b93 100644 --- a/drivers/net/ethernet/cisco/enic/enic_dev.h +++ b/drivers/net/ethernet/cisco/enic/enic_dev.h @@ -46,8 +46,8 @@ int enic_dev_packet_filter(struct enic *enic, int directed, int multicast, int broadcast, int promisc, int allmulti); int enic_dev_add_addr(struct enic *enic, u8 *addr); int enic_dev_del_addr(struct enic *enic, u8 *addr); -int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid); -int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); +int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid); +int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 
proto, u16 vid); int enic_dev_notify_unset(struct enic *enic); int enic_dev_hang_notify(struct enic *enic); int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index bde26d4d52ec..b41333184916 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -902,7 +902,7 @@ set_vlan_promisc: return status; } -static int be_vlan_add_vid(struct net_device *netdev, u16 vid) +static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct be_adapter *adapter = netdev_priv(netdev); int status = 0; @@ -928,7 +928,7 @@ ret: return status; } -static int be_vlan_rem_vid(struct net_device *netdev, u16 vid) +static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct be_adapter *adapter = netdev_priv(netdev); int status = 0; diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 9c9fa745ff82..d1812aacbc7b 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -2110,7 +2110,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } -static int ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +static int ehea_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) { struct ehea_port *port = netdev_priv(dev); struct ehea_adapter *adapter = port->adapter; @@ -2148,7 +2148,7 @@ out: return err; } -static int ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +static int ehea_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) { struct ehea_port *port = netdev_priv(dev); struct ehea_adapter *adapter = port->adapter; diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 8d0d0d420c21..8239dafdd8e7 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -166,8 +166,10 @@ static void e1000_vlan_mode(struct net_device *netdev, netdev_features_t features); static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, bool filter_on); -static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid); -static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid); +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid); static void e1000_restore_vlan(struct e1000_adapter *adapter); #ifdef CONFIG_PM @@ -333,7 +335,7 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter) if (!test_bit(vid, adapter->active_vlans)) { if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { - e1000_vlan_rx_add_vid(netdev, vid); + e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); adapter->mng_vlan_id = vid; } else { adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; @@ -341,7 +343,8 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter) if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid) && !test_bit(old_vid, adapter->active_vlans)) - e1000_vlan_rx_kill_vid(netdev, old_vid); + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + old_vid); } else { adapter->mng_vlan_id = vid; } @@ -1457,7 +1460,8 @@ static int e1000_close(struct net_device *netdev) if ((hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && !test_bit(adapter->mng_vlan_id, 
adapter->active_vlans)) { - e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + adapter->mng_vlan_id); } return 0; @@ -4837,7 +4841,8 @@ static void e1000_vlan_mode(struct net_device *netdev, e1000_irq_enable(adapter); } -static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -4862,7 +4867,8 @@ static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) return 0; } -static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -4896,7 +4902,7 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter) e1000_vlan_filter_on_off(adapter, true); for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) - e1000_vlan_rx_add_vid(adapter->netdev, vid); + e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); } int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index a2e7db33bf9d..8c17f01a155f 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -2672,7 +2672,8 @@ static int e1000e_poll(struct napi_struct *napi, int weight) return work_done; } -static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -2697,7 +2698,8 @@ static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) return 0; } -static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -2741,7 +2743,8 @@ static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter) ew32(RCTL, rctl); if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) { - e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + adapter->mng_vlan_id); adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; } } @@ -2802,22 +2805,22 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter) u16 old_vid = adapter->mng_vlan_id; if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { - e1000_vlan_rx_add_vid(netdev, vid); + e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); adapter->mng_vlan_id = vid; } if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid)) - e1000_vlan_rx_kill_vid(netdev, old_vid); + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), old_vid); } static void e1000_restore_vlan(struct e1000_adapter *adapter) { u16 vid; - e1000_vlan_rx_add_vid(adapter->netdev, 0); + e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) - e1000_vlan_rx_add_vid(adapter->netdev, vid); + e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); } static void e1000_init_manageability_pt(struct e1000_adapter *adapter) @@ -4384,7 +4387,8 @@ static int e1000_close(struct net_device *netdev) * the same ID is 
registered on the host OS (let 8021q kill it) */ if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) - e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + adapter->mng_vlan_id); /* If AMT is enabled, let the firmware know that the network * interface is now closed diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index b0b1777c0af6..d13ea71c7c1f 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -159,8 +159,8 @@ static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); static void igb_tx_timeout(struct net_device *); static void igb_reset_task(struct work_struct *); static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features); -static int igb_vlan_rx_add_vid(struct net_device *, u16); -static int igb_vlan_rx_kill_vid(struct net_device *, u16); +static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16); +static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16); static void igb_restore_vlan(struct igb_adapter *); static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8); static void igb_ping_all_vfs(struct igb_adapter *); @@ -6976,7 +6976,8 @@ static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features) igb_rlpml_set(adapter); } -static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +static int igb_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -6993,7 +6994,8 @@ static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) return 0; } -static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +static int igb_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -7019,7 +7021,7 @@ static void igb_restore_vlan(struct igb_adapter *adapter) igb_vlan_mode(adapter->netdev, adapter->netdev->features); for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) - igb_vlan_rx_add_vid(adapter->netdev, vid); + igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); } int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx) diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 33e7b3069fb6..3854fd698b85 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1230,7 +1230,8 @@ static void igbvf_set_rlpml(struct igbvf_adapter *adapter) e1000_rlpml_set_vf(hw, max_frame_size); } -static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +static int igbvf_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -1243,7 +1244,8 @@ static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) return 0; } -static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -1262,7 +1264,7 @@ static void igbvf_restore_vlan(struct igbvf_adapter *adapter) u16 vid; for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) - igbvf_vlan_rx_add_vid(adapter->netdev, vid); + 
igbvf_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); } /** diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index e65d9e910227..a32f274acd36 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -101,8 +101,10 @@ static void ixgb_tx_timeout_task(struct work_struct *work); static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter); static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter); -static int ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid); -static int ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); +static int ixgb_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid); +static int ixgb_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid); static void ixgb_restore_vlan(struct ixgb_adapter *adapter); #ifdef CONFIG_NET_POLL_CONTROLLER @@ -2209,7 +2211,7 @@ ixgb_vlan_strip_disable(struct ixgb_adapter *adapter) } static int -ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +ixgb_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct ixgb_adapter *adapter = netdev_priv(netdev); u32 vfta, index; @@ -2226,7 +2228,7 @@ ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) } static int -ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +ixgb_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct ixgb_adapter *adapter = netdev_priv(netdev); u32 vfta, index; @@ -2248,7 +2250,7 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter) u16 vid; for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) - ixgb_vlan_rx_add_vid(adapter->netdev, vid); + ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); } #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 0316b65dfe06..3becffc77321 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -3467,7 +3467,8 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) hw->mac.ops.enable_rx_dma(hw, rxctrl); } -static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; @@ -3479,7 +3480,8 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) return 0; } -static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; @@ -3584,10 +3586,10 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) { u16 vid; - ixgbe_vlan_rx_add_vid(adapter->netdev, 0); + ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) - ixgbe_vlan_rx_add_vid(adapter->netdev, vid); + ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); } /** diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 8f907b7af319..4bc1f84c9352 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1179,7 +1179,8 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) } } -static int 
ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; @@ -1204,7 +1205,8 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) return err; } -static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; @@ -1227,7 +1229,8 @@ static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter) u16 vid; for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) - ixgbevf_vlan_rx_add_vid(adapter->netdev, vid); + ixgbevf_vlan_rx_add_vid(adapter->netdev, + htons(ETH_P_8021Q), vid); } static int ixgbevf_write_uc_addr_list(struct net_device *netdev) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index b2ba39c7143a..e7e27842d8d4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -356,7 +356,8 @@ static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv) } #endif -static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, + __be16 proto, u16 vid) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; @@ -381,7 +382,8 @@ static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) return 0; } -static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, + __be16 proto, u16 vid) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index e9e58aadf87e..a9396142201c 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -3300,12 +3300,13 @@ static void vxge_tx_watchdog(struct net_device *dev) /** * vxge_vlan_rx_add_vid * @dev: net device pointer. + * @proto: vlan protocol * @vid: vid * * Add the vlan id to the devices vlan id table */ static int -vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +vxge_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) { struct vxgedev *vdev = netdev_priv(dev); struct vxge_vpath *vpath; @@ -3323,14 +3324,15 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) } /** - * vxge_vlan_rx_add_vid + * vxge_vlan_rx_kill_vid * @dev: net device pointer. 
+ * @proto: vlan protocol * @vid: vid * * Remove the vlan id from the device's vlan id table */ static int -vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +vxge_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) { struct vxgedev *vdev = netdev_priv(dev); struct vxge_vpath *vpath; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 845ba1d1c3c9..e88e01312c67 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -86,8 +86,8 @@ static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *); static int qlcnicvf_start_firmware(struct qlcnic_adapter *); static void qlcnic_set_netdev_features(struct qlcnic_adapter *, struct qlcnic_esw_func_cfg *); -static int qlcnic_vlan_rx_add(struct net_device *, u16); -static int qlcnic_vlan_rx_del(struct net_device *, u16); +static int qlcnic_vlan_rx_add(struct net_device *, __be16, u16); +static int qlcnic_vlan_rx_del(struct net_device *, __be16, u16); #define QLCNIC_IS_TSO_CAPABLE(adapter) \ ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) @@ -902,7 +902,7 @@ void qlcnic_set_vlan_config(struct qlcnic_adapter *adapter, } static int -qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid) +qlcnic_vlan_rx_add(struct net_device *netdev, __be16 proto, u16 vid) { struct qlcnic_adapter *adapter = netdev_priv(netdev); set_bit(vid, adapter->vlans); @@ -910,7 +910,7 @@ qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid) } static int -qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid) +qlcnic_vlan_rx_del(struct net_device *netdev, __be16 proto, u16 vid) { struct qlcnic_adapter *adapter = netdev_priv(netdev); diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 8e3f43c75665..a9016acc2d6a 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -2326,7 +2326,7 @@ static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid) return err; } -static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid) +static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) { struct ql_adapter *qdev = netdev_priv(ndev); int status; @@ -2357,7 +2357,7 @@ static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid) return err; } -static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) +static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) { struct ql_adapter *qdev = netdev_priv(ndev); int status; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 3d4a1ed0a7ab..b8e52cd1a698 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2448,7 +2448,8 @@ static int sh_eth_get_vtag_index(struct sh_eth_private *mdp) return TSU_VTAG1; } -static int sh_eth_vlan_rx_add_vid(struct net_device *ndev, u16 vid) +static int sh_eth_vlan_rx_add_vid(struct net_device *ndev, + __be16 proto, u16 vid) { struct sh_eth_private *mdp = netdev_priv(ndev); int vtag_reg_index = sh_eth_get_vtag_index(mdp); @@ -2478,7 +2479,8 @@ static int sh_eth_vlan_rx_add_vid(struct net_device *ndev, u16 vid) return 0; } -static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) +static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev, + __be16 proto, u16 vid) { struct sh_eth_private *mdp = netdev_priv(ndev); int vtag_reg_index = sh_eth_get_vtag_index(mdp); diff --git 
a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index 5ca4b33fc4c1..f7050c573519 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -733,7 +733,7 @@ static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable) * @ndev: network device * @vid: VLAN vid to add */ -static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid) +static int bdx_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) { __bdx_vlan_rx_vid(ndev, vid, 1); return 0; @@ -744,7 +744,7 @@ static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid) * @ndev: network device * @vid: VLAN vid to kill */ -static int bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid) +static int bdx_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) { __bdx_vlan_rx_vid(ndev, vid, 0); return 0; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 084992981cef..5cf8d03b8cae 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1251,7 +1251,7 @@ clean_vid: } static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, - unsigned short vid) + __be16 proto, u16 vid) { struct cpsw_priv *priv = netdev_priv(ndev); @@ -1263,7 +1263,7 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, } static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, - unsigned short vid) + __be16 proto, u16 vid) { struct cpsw_priv *priv = netdev_priv(ndev); int ret; diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index 37b02c3768be..c6014916f622 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -508,8 +508,10 @@ static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev, static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static const struct ethtool_ops netdev_ethtool_ops; static int rhine_close(struct net_device *dev); -static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid); -static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid); +static int rhine_vlan_rx_add_vid(struct net_device *dev, + __be16 proto, u16 vid); +static int rhine_vlan_rx_kill_vid(struct net_device *dev, + __be16 proto, u16 vid); static void rhine_restart_tx(struct net_device *dev); static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low) @@ -1415,7 +1417,7 @@ static void rhine_update_vcam(struct net_device *dev) rhine_set_vlan_cam_mask(ioaddr, vCAMmask); } -static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) { struct rhine_private *rp = netdev_priv(dev); @@ -1426,7 +1428,7 @@ static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) return 0; } -static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) { struct rhine_private *rp = netdev_priv(dev); diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index c1c55a7da941..91cd59146c24 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -525,7 +525,8 @@ static void velocity_init_cam_filter(struct velocity_info *vptr) mac_set_vlan_cam_mask(regs, vptr->vCAMmask); } -static int velocity_vlan_rx_add_vid(struct net_device *dev, unsigned 
short vid) +static int velocity_vlan_rx_add_vid(struct net_device *dev, + __be16 proto, u16 vid) { struct velocity_info *vptr = netdev_priv(dev); @@ -536,7 +537,8 @@ static int velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) return 0; } -static int velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +static int velocity_vlan_rx_kill_vid(struct net_device *dev, + __be16 proto, u16 vid) { struct velocity_info *vptr = netdev_priv(dev); diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index fedd34cff91c..7347cdbe736f 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -567,21 +567,21 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev, } static int macvlan_vlan_rx_add_vid(struct net_device *dev, - unsigned short vid) + __be16 proto, u16 vid) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; - return vlan_vid_add(lowerdev, vid); + return vlan_vid_add(lowerdev, proto, vid); } static int macvlan_vlan_rx_kill_vid(struct net_device *dev, - unsigned short vid) + __be16 proto, u16 vid) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; - vlan_vid_del(lowerdev, vid); + vlan_vid_del(lowerdev, proto, vid); return 0; } diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 9290eb23d664..7c43261975bd 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1598,7 +1598,7 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) return stats; } -static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid) +static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) { struct team *team = netdev_priv(dev); struct team_port *port; @@ -1610,7 +1610,7 @@ static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid) */ mutex_lock(&team->lock); list_for_each_entry(port, &team->port_list, list) { - err = vlan_vid_add(port->dev, vid); + err = vlan_vid_add(port->dev, proto, vid); if (err) goto unwind; } @@ -1620,20 +1620,20 @@ static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid) unwind: list_for_each_entry_continue_reverse(port, &team->port_list, list) - vlan_vid_del(port->dev, vid); + vlan_vid_del(port->dev, proto, vid); mutex_unlock(&team->lock); return err; } -static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid) +static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) { struct team *team = netdev_priv(dev); struct team_port *port; rcu_read_lock(); list_for_each_entry_rcu(port, &team->port_list, list) - vlan_vid_del(port->dev, vid); + vlan_vid_del(port->dev, proto, vid); rcu_read_unlock(); return 0; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index b61d57ab3c63..50077753a0e5 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1006,7 +1006,8 @@ static void virtnet_set_rx_mode(struct net_device *dev) kfree(buf); } -static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid) +static int virtnet_vlan_rx_add_vid(struct net_device *dev, + __be16 proto, u16 vid) { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; @@ -1019,7 +1020,8 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid) return 0; } -static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid) +static int virtnet_vlan_rx_kill_vid(struct net_device *dev, + __be16 proto, u16 vid) { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; diff 
--git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index ba9bdad39986..27b889992ab8 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1931,7 +1931,7 @@ vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter) static int -vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); @@ -1953,7 +1953,7 @@ vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid) static int -vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 90ddd823605c..e68f79b38556 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -302,7 +302,8 @@ static void qeth_l2_process_vlans(struct qeth_card *card) spin_unlock_bh(&card->vlanlock); } -static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, + __be16 proto, u16 vid) { struct qeth_card *card = dev->ml_priv; struct qeth_vlan_vid *id; @@ -331,7 +332,8 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) return 0; } -static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, + __be16 proto, u16 vid) { struct qeth_vlan_vid *id, *tmpid = NULL; struct qeth_card *card = dev->ml_priv; diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 04261bc08f20..642686846a14 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1824,7 +1824,8 @@ static void qeth_l3_free_vlan_addresses(struct qeth_card *card, rcu_read_unlock(); } -static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, + __be16 proto, u16 vid) { struct qeth_card *card = dev->ml_priv; @@ -1832,7 +1833,8 @@ static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) return 0; } -static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, + __be16 proto, u16 vid) { struct qeth_card *card = dev->ml_priv; unsigned long flags; diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index fee28291a824..fcb9ef82aae1 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -93,8 +93,8 @@ extern u16 vlan_dev_vlan_id(const struct net_device *dev); extern bool vlan_do_receive(struct sk_buff **skb); extern struct sk_buff *vlan_untag(struct sk_buff *skb); -extern int vlan_vid_add(struct net_device *dev, unsigned short vid); -extern void vlan_vid_del(struct net_device *dev, unsigned short vid); +extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid); +extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid); extern int vlan_vids_add_by_dev(struct net_device *dev, const struct net_device *by_dev); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 7eb7e03ee417..f8898a435dc5 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -784,15 +784,13 @@ struct netdev_fcoe_hbainfo { * 3. Update dev->stats asynchronously and atomically, and define * neither operation. 
* - * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid); - * If device support VLAN filtering (dev->features & - * NETIF_F_HW_VLAN_CTAG_FILTER) - * this function is called when a VLAN id is registered. + * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16t vid); + * If device support VLAN filtering this function is called when a + * VLAN id is registered. * * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid); - * If device support VLAN filtering (dev->features & - * NETIF_F_HW_VLAN_CTAG_FILTER) - * this function is called when a VLAN id is unregistered. + * If device support VLAN filtering this function is called when a + * VLAN id is unregistered. * * void (*ndo_poll_controller)(struct net_device *dev); * @@ -936,9 +934,9 @@ struct net_device_ops { struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); int (*ndo_vlan_rx_add_vid)(struct net_device *dev, - unsigned short vid); + __be16 proto, u16 vid); int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, - unsigned short vid); + __be16 proto, u16 vid); #ifdef CONFIG_NET_POLL_CONTROLLER void (*ndo_poll_controller)(struct net_device *dev); int (*ndo_netpoll_setup)(struct net_device *dev, diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index d913feed0757..447c5c93434f 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -112,7 +112,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head) * VLAN is not 0 (leave it there for 802.1p). */ if (vlan_id) - vlan_vid_del(real_dev, vlan_id); + vlan_vid_del(real_dev, htons(ETH_P_8021Q), vlan_id); /* Get rid of the vlan's reference to real_dev */ dev_put(real_dev); @@ -142,7 +142,7 @@ int register_vlan_dev(struct net_device *dev) struct vlan_group *grp; int err; - err = vlan_vid_add(real_dev, vlan_id); + err = vlan_vid_add(real_dev, htons(ETH_P_8021Q), vlan_id); if (err) return err; @@ -195,7 +195,7 @@ out_uninit_gvrp: if (grp->nr_vlan_devs == 0) vlan_gvrp_uninit_applicant(real_dev); out_vid_del: - vlan_vid_del(real_dev, vlan_id); + vlan_vid_del(real_dev, htons(ETH_P_8021Q), vlan_id); return err; } @@ -350,7 +350,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) { pr_info("adding VLAN 0 to HW filter on device %s\n", dev->name); - vlan_vid_add(dev, 0); + vlan_vid_add(dev, htons(ETH_P_8021Q), 0); } vlan_info = rtnl_dereference(dev->vlan_info); @@ -416,7 +416,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, case NETDEV_DOWN: if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) - vlan_vid_del(dev, 0); + vlan_vid_del(dev, htons(ETH_P_8021Q), 0); /* Put all VLANs for this dev in the down state too. 
*/ for (i = 0; i < VLAN_N_VID; i++) { diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index 3df29d344704..04e3b95a0d48 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c @@ -185,35 +185,37 @@ static struct vlan_info *vlan_info_alloc(struct net_device *dev) struct vlan_vid_info { struct list_head list; - unsigned short vid; + __be16 proto; + u16 vid; int refcount; }; static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info, - unsigned short vid) + __be16 proto, u16 vid) { struct vlan_vid_info *vid_info; list_for_each_entry(vid_info, &vlan_info->vid_list, list) { - if (vid_info->vid == vid) + if (vid_info->proto == proto && vid_info->vid == vid) return vid_info; } return NULL; } -static struct vlan_vid_info *vlan_vid_info_alloc(unsigned short vid) +static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid) { struct vlan_vid_info *vid_info; vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL); if (!vid_info) return NULL; + vid_info->proto = proto; vid_info->vid = vid; return vid_info; } -static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid, +static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid, struct vlan_vid_info **pvid_info) { struct net_device *dev = vlan_info->real_dev; @@ -221,12 +223,13 @@ static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid, struct vlan_vid_info *vid_info; int err; - vid_info = vlan_vid_info_alloc(vid); + vid_info = vlan_vid_info_alloc(proto, vid); if (!vid_info) return -ENOMEM; - if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { - err = ops->ndo_vlan_rx_add_vid(dev, vid); + if (proto == htons(ETH_P_8021Q) && + dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { + err = ops->ndo_vlan_rx_add_vid(dev, proto, vid); if (err) { kfree(vid_info); return err; @@ -238,7 +241,7 @@ static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid, return 0; } -int vlan_vid_add(struct net_device *dev, unsigned short vid) +int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid) { struct vlan_info *vlan_info; struct vlan_vid_info *vid_info; @@ -254,9 +257,9 @@ int vlan_vid_add(struct net_device *dev, unsigned short vid) return -ENOMEM; vlan_info_created = true; } - vid_info = vlan_vid_info_get(vlan_info, vid); + vid_info = vlan_vid_info_get(vlan_info, proto, vid); if (!vid_info) { - err = __vlan_vid_add(vlan_info, vid, &vid_info); + err = __vlan_vid_add(vlan_info, proto, vid, &vid_info); if (err) goto out_free_vlan_info; } @@ -279,14 +282,16 @@ static void __vlan_vid_del(struct vlan_info *vlan_info, { struct net_device *dev = vlan_info->real_dev; const struct net_device_ops *ops = dev->netdev_ops; - unsigned short vid = vid_info->vid; + __be16 proto = vid_info->proto; + u16 vid = vid_info->vid; int err; - if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { - err = ops->ndo_vlan_rx_kill_vid(dev, vid); + if (proto == htons(ETH_P_8021Q) && + dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { + err = ops->ndo_vlan_rx_kill_vid(dev, proto, vid); if (err) { - pr_warn("failed to kill vid %d for device %s\n", - vid, dev->name); + pr_warn("failed to kill vid %04x/%d for device %s\n", + proto, vid, dev->name); } } list_del(&vid_info->list); @@ -294,7 +299,7 @@ static void __vlan_vid_del(struct vlan_info *vlan_info, vlan_info->nr_vids--; } -void vlan_vid_del(struct net_device *dev, unsigned short vid) +void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid) { struct vlan_info *vlan_info; struct vlan_vid_info *vid_info; @@ -305,7 +310,7 @@ void 
vlan_vid_del(struct net_device *dev, unsigned short vid) if (!vlan_info) return; - vid_info = vlan_vid_info_get(vlan_info, vid); + vid_info = vlan_vid_info_get(vlan_info, proto, vid); if (!vid_info) return; vid_info->refcount--; @@ -333,7 +338,7 @@ int vlan_vids_add_by_dev(struct net_device *dev, return 0; list_for_each_entry(vid_info, &vlan_info->vid_list, list) { - err = vlan_vid_add(dev, vid_info->vid); + err = vlan_vid_add(dev, vid_info->proto, vid_info->vid); if (err) goto unwind; } @@ -343,7 +348,7 @@ unwind: list_for_each_entry_continue_reverse(vid_info, &vlan_info->vid_list, list) { - vlan_vid_del(dev, vid_info->vid); + vlan_vid_del(dev, vid_info->proto, vid_info->vid); } return err; @@ -363,7 +368,7 @@ void vlan_vids_del_by_dev(struct net_device *dev, return; list_for_each_entry(vid_info, &vlan_info->vid_list, list) - vlan_vid_del(dev, vid_info->vid); + vlan_vid_del(dev, vid_info->proto, vid_info->vid); } EXPORT_SYMBOL(vlan_vids_del_by_dev); diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 0b3dbbec80d0..c3076e2c4294 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -34,6 +34,7 @@ static void __vlan_add_flags(struct net_port_vlans *v, u16 vid, u16 flags) static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags) { + const struct net_device_ops *ops; struct net_bridge_port *p = NULL; struct net_bridge *br; struct net_device *dev; @@ -53,6 +54,7 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags) br = v->parent.br; dev = br->dev; } + ops = dev->netdev_ops; if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) { /* Add VLAN to the device filter if it is supported. @@ -61,7 +63,8 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags) * that ever changes this code will allow tagged * traffic to enter the bridge. */ - err = dev->netdev_ops->ndo_vlan_rx_add_vid(dev, vid); + err = ops->ndo_vlan_rx_add_vid(dev, htons(ETH_P_8021Q), + vid); if (err) return err; } @@ -83,7 +86,7 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags) out_filt: if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) - dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, vid); + ops->ndo_vlan_rx_kill_vid(dev, htons(ETH_P_8021Q), vid); return err; } @@ -97,9 +100,10 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid) if (v->port_idx && vid) { struct net_device *dev = v->parent.port->dev; + const struct net_device_ops *ops = dev->netdev_ops; if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) - dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, vid); + ops->ndo_vlan_rx_kill_vid(dev, htons(ETH_P_8021Q), vid); } clear_bit(vid, v->vlan_bitmap); -- cgit v1.2.3-70-g09d2 From 1fd9b1fc310314911f66d2f14a8e4f0ef37bf47b Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Fri, 19 Apr 2013 02:04:29 +0000 Subject: net: vlan: prepare for 802.1ad support Make the encapsulation protocol value a property of VLAN devices and change the device lookup functions to take the protocol value into account. Signed-off-by: Patrick McHardy Signed-off-by: David S. 
Miller --- drivers/net/bonding/bond_main.c | 8 +- drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c | 2 +- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 2 +- include/linux/if_vlan.h | 2 +- net/8021q/vlan.c | 87 ++++++++-------------- net/8021q/vlan.h | 54 +++++++++++--- net/8021q/vlan_core.c | 10 ++- net/8021q/vlan_dev.c | 7 +- net/8021q/vlan_gvrp.c | 4 + net/8021q/vlan_mvrp.c | 4 + net/8021q/vlan_netlink.c | 9 ++- net/bridge/br_netfilter.c | 3 +- 12 files changed, 108 insertions(+), 84 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 35e89e12a1f7..1e79a7643f08 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -782,7 +782,7 @@ static void bond_resend_igmp_join_requests(struct bonding *bond) /* rejoin all groups on vlan devices */ list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { - vlan_dev = __vlan_find_dev_deep(bond_dev, + vlan_dev = __vlan_find_dev_deep(bond_dev, htons(ETH_P_8021Q), vlan->vlan_id); if (vlan_dev) __bond_resend_igmp_join_requests(vlan_dev); @@ -2512,7 +2512,8 @@ static int bond_has_this_ip(struct bonding *bond, __be32 ip) list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { rcu_read_lock(); - vlan_dev = __vlan_find_dev_deep(bond->dev, vlan->vlan_id); + vlan_dev = __vlan_find_dev_deep(bond->dev, htons(ETH_P_8021Q), + vlan->vlan_id); rcu_read_unlock(); if (vlan_dev && ip == bond_confirm_addr(vlan_dev, 0, ip)) return 1; @@ -2541,7 +2542,7 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_ return; } if (vlan_id) { - skb = vlan_put_tag(skb, vlan_id); + skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_id); if (!skb) { pr_err("failed to insert VLAN tag\n"); return; @@ -2603,6 +2604,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { rcu_read_lock(); vlan_dev = __vlan_find_dev_deep(bond->dev, + htons(ETH_P_8021Q), vlan->vlan_id); rcu_read_unlock(); if (vlan_dev == rt->dst.dev) { diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c index 4232767862b5..0c96e5fe99cc 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c @@ -185,7 +185,7 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter, if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) { rcu_read_lock(); if (vlan && vlan != VLAN_VID_MASK) { - dev = __vlan_find_dev_deep(dev, vlan); + dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), vlan); } else if (netif_is_bond_slave(dev)) { struct net_device *upper_dev; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index e88e01312c67..d132765f92af 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -3346,7 +3346,7 @@ void qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event) rcu_read_lock(); for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) { - dev = __vlan_find_dev_deep(netdev, vid); + dev = __vlan_find_dev_deep(netdev, htons(ETH_P_8021Q), vid); if (!dev) continue; qlcnic_config_indev_addr(adapter, dev, event); diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index fcb9ef82aae1..2c9fb65f8267 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -86,7 +86,7 @@ static inline int is_vlan_dev(struct net_device *dev) #if 
defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev, - u16 vlan_id); + __be16 vlan_proto, u16 vlan_id); extern struct net_device *vlan_dev_real_dev(const struct net_device *dev); extern u16 vlan_dev_vlan_id(const struct net_device *dev); diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 447c5c93434f..9424f3718ea7 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -51,14 +51,18 @@ const char vlan_version[] = DRV_VERSION; /* End of global variables definitions. */ -static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id) +static int vlan_group_prealloc_vid(struct vlan_group *vg, + __be16 vlan_proto, u16 vlan_id) { struct net_device **array; + unsigned int pidx, vidx; unsigned int size; ASSERT_RTNL(); - array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN]; + pidx = vlan_proto_idx(vlan_proto); + vidx = vlan_id / VLAN_GROUP_ARRAY_PART_LEN; + array = vg->vlan_devices_arrays[pidx][vidx]; if (array != NULL) return 0; @@ -67,7 +71,7 @@ static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id) if (array == NULL) return -ENOBUFS; - vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN] = array; + vg->vlan_devices_arrays[pidx][vidx] = array; return 0; } @@ -93,7 +97,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head) if (vlan->flags & VLAN_FLAG_GVRP) vlan_gvrp_request_leave(dev); - vlan_group_set_device(grp, vlan_id, NULL); + vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL); /* Because unregister_netdevice_queue() makes sure at least one rcu * grace period is respected before device freeing, * we dont need to call synchronize_net() here. @@ -112,13 +116,14 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head) * VLAN is not 0 (leave it there for 802.1p). */ if (vlan_id) - vlan_vid_del(real_dev, htons(ETH_P_8021Q), vlan_id); + vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id); /* Get rid of the vlan's reference to real_dev */ dev_put(real_dev); } -int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id) +int vlan_check_real_dev(struct net_device *real_dev, + __be16 protocol, u16 vlan_id) { const char *name = real_dev->name; @@ -127,7 +132,7 @@ int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id) return -EOPNOTSUPP; } - if (vlan_find_dev(real_dev, vlan_id) != NULL) + if (vlan_find_dev(real_dev, protocol, vlan_id) != NULL) return -EEXIST; return 0; @@ -142,7 +147,7 @@ int register_vlan_dev(struct net_device *dev) struct vlan_group *grp; int err; - err = vlan_vid_add(real_dev, htons(ETH_P_8021Q), vlan_id); + err = vlan_vid_add(real_dev, vlan->vlan_proto, vlan_id); if (err) return err; @@ -160,7 +165,7 @@ int register_vlan_dev(struct net_device *dev) goto out_uninit_gvrp; } - err = vlan_group_prealloc_vid(grp, vlan_id); + err = vlan_group_prealloc_vid(grp, vlan->vlan_proto, vlan_id); if (err < 0) goto out_uninit_mvrp; @@ -181,7 +186,7 @@ int register_vlan_dev(struct net_device *dev) /* So, got the sucker initialized, now lets place * it into our local structure. 
*/ - vlan_group_set_device(grp, vlan_id, dev); + vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, dev); grp->nr_vlan_devs++; return 0; @@ -195,7 +200,7 @@ out_uninit_gvrp: if (grp->nr_vlan_devs == 0) vlan_gvrp_uninit_applicant(real_dev); out_vid_del: - vlan_vid_del(real_dev, htons(ETH_P_8021Q), vlan_id); + vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id); return err; } @@ -213,7 +218,7 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id) if (vlan_id >= VLAN_VID_MASK) return -ERANGE; - err = vlan_check_real_dev(real_dev, vlan_id); + err = vlan_check_real_dev(real_dev, htons(ETH_P_8021Q), vlan_id); if (err < 0) return err; @@ -255,6 +260,7 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id) new_dev->mtu = real_dev->mtu; new_dev->priv_flags |= (real_dev->priv_flags & IFF_UNICAST_FLT); + vlan_dev_priv(new_dev)->vlan_proto = htons(ETH_P_8021Q); vlan_dev_priv(new_dev)->vlan_id = vlan_id; vlan_dev_priv(new_dev)->real_dev = real_dev; vlan_dev_priv(new_dev)->dent = NULL; @@ -341,6 +347,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, int i, flgs; struct net_device *vlandev; struct vlan_dev_priv *vlan; + bool last = false; LIST_HEAD(list); if (is_vlan_dev(dev)) @@ -365,22 +372,13 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, switch (event) { case NETDEV_CHANGE: /* Propagate real device state to vlan devices */ - for (i = 0; i < VLAN_N_VID; i++) { - vlandev = vlan_group_get_device(grp, i); - if (!vlandev) - continue; - + vlan_group_for_each_dev(grp, i, vlandev) netif_stacked_transfer_operstate(dev, vlandev); - } break; case NETDEV_CHANGEADDR: /* Adjust unicast filters on underlying device */ - for (i = 0; i < VLAN_N_VID; i++) { - vlandev = vlan_group_get_device(grp, i); - if (!vlandev) - continue; - + vlan_group_for_each_dev(grp, i, vlandev) { flgs = vlandev->flags; if (!(flgs & IFF_UP)) continue; @@ -390,11 +388,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, break; case NETDEV_CHANGEMTU: - for (i = 0; i < VLAN_N_VID; i++) { - vlandev = vlan_group_get_device(grp, i); - if (!vlandev) - continue; - + vlan_group_for_each_dev(grp, i, vlandev) { if (vlandev->mtu <= dev->mtu) continue; @@ -404,14 +398,8 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, case NETDEV_FEAT_CHANGE: /* Propagate device features to underlying device */ - for (i = 0; i < VLAN_N_VID; i++) { - vlandev = vlan_group_get_device(grp, i); - if (!vlandev) - continue; - + vlan_group_for_each_dev(grp, i, vlandev) vlan_transfer_features(dev, vlandev); - } - break; case NETDEV_DOWN: @@ -419,11 +407,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, vlan_vid_del(dev, htons(ETH_P_8021Q), 0); /* Put all VLANs for this dev in the down state too. */ - for (i = 0; i < VLAN_N_VID; i++) { - vlandev = vlan_group_get_device(grp, i); - if (!vlandev) - continue; - + vlan_group_for_each_dev(grp, i, vlandev) { flgs = vlandev->flags; if (!(flgs & IFF_UP)) continue; @@ -437,11 +421,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, case NETDEV_UP: /* Put all VLANs for this dev in the up state too. 
*/ - for (i = 0; i < VLAN_N_VID; i++) { - vlandev = vlan_group_get_device(grp, i); - if (!vlandev) - continue; - + vlan_group_for_each_dev(grp, i, vlandev) { flgs = vlandev->flags; if (flgs & IFF_UP) continue; @@ -458,17 +438,15 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, if (dev->reg_state != NETREG_UNREGISTERING) break; - for (i = 0; i < VLAN_N_VID; i++) { - vlandev = vlan_group_get_device(grp, i); - if (!vlandev) - continue; - + vlan_group_for_each_dev(grp, i, vlandev) { /* removal of last vid destroys vlan_info, abort * afterwards */ if (vlan_info->nr_vids == 1) - i = VLAN_N_VID; + last = true; unregister_vlan_dev(vlandev, &list); + if (last) + break; } unregister_netdevice_many(&list); break; @@ -482,13 +460,8 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, case NETDEV_NOTIFY_PEERS: case NETDEV_BONDING_FAILOVER: /* Propagate to vlan devices */ - for (i = 0; i < VLAN_N_VID; i++) { - vlandev = vlan_group_get_device(grp, i); - if (!vlandev) - continue; - + vlan_group_for_each_dev(grp, i, vlandev) call_netdevice_notifiers(event, vlandev); - } break; } diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h index 670f1e8cfc0f..245de9653db0 100644 --- a/net/8021q/vlan.h +++ b/net/8021q/vlan.h @@ -49,6 +49,7 @@ struct netpoll; * @ingress_priority_map: ingress priority mappings * @nr_egress_mappings: number of egress priority mappings * @egress_priority_map: hash of egress priority mappings + * @vlan_proto: VLAN encapsulation protocol * @vlan_id: VLAN identifier * @flags: device flags * @real_dev: underlying netdevice @@ -62,6 +63,7 @@ struct vlan_dev_priv { unsigned int nr_egress_mappings; struct vlan_priority_tci_mapping *egress_priority_map[16]; + __be16 vlan_proto; u16 vlan_id; u16 flags; @@ -87,10 +89,16 @@ static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev) #define VLAN_GROUP_ARRAY_SPLIT_PARTS 8 #define VLAN_GROUP_ARRAY_PART_LEN (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS) +enum vlan_protos { + VLAN_PROTO_8021Q = 0, + VLAN_PROTO_NUM, +}; + struct vlan_group { unsigned int nr_vlan_devs; struct hlist_node hlist; /* linked list */ - struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS]; + struct net_device **vlan_devices_arrays[VLAN_PROTO_NUM] + [VLAN_GROUP_ARRAY_SPLIT_PARTS]; }; struct vlan_info { @@ -103,37 +111,64 @@ struct vlan_info { struct rcu_head rcu; }; -static inline struct net_device *vlan_group_get_device(struct vlan_group *vg, - u16 vlan_id) +static inline unsigned int vlan_proto_idx(__be16 proto) +{ + switch (proto) { + case __constant_htons(ETH_P_8021Q): + return VLAN_PROTO_8021Q; + default: + BUG(); + } +} + +static inline struct net_device *__vlan_group_get_device(struct vlan_group *vg, + unsigned int pidx, + u16 vlan_id) { struct net_device **array; - array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN]; + + array = vg->vlan_devices_arrays[pidx] + [vlan_id / VLAN_GROUP_ARRAY_PART_LEN]; return array ? 
array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] : NULL; } +static inline struct net_device *vlan_group_get_device(struct vlan_group *vg, + __be16 vlan_proto, + u16 vlan_id) +{ + return __vlan_group_get_device(vg, vlan_proto_idx(vlan_proto), vlan_id); +} + static inline void vlan_group_set_device(struct vlan_group *vg, - u16 vlan_id, + __be16 vlan_proto, u16 vlan_id, struct net_device *dev) { struct net_device **array; if (!vg) return; - array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN]; + array = vg->vlan_devices_arrays[vlan_proto_idx(vlan_proto)] + [vlan_id / VLAN_GROUP_ARRAY_PART_LEN]; array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev; } /* Must be invoked with rcu_read_lock or with RTNL. */ static inline struct net_device *vlan_find_dev(struct net_device *real_dev, - u16 vlan_id) + __be16 vlan_proto, u16 vlan_id) { struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info); if (vlan_info) - return vlan_group_get_device(&vlan_info->grp, vlan_id); + return vlan_group_get_device(&vlan_info->grp, + vlan_proto, vlan_id); return NULL; } +#define vlan_group_for_each_dev(grp, i, dev) \ + for ((i) = 0; i < VLAN_PROTO_NUM * VLAN_N_VID; i++) \ + if (((dev) = __vlan_group_get_device((grp), (i) / VLAN_N_VID, \ + (i) % VLAN_N_VID))) + /* found in vlan_dev.c */ void vlan_dev_set_ingress_priority(const struct net_device *dev, u32 skb_prio, u16 vlan_prio); @@ -142,7 +177,8 @@ int vlan_dev_set_egress_priority(const struct net_device *dev, int vlan_dev_change_flags(const struct net_device *dev, u32 flag, u32 mask); void vlan_dev_get_realdev_name(const struct net_device *dev, char *result); -int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id); +int vlan_check_real_dev(struct net_device *real_dev, + __be16 protocol, u16 vlan_id); void vlan_setup(struct net_device *dev); int register_vlan_dev(struct net_device *dev); void unregister_vlan_dev(struct net_device *dev, struct list_head *head); diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index 04e3b95a0d48..4e4c360353ea 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c @@ -12,7 +12,7 @@ bool vlan_do_receive(struct sk_buff **skbp) struct net_device *vlan_dev; struct vlan_pcpu_stats *rx_stats; - vlan_dev = vlan_find_dev(skb->dev, vlan_id); + vlan_dev = vlan_find_dev(skb->dev, htons(ETH_P_8021Q), vlan_id); if (!vlan_dev) return false; @@ -62,12 +62,13 @@ bool vlan_do_receive(struct sk_buff **skbp) /* Must be invoked with rcu_read_lock. 
*/ struct net_device *__vlan_find_dev_deep(struct net_device *dev, - u16 vlan_id) + __be16 vlan_proto, u16 vlan_id) { struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info); if (vlan_info) { - return vlan_group_get_device(&vlan_info->grp, vlan_id); + return vlan_group_get_device(&vlan_info->grp, + vlan_proto, vlan_id); } else { /* * Lower devices of master uppers (bonding, team) do not have @@ -78,7 +79,8 @@ struct net_device *__vlan_find_dev_deep(struct net_device *dev, upper_dev = netdev_master_upper_dev_get_rcu(dev); if (upper_dev) - return __vlan_find_dev_deep(upper_dev, vlan_id); + return __vlan_find_dev_deep(upper_dev, + vlan_proto, vlan_id); } return NULL; diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 5c4892a86410..d7457b7e1b95 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -99,6 +99,7 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev, const void *daddr, const void *saddr, unsigned int len) { + struct vlan_dev_priv *vlan = vlan_dev_priv(dev); struct vlan_hdr *vhdr; unsigned int vhdrlen = 0; u16 vlan_tci = 0; @@ -120,8 +121,8 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev, else vhdr->h_vlan_encapsulated_proto = htons(len); - skb->protocol = htons(ETH_P_8021Q); - type = ETH_P_8021Q; + skb->protocol = vlan->vlan_proto; + type = ntohs(vlan->vlan_proto); vhdrlen = VLAN_HLEN; } @@ -161,7 +162,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb, * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs... */ - if (veth->h_vlan_proto != htons(ETH_P_8021Q) || + if (veth->h_vlan_proto != vlan->vlan_proto || vlan->flags & VLAN_FLAG_REORDER_HDR) { u16 vlan_tci; vlan_tci = vlan->vlan_id; diff --git a/net/8021q/vlan_gvrp.c b/net/8021q/vlan_gvrp.c index 6f9755352760..66a80320b032 100644 --- a/net/8021q/vlan_gvrp.c +++ b/net/8021q/vlan_gvrp.c @@ -32,6 +32,8 @@ int vlan_gvrp_request_join(const struct net_device *dev) const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); __be16 vlan_id = htons(vlan->vlan_id); + if (vlan->vlan_proto != htons(ETH_P_8021Q)) + return 0; return garp_request_join(vlan->real_dev, &vlan_gvrp_app, &vlan_id, sizeof(vlan_id), GVRP_ATTR_VID); } @@ -41,6 +43,8 @@ void vlan_gvrp_request_leave(const struct net_device *dev) const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); __be16 vlan_id = htons(vlan->vlan_id); + if (vlan->vlan_proto != htons(ETH_P_8021Q)) + return; garp_request_leave(vlan->real_dev, &vlan_gvrp_app, &vlan_id, sizeof(vlan_id), GVRP_ATTR_VID); } diff --git a/net/8021q/vlan_mvrp.c b/net/8021q/vlan_mvrp.c index d9ec1d5964aa..e0fe091801b0 100644 --- a/net/8021q/vlan_mvrp.c +++ b/net/8021q/vlan_mvrp.c @@ -38,6 +38,8 @@ int vlan_mvrp_request_join(const struct net_device *dev) const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); __be16 vlan_id = htons(vlan->vlan_id); + if (vlan->vlan_proto != htons(ETH_P_8021Q)) + return 0; return mrp_request_join(vlan->real_dev, &vlan_mrp_app, &vlan_id, sizeof(vlan_id), MVRP_ATTR_VID); } @@ -47,6 +49,8 @@ void vlan_mvrp_request_leave(const struct net_device *dev) const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); __be16 vlan_id = htons(vlan->vlan_id); + if (vlan->vlan_proto != htons(ETH_P_8021Q)) + return; mrp_request_leave(vlan->real_dev, &vlan_mrp_app, &vlan_id, sizeof(vlan_id), MVRP_ATTR_VID); } diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c index 1789658b7cd7..a1a956ab39a5 100644 --- a/net/8021q/vlan_netlink.c +++ 
b/net/8021q/vlan_netlink.c @@ -118,11 +118,12 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev, if (!real_dev) return -ENODEV; - vlan->vlan_id = nla_get_u16(data[IFLA_VLAN_ID]); - vlan->real_dev = real_dev; - vlan->flags = VLAN_FLAG_REORDER_HDR; + vlan->vlan_proto = htons(ETH_P_8021Q); + vlan->vlan_id = nla_get_u16(data[IFLA_VLAN_ID]); + vlan->real_dev = real_dev; + vlan->flags = VLAN_FLAG_REORDER_HDR; - err = vlan_check_real_dev(real_dev, vlan->vlan_id); + err = vlan_check_real_dev(real_dev, vlan->vlan_proto, vlan->vlan_id); if (err < 0) return err; diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index fe43bc7b063f..bd61bf5159c9 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -535,7 +535,8 @@ static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct if (brnf_pass_vlan_indev == 0 || !vlan_tx_tag_present(skb)) return br; - vlan = __vlan_find_dev_deep(br, vlan_tx_tag_get(skb) & VLAN_VID_MASK); + vlan = __vlan_find_dev_deep(br, htons(ETH_P_8021Q), + vlan_tx_tag_get(skb) & VLAN_VID_MASK); return vlan ? vlan : br; } -- cgit v1.2.3-70-g09d2 From 86a9bad3ab6b6f858fd4443b48738cabbb6d094c Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Fri, 19 Apr 2013 02:04:30 +0000 Subject: net: vlan: add protocol argument to packet tagging functions Add a protocol argument to the VLAN packet tagging functions. In case of HW tagging, we need that protocol available in the ndo_start_xmit functions, so it is stored in a new field in the skb. The new field fits into a hole (on 64 bit) and doesn't increase the sks's size. Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- drivers/infiniband/hw/nes/nes_hw.c | 2 +- drivers/net/bonding/bond_alb.c | 4 +-- drivers/net/ethernet/3com/typhoon.c | 2 +- drivers/net/ethernet/adaptec/starfire.c | 2 +- drivers/net/ethernet/alteon/acenic.c | 2 +- drivers/net/ethernet/amd/amd8111e.c | 2 +- drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 2 +- drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 2 +- drivers/net/ethernet/atheros/atlx/atl1.c | 2 +- drivers/net/ethernet/atheros/atlx/atl2.c | 2 +- drivers/net/ethernet/broadcom/bnx2.c | 2 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 4 +-- drivers/net/ethernet/broadcom/tg3.c | 2 +- drivers/net/ethernet/brocade/bna/bnad.c | 2 +- drivers/net/ethernet/chelsio/cxgb/sge.c | 2 +- drivers/net/ethernet/chelsio/cxgb3/sge.c | 4 +-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 4 +-- drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 5 ++-- drivers/net/ethernet/cisco/enic/enic_main.c | 2 +- drivers/net/ethernet/emulex/benet/be_main.c | 6 ++--- drivers/net/ethernet/intel/e1000/e1000_main.c | 2 +- drivers/net/ethernet/intel/e1000e/netdev.c | 2 +- drivers/net/ethernet/intel/igb/igb_main.c | 2 +- drivers/net/ethernet/intel/igbvf/netdev.c | 2 +- drivers/net/ethernet/intel/ixgb/ixgb_main.c | 4 +-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 +- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 2 +- drivers/net/ethernet/jme.c | 2 +- drivers/net/ethernet/marvell/sky2.c | 2 +- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 4 +-- drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 2 +- drivers/net/ethernet/natsemi/ns83820.c | 2 +- drivers/net/ethernet/neterion/s2io.c | 2 +- drivers/net/ethernet/neterion/vxge/vxge-main.c | 2 +- drivers/net/ethernet/nvidia/forcedeth.c | 2 +- drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 8 +++--- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 8 +++--- drivers/net/ethernet/realtek/8139cp.c | 2 +- 
drivers/net/ethernet/realtek/r8169.c | 2 +- drivers/net/ethernet/tehuti/tehuti.c | 2 +- drivers/net/ethernet/via/via-rhine.c | 2 +- drivers/net/ethernet/via/via-velocity.c | 2 +- drivers/net/usb/cdc_mbim.c | 2 +- drivers/net/vmxnet3/vmxnet3_drv.c | 2 +- include/linux/if_vlan.h | 33 ++++++++++++++++------- include/linux/skbuff.h | 2 ++ net/8021q/vlan_core.c | 8 +++--- net/8021q/vlan_dev.c | 2 +- net/batman-adv/bridge_loop_avoidance.c | 2 +- net/bridge/br_netfilter.c | 2 +- net/bridge/br_vlan.c | 4 +-- net/core/dev.c | 5 ++-- net/core/netpoll.c | 5 ++-- net/core/skbuff.c | 1 + net/openvswitch/actions.c | 6 ++--- net/openvswitch/datapath.c | 2 +- 56 files changed, 107 insertions(+), 84 deletions(-) (limited to 'include/linux') diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 67647e264611..418004c93feb 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -2948,7 +2948,7 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq) nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n", nesvnic->netdev->name, vlan_tag); - __vlan_hwaccel_put_tag(rx_skb, vlan_tag); + __vlan_hwaccel_put_tag(rx_skb, htons(ETH_P_8021Q), vlan_tag); } if (nes_use_lro) lro_receive_skb(&nesvnic->lro_mgr, rx_skb, NULL); diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index f5e052723029..e02cc265723a 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -514,7 +514,7 @@ static void rlb_update_client(struct rlb_client_info *client_info) skb->dev = client_info->slave->dev; if (client_info->tag) { - skb = vlan_put_tag(skb, client_info->vlan_id); + skb = vlan_put_tag(skb, htons(ETH_P_8021Q), client_info->vlan_id); if (!skb) { pr_err("%s: Error: failed to insert VLAN tag\n", client_info->slave->bond->dev->name); @@ -1014,7 +1014,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]) continue; } - skb = vlan_put_tag(skb, vlan->vlan_id); + skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vlan->vlan_id); if (!skb) { pr_err("%s: Error: failed to insert VLAN tag\n", bond->dev->name); diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c index 839a96213742..144942f6372b 100644 --- a/drivers/net/ethernet/3com/typhoon.c +++ b/drivers/net/ethernet/3com/typhoon.c @@ -1690,7 +1690,7 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read skb_checksum_none_assert(new_skb); if (rx->rxStatus & TYPHOON_RX_VLAN) - __vlan_hwaccel_put_tag(new_skb, + __vlan_hwaccel_put_tag(new_skb, htons(ETH_P_8021Q), ntohl(rx->vlanTag) & 0xffff); netif_receive_skb(new_skb); diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c index cdbc5443ae3b..8b04bfc20cfb 100644 --- a/drivers/net/ethernet/adaptec/starfire.c +++ b/drivers/net/ethernet/adaptec/starfire.c @@ -1498,7 +1498,7 @@ static int __netdev_rx(struct net_device *dev, int *quota) printk(KERN_DEBUG " netdev_rx() vlanid = %d\n", vlid); } - __vlan_hwaccel_put_tag(skb, vlid); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlid); } #endif /* VLAN_SUPPORT */ netif_receive_skb(skb); diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c index a7689d931053..b7894f8af9d1 100644 --- a/drivers/net/ethernet/alteon/acenic.c +++ b/drivers/net/ethernet/alteon/acenic.c @@ -2019,7 +2019,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm) /* send it up */ if 
((bd_flags & BD_FLG_VLAN_TAG)) - __vlan_hwaccel_put_tag(skb, retdesc->vlan); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), retdesc->vlan); netif_rx(skb); dev->stats.rx_packets++; diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index 6bad84d6e2c4..8e6b665a6726 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -793,7 +793,7 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget) #if AMD8111E_VLAN_TAG_USED if (vtag == TT_VLAN_TAGGED){ u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info); - __vlan_hwaccel_put_tag(skb, vlan_tag); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); } #endif netif_receive_skb(skb); diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 3565255cdd81..0ba900762b13 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -1809,7 +1809,7 @@ rrs_checked: AT_TAG_TO_VLAN(rrs->vlan_tag, vlan); vlan = le16_to_cpu(vlan); - __vlan_hwaccel_put_tag(skb, vlan); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan); } netif_receive_skb(skb); diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 598a61151668..0688bb82b442 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -1435,7 +1435,7 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que, netdev_dbg(netdev, "RXD VLAN TAG=0x%04x\n", prrs->vtag); - __vlan_hwaccel_put_tag(skb, vlan_tag); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); } netif_receive_skb(skb); diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index fd7d85044e4a..fa0915f3999b 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -2024,7 +2024,7 @@ rrd_ok: ((rrd->vlan_tag & 7) << 13) | ((rrd->vlan_tag & 8) << 9); - __vlan_hwaccel_put_tag(skb, vlan_tag); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); } netif_receive_skb(skb); diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index 6b2c08a89b7e..265ce1b752ed 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -452,7 +452,7 @@ static void atl2_intr_rx(struct atl2_adapter *adapter) ((rxd->status.vtag&7) << 13) | ((rxd->status.vtag&8) << 9); - __vlan_hwaccel_put_tag(skb, vlan_tag); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); } netif_rx(skb); netdev->stats.rx_bytes += rx_size; diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 42a8bc8df5dd..5d204492c603 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -3211,7 +3211,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) } if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) - __vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag); skb->protocol = eth_type_trans(skb, bp->dev); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 352e58ede4d5..6b50443d3456 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 
@@ -719,7 +719,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages, skb, cqe, cqe_idx)) { if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) - __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag); bnx2x_gro_receive(bp, fp, skb); } else { DP(NETIF_MSG_RX_STATUS, @@ -994,7 +994,7 @@ reuse_rx: if (le16_to_cpu(cqe_fp->pars_flags.flags) & PARSING_FLAGS_VLAN) - __vlan_hwaccel_put_tag(skb, + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), le16_to_cpu(cqe_fp->vlan_tag)); napi_gro_receive(&fp->napi, skb); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 0c22c9a059c4..ac83c87e0b1b 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -6715,7 +6715,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) if (desc->type_flags & RXD_FLAG_VLAN && !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) - __vlan_hwaccel_put_tag(skb, + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), desc->err_vlan & RXD_VLAN_MASK); napi_gro_receive(&tnapi->napi, skb); diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index c0bc44e51fa9..ce4a030d3d0c 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -610,7 +610,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) rcb->rxq->rx_bytes += length; if (flags & BNA_CQ_EF_VLAN) - __vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag)); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag)); if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) napi_gro_frags(&rx_ctrl->napi); diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c index f85e0659432b..8061fb0ef7ed 100644 --- a/drivers/net/ethernet/chelsio/cxgb/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb/sge.c @@ -1386,7 +1386,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) if (p->vlan_valid) { st->vlan_xtract++; - __vlan_hwaccel_put_tag(skb, ntohs(p->vlan)); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan)); } netif_receive_skb(skb); } diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index 9d67eb794c4b..f12e6b85a653 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -2030,7 +2030,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq, if (p->vlan_valid) { qs->port_stats[SGE_PSTAT_VLANEX]++; - __vlan_hwaccel_put_tag(skb, ntohs(p->vlan)); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan)); } if (rq->polling) { if (lro) @@ -2132,7 +2132,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, if (cpl->vlan_valid) { qs->port_stats[SGE_PSTAT_VLANEX]++; - __vlan_hwaccel_put_tag(skb, ntohs(cpl->vlan)); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan)); } napi_gro_frags(&qs->napi); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 8b47b253e204..2bfbb206b35a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -1633,7 +1633,7 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, skb->rxhash = (__force u32)pkt->rsshdr.hash_val; if (unlikely(pkt->vlan_ex)) { - __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan)); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 
ntohs(pkt->vlan)); rxq->stats.vlan_ex++; } ret = napi_gro_frags(&rxq->rspq.napi); @@ -1705,7 +1705,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, skb_checksum_none_assert(skb); if (unlikely(pkt->vlan_ex)) { - __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan)); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); rxq->stats.vlan_ex++; } netif_receive_skb(skb); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 61dfb2a47929..df296af20bd5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -1482,7 +1482,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, skb_record_rx_queue(skb, rxq->rspq.idx); if (pkt->vlan_ex) { - __vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan)); + __vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q), + be16_to_cpu(pkt->vlan)); rxq->stats.vlan_ex++; } ret = napi_gro_frags(&rxq->rspq.napi); @@ -1551,7 +1552,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, if (pkt->vlan_ex) { rxq->stats.vlan_ex++; - __vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan)); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(pkt->vlan)); } netif_receive_skb(skb); diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 05c1e59b6bff..635f55992d7e 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1300,7 +1300,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, } if (vlan_stripped) - __vlan_hwaccel_put_tag(skb, vlan_tci); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); if (netdev->features & NETIF_F_GRO) napi_gro_receive(&enic->napi[q_number], skb); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index b41333184916..811d0a47d17a 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -771,7 +771,7 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, if (vlan_tx_tag_present(skb)) { vlan_tag = be_get_tx_vlan_tag(adapter, skb); - __vlan_put_tag(skb, vlan_tag); + __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); skb->vlan_tci = 0; } @@ -1383,7 +1383,7 @@ static void be_rx_compl_process(struct be_rx_obj *rxo, if (rxcp->vlanf) - __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag); netif_receive_skb(skb); } @@ -1439,7 +1439,7 @@ void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi, skb->rxhash = rxcp->rss_hash; if (rxcp->vlanf) - __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag); napi_gro_frags(napi); } diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 8239dafdd8e7..59ad007dd5aa 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -4003,7 +4003,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status, if (status & E1000_RXD_STAT_VP) { u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; - __vlan_hwaccel_put_tag(skb, vid); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); } napi_gro_receive(&adapter->napi, skb); } diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 8c17f01a155f..da7f2fad5ba4 100644 --- 
a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -554,7 +554,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter, skb->protocol = eth_type_trans(skb, netdev); if (staterr & E1000_RXD_STAT_VP) - __vlan_hwaccel_put_tag(skb, tag); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); napi_gro_receive(&adapter->napi, skb); } diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index d13ea71c7c1f..9bf08b977daa 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6683,7 +6683,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring, else vid = le16_to_cpu(rx_desc->wb.upper.vlan); - __vlan_hwaccel_put_tag(skb, vid); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); } skb_record_rx_queue(skb, rx_ring->queue_index); diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 3854fd698b85..93eb7ee06d3e 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -116,7 +116,7 @@ static void igbvf_receive_skb(struct igbvf_adapter *adapter, else vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; if (test_bit(vid, adapter->active_vlans)) - __vlan_hwaccel_put_tag(skb, vid); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); } napi_gro_receive(&adapter->rx_ring->napi, skb); diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index a32f274acd36..fce3e92f9d11 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -2082,8 +2082,8 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do) skb->protocol = eth_type_trans(skb, netdev); if (status & IXGB_RX_DESC_STATUS_VP) - __vlan_hwaccel_put_tag(skb, - le16_to_cpu(rx_desc->special)); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + le16_to_cpu(rx_desc->special)); netif_receive_skb(skb); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 3becffc77321..6225f880a3f4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1491,7 +1491,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); - __vlan_hwaccel_put_tag(skb, vid); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); } skb_record_rx_queue(skb, rx_ring->queue_index); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 4bc1f84c9352..1f5166ad6bb5 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -291,7 +291,7 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector, u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans)) - __vlan_hwaccel_put_tag(skb, tag); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) napi_gro_receive(&q_vector->napi, skb); diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index d28ce6f97172..070a6f1a0577 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -1059,7 +1059,7 @@ jme_alloc_and_feed_skb(struct 
jme_adapter *jme, int idx) if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) { u16 vid = le16_to_cpu(rxdesc->descwb.vlan); - __vlan_hwaccel_put_tag(skb, vid); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); NET_STAT(jme).rx_bytes += 4; } jme->jme_rx(skb); diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index bf9da1b7b090..256ae789c143 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -2713,7 +2713,7 @@ static void sky2_rx_tag(struct sky2_port *sky2, u16 length) struct sk_buff *skb; skb = sky2->rx_ring[sky2->rx_next].skb; - __vlan_hwaccel_put_tag(skb, be16_to_cpu(length)); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(length)); } static void sky2_rx_hash(struct sky2_port *sky2, u32 status) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index c7f856308e1a..4006f8857cb5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -673,7 +673,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) { u16 vid = be16_to_cpu(cqe->sl_vid); - __vlan_hwaccel_put_tag(gro_skb, vid); + __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid); } if (dev->features & NETIF_F_RXHASH) @@ -716,7 +716,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud if (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_VLAN_PRESENT_MASK) - __vlan_hwaccel_put_tag(skb, be16_to_cpu(cqe->sl_vid)); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid)); /* Push it up the stack */ netif_receive_skb(skb); diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 46262ea610fd..7be9788ed0f6 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -1290,7 +1290,7 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb) skb->csum = csum_sub(skb->csum, vsum); } /* pop tag */ - __vlan_hwaccel_put_tag(skb, ntohs(veh->h_vlan_TCI)); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(veh->h_vlan_TCI)); memmove(va + VLAN_HLEN, va, 2 * ETH_ALEN); skb->len -= VLAN_HLEN; skb->data_len -= VLAN_HLEN; diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c index 60267e91cbda..d3b47003a575 100644 --- a/drivers/net/ethernet/natsemi/ns83820.c +++ b/drivers/net/ethernet/natsemi/ns83820.c @@ -911,7 +911,7 @@ static void rx_irq(struct net_device *ndev) unsigned short tag; tag = ntohs(extsts & EXTSTS_VTG_MASK); - __vlan_hwaccel_put_tag(skb, tag); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_IPV6), tag); } #endif rx_rc = netif_rx(skb); diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index ec82d59b03ed..51b00941302c 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -8555,7 +8555,7 @@ static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag) skb->protocol = eth_type_trans(skb, dev); if (vlan_tag && sp->vlan_strip_flag) - __vlan_hwaccel_put_tag(skb, vlan_tag); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); if (sp->config.napi) netif_receive_skb(skb); else diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index a9396142201c..cbfaed5f2f8d 100644 --- 
a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -312,7 +312,7 @@ vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan, if (ext_info->vlan && ring->vlan_tag_strip == VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) - __vlan_hwaccel_put_tag(skb, ext_info->vlan); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ext_info->vlan); napi_gro_receive(ring->napi_p, skb); vxge_debug_entryexit(VXGE_TRACE, diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index fcad64081d74..b003fe53c8e2 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -2969,7 +2969,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit) vlanflags & NV_RX3_VLAN_TAG_PRESENT) { u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK; - __vlan_hwaccel_put_tag(skb, vid); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); } napi_gro_receive(&np->napi, skb); u64_stats_update_begin(&np->swstats_rx_syncp); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index a85ca63a2c9e..56223a6aa408 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -1050,7 +1050,7 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter, skb->protocol = eth_type_trans(skb, netdev); if (vid != 0xffff) - __vlan_hwaccel_put_tag(skb, vid); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); napi_gro_receive(&sds_ring->napi, skb); @@ -1153,7 +1153,7 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter, } if (vid != 0xffff) - __vlan_hwaccel_put_tag(skb, vid); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); netif_receive_skb(skb); adapter->stats.lro_pkts++; @@ -1518,7 +1518,7 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter, skb->protocol = eth_type_trans(skb, netdev); if (vid != 0xffff) - __vlan_hwaccel_put_tag(skb, vid); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); napi_gro_receive(&sds_ring->napi, skb); @@ -1615,7 +1615,7 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter, } if (vid != 0xffff) - __vlan_hwaccel_put_tag(skb, vid); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); netif_receive_skb(skb); diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index a9016acc2d6a..44cf72ac2489 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -1498,7 +1498,7 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev, skb->ip_summed = CHECKSUM_UNNECESSARY; skb_record_rx_queue(skb, rx_ring->cq_id); if (vlan_id != 0xffff) - __vlan_hwaccel_put_tag(skb, vlan_id); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id); napi_gro_frags(napi); } @@ -1574,7 +1574,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev, skb_record_rx_queue(skb, rx_ring->cq_id); if (vlan_id != 0xffff) - __vlan_hwaccel_put_tag(skb, vlan_id); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id); if (skb->ip_summed == CHECKSUM_UNNECESSARY) napi_gro_receive(napi, skb); else @@ -1670,7 +1670,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev, skb_record_rx_queue(skb, rx_ring->cq_id); if (vlan_id != 0xffff) - __vlan_hwaccel_put_tag(skb, vlan_id); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id); if (skb->ip_summed == CHECKSUM_UNNECESSARY) napi_gro_receive(&rx_ring->napi, skb); else @@ -1975,7 +1975,7 @@ static void 
ql_process_mac_split_rx_intr(struct ql_adapter *qdev, rx_ring->rx_bytes += skb->len; skb_record_rx_queue(skb, rx_ring->cq_id); if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0)) - __vlan_hwaccel_put_tag(skb, vlan_id); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id); if (skb->ip_summed == CHECKSUM_UNNECESSARY) napi_gro_receive(&rx_ring->napi, skb); else diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 6d03b52e56f1..7d1fb9ad1296 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -431,7 +431,7 @@ static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb, cp->dev->stats.rx_bytes += skb->len; if (opts2 & RxVlanTagged) - __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff)); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); napi_gro_receive(&cp->napi, skb); } diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 86d5d7909d10..c6dac38fd9cc 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1843,7 +1843,7 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) u32 opts2 = le32_to_cpu(desc->opts2); if (opts2 & RxVlanTag) - __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff)); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); } static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd) diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index f7050c573519..571452e786d5 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -1148,7 +1148,7 @@ NETIF_RX_MUX(struct bdx_priv *priv, u32 rxd_val1, u16 rxd_vlan, priv->ndev->name, GET_RXD_VLAN_ID(rxd_vlan), GET_RXD_VTAG(rxd_val1)); - __vlan_hwaccel_put_tag(skb, GET_RXD_VLAN_TCI(rxd_vlan)); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), GET_RXD_VLAN_TCI(rxd_vlan)); } netif_receive_skb(skb); } diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index c6014916f622..ca98acabf1b4 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -1936,7 +1936,7 @@ static int rhine_rx(struct net_device *dev, int limit) skb->protocol = eth_type_trans(skb, dev); if (unlikely(desc_length & DescTag)) - __vlan_hwaccel_put_tag(skb, vlan_tci); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); netif_receive_skb(skb); u64_stats_update_begin(&rp->rx_stats.syncp); diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 91cd59146c24..fb6248956ee2 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -2080,7 +2080,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) if (rd->rdesc0.RSR & RSR_DETAG) { u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG)); - __vlan_hwaccel_put_tag(skb, vid); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); } netif_rx(skb); diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index b7e9cd7ca6d5..cc6dfe4102fd 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -221,7 +221,7 @@ static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_ /* map MBIM session to VLAN */ if (tci) - vlan_put_tag(skb, tci); + vlan_put_tag(skb, htons(ETH_P_8021Q), tci); err: return skb; } diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c 
b/drivers/net/vmxnet3/vmxnet3_drv.c index 27b889992ab8..55a62cae2cb4 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1293,7 +1293,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, skb->protocol = eth_type_trans(skb, adapter->netdev); if (unlikely(rcd->ts)) - __vlan_hwaccel_put_tag(skb, rcd->tci); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci); if (adapter->netdev->features & NETIF_F_LRO) netif_receive_skb(skb); diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 2c9fb65f8267..8086ff9988b1 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -157,9 +157,18 @@ static inline bool vlan_uses_dev(const struct net_device *dev) } #endif +static inline bool vlan_hw_offload_capable(netdev_features_t features, + __be16 proto) +{ + if (proto == htons(ETH_P_8021Q) && features & NETIF_F_HW_VLAN_CTAG_TX) + return true; + return false; +} + /** * vlan_insert_tag - regular VLAN tag inserting * @skb: skbuff to tag + * @vlan_proto: VLAN encapsulation protocol * @vlan_tci: VLAN TCI to insert * * Inserts the VLAN tag into @skb as part of the payload @@ -170,7 +179,8 @@ static inline bool vlan_uses_dev(const struct net_device *dev) * * Does not change skb->protocol so this function can be used during receive. */ -static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, u16 vlan_tci) +static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci) { struct vlan_ethhdr *veth; @@ -185,7 +195,7 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, u16 vlan_tci) skb->mac_header -= VLAN_HLEN; /* first, the ethernet type */ - veth->h_vlan_proto = htons(ETH_P_8021Q); + veth->h_vlan_proto = vlan_proto; /* now, the TCI */ veth->h_vlan_TCI = htons(vlan_tci); @@ -204,24 +214,28 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, u16 vlan_tci) * Following the skb_unshare() example, in case of error, the calling function * doesn't have to worry about freeing the original skb. */ -static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci) +static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci) { - skb = vlan_insert_tag(skb, vlan_tci); + skb = vlan_insert_tag(skb, vlan_proto, vlan_tci); if (skb) - skb->protocol = htons(ETH_P_8021Q); + skb->protocol = vlan_proto; return skb; } /** * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting * @skb: skbuff to tag + * @vlan_proto: VLAN encapsulation protocol * @vlan_tci: VLAN TCI to insert * * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest */ static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci) { + skb->vlan_proto = vlan_proto; skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci; return skb; } @@ -236,12 +250,13 @@ static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb, * Assumes skb->dev is the target that will xmit this frame. * Returns a VLAN tagged skb. 
*/ -static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb, u16 vlan_tci) +static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci) { - if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX) { - return __vlan_hwaccel_put_tag(skb, vlan_tci); + if (vlan_hw_offload_capable(skb->dev->features, vlan_proto)) { + return __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); } else { - return __vlan_put_tag(skb, vlan_tci); + return __vlan_put_tag(skb, vlan_proto, vlan_tci); } } diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index e27d1c782f32..f5bed7b31954 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -387,6 +387,7 @@ typedef unsigned char *sk_buff_data_t; * @secmark: security marking * @mark: Generic packet mark * @dropcount: total number of sk_receive_queue overflows + * @vlan_proto: vlan encapsulation protocol * @vlan_tci: vlan tag control information * @inner_transport_header: Inner transport layer header (encapsulation) * @inner_network_header: Network layer header (encapsulation) @@ -465,6 +466,7 @@ struct sk_buff { __u32 rxhash; + __be16 vlan_proto; __u16 vlan_tci; #ifdef CONFIG_NET_SCHED diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index 4e4c360353ea..bdb0b9d2e9cf 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c @@ -8,11 +8,12 @@ bool vlan_do_receive(struct sk_buff **skbp) { struct sk_buff *skb = *skbp; + __be16 vlan_proto = skb->vlan_proto; u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK; struct net_device *vlan_dev; struct vlan_pcpu_stats *rx_stats; - vlan_dev = vlan_find_dev(skb->dev, htons(ETH_P_8021Q), vlan_id); + vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id); if (!vlan_dev) return false; @@ -38,7 +39,8 @@ bool vlan_do_receive(struct sk_buff **skbp) * original position later */ skb_push(skb, offset); - skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci); + skb = *skbp = vlan_insert_tag(skb, skb->vlan_proto, + skb->vlan_tci); if (!skb) return false; skb_pull(skb, offset + VLAN_HLEN); @@ -127,7 +129,7 @@ struct sk_buff *vlan_untag(struct sk_buff *skb) vhdr = (struct vlan_hdr *) skb->data; vlan_tci = ntohs(vhdr->h_vlan_TCI); - __vlan_hwaccel_put_tag(skb, vlan_tci); + __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); skb_pull_rcsum(skb, VLAN_HLEN); vlan_set_encap_proto(skb, vhdr); diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index d7457b7e1b95..8af508536d36 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -167,7 +167,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb, u16 vlan_tci; vlan_tci = vlan->vlan_id; vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb); - skb = __vlan_hwaccel_put_tag(skb, vlan_tci); + skb = __vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci); } skb->dev = vlan->real_dev; diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 6a4f728680ae..379061c72549 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -341,7 +341,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac, } if (vid != -1) - skb = vlan_insert_tag(skb, vid); + skb = vlan_insert_tag(skb, htons(ETH_P_8021Q), vid); skb_reset_mac_header(skb); skb->protocol = eth_type_trans(skb, soft_iface); diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index bd61bf5159c9..1ed75bfd8d1d 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -535,7 +535,7 @@ static struct net_device *brnf_get_logical_dev(struct 
sk_buff *skb, const struct if (brnf_pass_vlan_indev == 0 || !vlan_tx_tag_present(skb)) return br; - vlan = __vlan_find_dev_deep(br, htons(ETH_P_8021Q), + vlan = __vlan_find_dev_deep(br, skb->vlan_proto, vlan_tx_tag_get(skb) & VLAN_VID_MASK); return vlan ? vlan : br; diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index c3076e2c4294..bd58b45f5f90 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -175,7 +175,7 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br, * mac header. */ skb_push(skb, ETH_HLEN); - skb = __vlan_put_tag(skb, skb->vlan_tci); + skb = __vlan_put_tag(skb, skb->vlan_proto, skb->vlan_tci); if (!skb) goto out; /* put skb->data back to where it was */ @@ -217,7 +217,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, /* PVID is set on this port. Any untagged ingress * frame is considered to belong to this vlan. */ - __vlan_hwaccel_put_tag(skb, pvid); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pvid); return true; } diff --git a/net/core/dev.c b/net/core/dev.c index 07a8e9dc43fc..3a12ee132b59 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2482,8 +2482,9 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, features = netif_skb_features(skb); if (vlan_tx_tag_present(skb) && - !(features & NETIF_F_HW_VLAN_CTAG_TX)) { - skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); + !vlan_hw_offload_capable(features, skb->vlan_proto)) { + skb = __vlan_put_tag(skb, skb->vlan_proto, + vlan_tx_tag_get(skb)); if (unlikely(!skb)) goto out; diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 8de961e67cf7..209d84253dd5 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -383,8 +383,9 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, if (__netif_tx_trylock(txq)) { if (!netif_xmit_stopped(txq)) { if (vlan_tx_tag_present(skb) && - !(netif_skb_features(skb) & NETIF_F_HW_VLAN_CTAG_TX)) { - skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); + !vlan_hw_offload_capable(netif_skb_features(skb), + skb->vlan_proto)) { + skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb)); if (unlikely(!skb)) break; skb->vlan_tci = 0; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index ba646145cd5c..a92d9e7d10f7 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -707,6 +707,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) new->tc_verd = old->tc_verd; #endif #endif + new->vlan_proto = old->vlan_proto; new->vlan_tci = old->vlan_tci; skb_copy_secmark(new, old); diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index d4d5363c7ba7..894b6cbdd929 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -98,7 +98,7 @@ static int pop_vlan(struct sk_buff *skb) if (unlikely(err)) return err; - __vlan_hwaccel_put_tag(skb, ntohs(tci)); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(tci)); return 0; } @@ -110,7 +110,7 @@ static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vla /* push down current VLAN tag */ current_tag = vlan_tx_tag_get(skb); - if (!__vlan_put_tag(skb, current_tag)) + if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag)) return -ENOMEM; if (skb->ip_summed == CHECKSUM_COMPLETE) @@ -118,7 +118,7 @@ static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vla + (2 * ETH_ALEN), VLAN_HLEN, 0)); } - __vlan_hwaccel_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT); + __vlan_hwaccel_put_tag(skb, vlan->vlan_tpid, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT); return 
0; } diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index b7d0b7c3fe2c..7bb5d4f6bb90 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -401,7 +401,7 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex, if (!nskb) return -ENOMEM; - nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb)); + nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb)); if (!nskb) return -ENOMEM; -- cgit v1.2.3-70-g09d2 From 8ad227ff89a7e6f05d07cd0acfd95ed3a24450ca Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Fri, 19 Apr 2013 02:04:31 +0000 Subject: net: vlan: add 802.1ad support Add support for 802.1ad VLAN devices. This mainly consists of checking for ETH_P_8021AD in addition to ETH_P_8021Q in a couple of places and check offloading capabilities based on the used protocol. Configuration is done using "ip link": # ip link add link eth0 eth0.1000 \ type vlan proto 802.1ad id 1000 # ip link add link eth0.1000 eth0.1000.1000 \ type vlan proto 802.1q id 1000 52:54:00:12:34:56 > 92:b1:54:28:e4:8c, ethertype 802.1Q (0x8100), length 106: vlan 1000, p 0, ethertype 802.1Q, vlan 1000, p 0, ethertype IPv4, (tos 0x0, ttl 64, id 0, offset 0, flags [DF], proto ICMP (1), length 84) 20.1.0.2 > 20.1.0.1: ICMP echo request, id 3003, seq 8, length 64 92:b1:54:28:e4:8c > 52:54:00:12:34:56, ethertype 802.1Q-QinQ (0x88a8), length 106: vlan 1000, p 0, ethertype 802.1Q, vlan 1000, p 0, ethertype IPv4, (tos 0x0, ttl 64, id 47944, offset 0, flags [none], proto ICMP (1), length 84) 20.1.0.1 > 20.1.0.2: ICMP echo reply, id 3003, seq 8, length 64 Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- include/linux/if_vlan.h | 6 ++++-- include/linux/netdev_features.h | 6 ++++++ include/uapi/linux/if_link.h | 1 + net/8021q/Kconfig | 2 +- net/8021q/vlan.h | 3 +++ net/8021q/vlan_core.c | 18 ++++++++++++++---- net/8021q/vlan_netlink.c | 25 ++++++++++++++++++++++--- net/core/dev.c | 16 ++++++++++------ 8 files changed, 61 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 8086ff9988b1..a78f9390da87 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -162,6 +162,8 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features, { if (proto == htons(ETH_P_8021Q) && features & NETIF_F_HW_VLAN_CTAG_TX) return true; + if (proto == htons(ETH_P_8021AD) && features & NETIF_F_HW_VLAN_STAG_TX) + return true; return false; } @@ -271,9 +273,9 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) { struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data; - if (veth->h_vlan_proto != htons(ETH_P_8021Q)) { + if (veth->h_vlan_proto != htons(ETH_P_8021Q) && + veth->h_vlan_proto != htons(ETH_P_8021AD)) return -EINVAL; - } *vlan_tci = ntohs(veth->h_vlan_TCI); return 0; diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 785913b8983d..cbaa027ef5a7 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -25,6 +25,9 @@ enum { NETIF_F_HW_VLAN_CTAG_TX_BIT, /* Transmit VLAN CTAG HW acceleration */ NETIF_F_HW_VLAN_CTAG_RX_BIT, /* Receive VLAN CTAG HW acceleration */ NETIF_F_HW_VLAN_CTAG_FILTER_BIT,/* Receive filtering on VLAN CTAGs */ + NETIF_F_HW_VLAN_STAG_TX_BIT, /* Transmit VLAN STAG HW acceleration */ + NETIF_F_HW_VLAN_STAG_RX_BIT, /* Receive VLAN STAG HW acceleration */ + NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */ NETIF_F_VLAN_CHALLENGED_BIT, /* Device cannot 
handle VLAN packets */ NETIF_F_GSO_BIT, /* Enable software GSO. */ NETIF_F_LLTX_BIT, /* LockLess TX - deprecated. Please */ @@ -83,6 +86,9 @@ enum { #define NETIF_F_HW_VLAN_CTAG_FILTER __NETIF_F(HW_VLAN_CTAG_FILTER) #define NETIF_F_HW_VLAN_CTAG_RX __NETIF_F(HW_VLAN_CTAG_RX) #define NETIF_F_HW_VLAN_CTAG_TX __NETIF_F(HW_VLAN_CTAG_TX) +#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) +#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) +#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) #define NETIF_F_IP_CSUM __NETIF_F(IP_CSUM) #define NETIF_F_IPV6_CSUM __NETIF_F(IPV6_CSUM) #define NETIF_F_LLTX __NETIF_F(LLTX) diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 9922704f08af..e3163544f339 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -250,6 +250,7 @@ enum { IFLA_VLAN_FLAGS, IFLA_VLAN_EGRESS_QOS, IFLA_VLAN_INGRESS_QOS, + IFLA_VLAN_PROTOCOL, __IFLA_VLAN_MAX, }; diff --git a/net/8021q/Kconfig b/net/8021q/Kconfig index 8f7517df41a5..b85a91fa61f1 100644 --- a/net/8021q/Kconfig +++ b/net/8021q/Kconfig @@ -3,7 +3,7 @@ # config VLAN_8021Q - tristate "802.1Q VLAN Support" + tristate "802.1Q/802.1ad VLAN Support" ---help--- Select this and you will be able to create 802.1Q VLAN interfaces on your ethernet interfaces. 802.1Q VLAN supports almost diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h index 245de9653db0..abc9cb631c47 100644 --- a/net/8021q/vlan.h +++ b/net/8021q/vlan.h @@ -91,6 +91,7 @@ static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev) enum vlan_protos { VLAN_PROTO_8021Q = 0, + VLAN_PROTO_8021AD, VLAN_PROTO_NUM, }; @@ -116,6 +117,8 @@ static inline unsigned int vlan_proto_idx(__be16 proto) switch (proto) { case __constant_htons(ETH_P_8021Q): return VLAN_PROTO_8021Q; + case __constant_htons(ETH_P_8021AD): + return VLAN_PROTO_8021AD; default: BUG(); } diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index bdb0b9d2e9cf..ebfa2fceb88b 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c @@ -194,6 +194,18 @@ struct vlan_vid_info { int refcount; }; +static bool vlan_hw_filter_capable(const struct net_device *dev, + const struct vlan_vid_info *vid_info) +{ + if (vid_info->proto == htons(ETH_P_8021Q) && + dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) + return true; + if (vid_info->proto == htons(ETH_P_8021AD) && + dev->features & NETIF_F_HW_VLAN_STAG_FILTER) + return true; + return false; +} + static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info, __be16 proto, u16 vid) { @@ -231,8 +243,7 @@ static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid, if (!vid_info) return -ENOMEM; - if (proto == htons(ETH_P_8021Q) && - dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { + if (vlan_hw_filter_capable(dev, vid_info)) { err = ops->ndo_vlan_rx_add_vid(dev, proto, vid); if (err) { kfree(vid_info); @@ -290,8 +301,7 @@ static void __vlan_vid_del(struct vlan_info *vlan_info, u16 vid = vid_info->vid; int err; - if (proto == htons(ETH_P_8021Q) && - dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { + if (vlan_hw_filter_capable(dev, vid_info)) { err = ops->ndo_vlan_rx_kill_vid(dev, proto, vid); if (err) { pr_warn("failed to kill vid %04x/%d for device %s\n", diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c index a1a956ab39a5..309129732285 100644 --- a/net/8021q/vlan_netlink.c +++ b/net/8021q/vlan_netlink.c @@ -23,6 +23,7 @@ static const struct nla_policy vlan_policy[IFLA_VLAN_MAX + 1] = { [IFLA_VLAN_FLAGS] = { .len = 
sizeof(struct ifla_vlan_flags) }, [IFLA_VLAN_EGRESS_QOS] = { .type = NLA_NESTED }, [IFLA_VLAN_INGRESS_QOS] = { .type = NLA_NESTED }, + [IFLA_VLAN_PROTOCOL] = { .type = NLA_U16 }, }; static const struct nla_policy vlan_map_policy[IFLA_VLAN_QOS_MAX + 1] = { @@ -53,6 +54,16 @@ static int vlan_validate(struct nlattr *tb[], struct nlattr *data[]) if (!data) return -EINVAL; + if (data[IFLA_VLAN_PROTOCOL]) { + switch (nla_get_be16(data[IFLA_VLAN_PROTOCOL])) { + case __constant_htons(ETH_P_8021Q): + case __constant_htons(ETH_P_8021AD): + break; + default: + return -EPROTONOSUPPORT; + } + } + if (data[IFLA_VLAN_ID]) { id = nla_get_u16(data[IFLA_VLAN_ID]); if (id >= VLAN_VID_MASK) @@ -107,6 +118,7 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev, { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); struct net_device *real_dev; + __be16 proto; int err; if (!data[IFLA_VLAN_ID]) @@ -118,7 +130,12 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev, if (!real_dev) return -ENODEV; - vlan->vlan_proto = htons(ETH_P_8021Q); + if (data[IFLA_VLAN_PROTOCOL]) + proto = nla_get_be16(data[IFLA_VLAN_PROTOCOL]); + else + proto = htons(ETH_P_8021Q); + + vlan->vlan_proto = proto; vlan->vlan_id = nla_get_u16(data[IFLA_VLAN_ID]); vlan->real_dev = real_dev; vlan->flags = VLAN_FLAG_REORDER_HDR; @@ -152,7 +169,8 @@ static size_t vlan_get_size(const struct net_device *dev) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); - return nla_total_size(2) + /* IFLA_VLAN_ID */ + return nla_total_size(2) + /* IFLA_VLAN_PROTOCOL */ + nla_total_size(2) + /* IFLA_VLAN_ID */ sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */ vlan_qos_map_size(vlan->nr_ingress_mappings) + vlan_qos_map_size(vlan->nr_egress_mappings); @@ -167,7 +185,8 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev) struct nlattr *nest; unsigned int i; - if (nla_put_u16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id)) + if (nla_put_be16(skb, IFLA_VLAN_PROTOCOL, vlan->vlan_proto) || + nla_put_u16(skb, IFLA_VLAN_ID, vlan->vlan_id)) goto nla_put_failure; if (vlan->flags) { f.flags = vlan->flags; diff --git a/net/core/dev.c b/net/core/dev.c index 3a12ee132b59..fad4c385f7a1 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2212,7 +2212,7 @@ __be16 skb_network_protocol(struct sk_buff *skb) __be16 type = skb->protocol; int vlan_depth = ETH_HLEN; - while (type == htons(ETH_P_8021Q)) { + while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) { struct vlan_hdr *vh; if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN))) @@ -2428,20 +2428,22 @@ netdev_features_t netif_skb_features(struct sk_buff *skb) if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs) features &= ~NETIF_F_GSO_MASK; - if (protocol == htons(ETH_P_8021Q)) { + if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) { struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; protocol = veh->h_vlan_encapsulated_proto; } else if (!vlan_tx_tag_present(skb)) { return harmonize_features(skb, protocol, features); } - features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX); + features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX); - if (protocol != htons(ETH_P_8021Q)) { + if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD)) { return harmonize_features(skb, protocol, features); } else { features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | - NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX; + NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX | + 
NETIF_F_HW_VLAN_STAG_TX; return harmonize_features(skb, protocol, features); } } @@ -3360,6 +3362,7 @@ static bool skb_pfmemalloc_protocol(struct sk_buff *skb) case __constant_htons(ETH_P_IP): case __constant_htons(ETH_P_IPV6): case __constant_htons(ETH_P_8021Q): + case __constant_htons(ETH_P_8021AD): return true; default: return false; @@ -3400,7 +3403,8 @@ another_round: __this_cpu_inc(softnet_data.processed); - if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) { + if (skb->protocol == cpu_to_be16(ETH_P_8021Q) || + skb->protocol == cpu_to_be16(ETH_P_8021AD)) { skb = vlan_untag(skb); if (unlikely(!skb)) goto unlock; -- cgit v1.2.3-70-g09d2 From e32123e59871b9389d5b3fe9318611c7f1d1307a Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Wed, 17 Apr 2013 06:46:57 +0000 Subject: netlink: rename ssk to sk in struct netlink_skb_params Memory mapped netlink needs to store the receiving userspace socket when sending from the kernel to userspace. Rename 'ssk' to 'sk' to avoid confusion. Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- include/linux/netlink.h | 2 +- net/ipv4/inet_diag.c | 6 +++--- net/ipv4/udp_diag.c | 4 ++-- net/netfilter/nfnetlink_log.c | 2 +- net/netlink/af_netlink.c | 2 +- net/sched/cls_flow.c | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netlink.h b/include/linux/netlink.h index e0f746b7b95c..d8e9264ae04a 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -19,7 +19,7 @@ struct netlink_skb_parms { struct scm_creds creds; /* Skb credentials */ __u32 portid; __u32 dst_group; - struct sock *ssk; + struct sock *sk; }; #define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb)) diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 8620408af574..5f648751fce2 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -324,7 +324,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s } err = sk_diag_fill(sk, rep, req, - sk_user_ns(NETLINK_CB(in_skb).ssk), + sk_user_ns(NETLINK_CB(in_skb).sk), NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, 0, nlh); if (err < 0) { @@ -630,7 +630,7 @@ static int inet_csk_diag_dump(struct sock *sk, return 0; return inet_csk_diag_fill(sk, skb, r, - sk_user_ns(NETLINK_CB(cb->skb).ssk), + sk_user_ns(NETLINK_CB(cb->skb).sk), NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); } @@ -805,7 +805,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, } err = inet_diag_fill_req(skb, sk, req, - sk_user_ns(NETLINK_CB(cb->skb).ssk), + sk_user_ns(NETLINK_CB(cb->skb).sk), NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, cb->nlh); if (err < 0) { diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c index 369a781851ad..7927db0a9279 100644 --- a/net/ipv4/udp_diag.c +++ b/net/ipv4/udp_diag.c @@ -25,7 +25,7 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, return 0; return inet_sk_diag_fill(sk, NULL, skb, req, - sk_user_ns(NETLINK_CB(cb->skb).ssk), + sk_user_ns(NETLINK_CB(cb->skb).sk), NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); } @@ -71,7 +71,7 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb, goto out; err = inet_sk_diag_fill(sk, NULL, rep, req, - sk_user_ns(NETLINK_CB(in_skb).ssk), + sk_user_ns(NETLINK_CB(in_skb).sk), NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, 0, nlh); if (err < 0) { diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 1a0be2af1dd8..50aaf716cd68 100644 --- 
a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -824,7 +824,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb, inst = instance_create(net, group_num, NETLINK_CB(skb).portid, - sk_user_ns(NETLINK_CB(skb).ssk)); + sk_user_ns(NETLINK_CB(skb).sk)); if (IS_ERR(inst)) { ret = PTR_ERR(inst); goto out; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index f20a81005177..978a61f7c87f 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -891,7 +891,7 @@ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb, if (nlk->netlink_rcv != NULL) { ret = skb->len; skb_set_owner_r(skb, sk); - NETLINK_CB(skb).ssk = ssk; + NETLINK_CB(skb).sk = ssk; nlk->netlink_rcv(skb); consume_skb(skb); } else { diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index aa36a8c8b33b..7881e2fccbc2 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c @@ -393,7 +393,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, return -EOPNOTSUPP; if ((keymask & (FLOW_KEY_SKUID|FLOW_KEY_SKGID)) && - sk_user_ns(NETLINK_CB(in_skb).ssk) != &init_user_ns) + sk_user_ns(NETLINK_CB(in_skb).sk) != &init_user_ns) return -EOPNOTSUPP; } -- cgit v1.2.3-70-g09d2 From 0ebd0ac5ff01ebf412e1bd3c33620ef7ffc5d866 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Wed, 17 Apr 2013 06:46:58 +0000 Subject: net: add function to allocate sk_buff head without data area Add a function to allocate a sk_buff head without any data. This will be used by memory mapped netlink to attach data from the mmaped area to the skb. Additionally change skb_release_all() to check whether the skb has a data area to allow the skb destructor to clear the data pointer in case only a head has been allocated. Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- include/linux/skbuff.h | 6 ++++++ net/core/skbuff.c | 30 +++++++++++++++++++++++++++++- 2 files changed, 35 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index f5bed7b31954..2e0ced1af3b1 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -651,6 +651,12 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size, return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE); } +extern struct sk_buff *__alloc_skb_head(gfp_t priority, int node); +static inline struct sk_buff *alloc_skb_head(gfp_t priority) +{ + return __alloc_skb_head(priority, -1); +} + extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); extern struct sk_buff *skb_clone(struct sk_buff *skb, diff --git a/net/core/skbuff.c b/net/core/skbuff.c index a92d9e7d10f7..898cf5c566f9 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -179,6 +179,33 @@ out: * */ +struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node) +{ + struct sk_buff *skb; + + /* Get the HEAD */ + skb = kmem_cache_alloc_node(skbuff_head_cache, + gfp_mask & ~__GFP_DMA, node); + if (!skb) + goto out; + + /* + * Only clear those fields we need to clear, not those that we will + * actually initialise below. Hence, don't put any more fields after + * the tail pointer in struct sk_buff! 
+ */ + memset(skb, 0, offsetof(struct sk_buff, tail)); + skb->data = NULL; + skb->truesize = sizeof(struct sk_buff); + atomic_set(&skb->users, 1); + +#ifdef NET_SKBUFF_DATA_USES_OFFSET + skb->mac_header = ~0U; +#endif +out: + return skb; +} + /** * __alloc_skb - allocate a network buffer * @size: size to allocate @@ -584,7 +611,8 @@ static void skb_release_head_state(struct sk_buff *skb) static void skb_release_all(struct sk_buff *skb) { skb_release_head_state(skb); - skb_release_data(skb); + if (likely(skb->data)) + skb_release_data(skb); } /** -- cgit v1.2.3-70-g09d2 From 9652e931e73be7e54a9c40e9bcd4bbdafe92a406 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Wed, 17 Apr 2013 06:47:02 +0000 Subject: netlink: add mmap'ed netlink helper functions Add helper functions for looking up mmap'ed frame headers, reading and writing their status, allocating skbs with mmap'ed data areas and a poll function. Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- include/linux/netlink.h | 7 ++ net/netlink/af_netlink.c | 185 ++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 190 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netlink.h b/include/linux/netlink.h index d8e9264ae04a..07c473848dbd 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -15,10 +15,17 @@ static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb) return (struct nlmsghdr *)skb->data; } +enum netlink_skb_flags { + NETLINK_SKB_MMAPED = 0x1, /* Packet data is mmaped */ + NETLINK_SKB_TX = 0x2, /* Packet was sent by userspace */ + NETLINK_SKB_DELIVERED = 0x4, /* Packet was delivered */ +}; + struct netlink_skb_parms { struct scm_creds creds; /* Skb credentials */ __u32 portid; __u32 dst_group; + __u32 flags; struct sock *sk; }; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 1d3c7128e90e..6560635fd25c 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -56,6 +56,7 @@ #include #include #include +#include #include #include @@ -89,6 +90,7 @@ EXPORT_SYMBOL_GPL(nl_table); static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); static int netlink_dump(struct sock *sk); +static void netlink_skb_destructor(struct sk_buff *skb); DEFINE_RWLOCK(nl_table_lock); EXPORT_SYMBOL_GPL(nl_table_lock); @@ -109,6 +111,11 @@ static inline struct hlist_head *nl_portid_hashfn(struct nl_portid_hash *hash, u } #ifdef CONFIG_NETLINK_MMAP +static bool netlink_skb_is_mmaped(const struct sk_buff *skb) +{ + return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED; +} + static __pure struct page *pgvec_to_page(const void *addr) { if (is_vmalloc_addr(addr)) @@ -332,8 +339,154 @@ out: mutex_unlock(&nlk->pg_vec_lock); return 0; } + +static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr) +{ +#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 + struct page *p_start, *p_end; + + /* First page is flushed through netlink_{get,set}_status */ + p_start = pgvec_to_page(hdr + PAGE_SIZE); + p_end = pgvec_to_page((void *)hdr + NL_MMAP_MSG_HDRLEN + hdr->nm_len - 1); + while (p_start <= p_end) { + flush_dcache_page(p_start); + p_start++; + } +#endif +} + +static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr) +{ + smp_rmb(); + flush_dcache_page(pgvec_to_page(hdr)); + return hdr->nm_status; +} + +static void netlink_set_status(struct nl_mmap_hdr *hdr, + enum nl_mmap_status status) +{ + hdr->nm_status = status; + flush_dcache_page(pgvec_to_page(hdr)); + smp_wmb(); +} + +static struct nl_mmap_hdr * +__netlink_lookup_frame(const struct 
netlink_ring *ring, unsigned int pos) +{ + unsigned int pg_vec_pos, frame_off; + + pg_vec_pos = pos / ring->frames_per_block; + frame_off = pos % ring->frames_per_block; + + return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size); +} + +static struct nl_mmap_hdr * +netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos, + enum nl_mmap_status status) +{ + struct nl_mmap_hdr *hdr; + + hdr = __netlink_lookup_frame(ring, pos); + if (netlink_get_status(hdr) != status) + return NULL; + + return hdr; +} + +static struct nl_mmap_hdr * +netlink_current_frame(const struct netlink_ring *ring, + enum nl_mmap_status status) +{ + return netlink_lookup_frame(ring, ring->head, status); +} + +static struct nl_mmap_hdr * +netlink_previous_frame(const struct netlink_ring *ring, + enum nl_mmap_status status) +{ + unsigned int prev; + + prev = ring->head ? ring->head - 1 : ring->frame_max; + return netlink_lookup_frame(ring, prev, status); +} + +static void netlink_increment_head(struct netlink_ring *ring) +{ + ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0; +} + +static void netlink_forward_ring(struct netlink_ring *ring) +{ + unsigned int head = ring->head, pos = head; + const struct nl_mmap_hdr *hdr; + + do { + hdr = __netlink_lookup_frame(ring, pos); + if (hdr->nm_status == NL_MMAP_STATUS_UNUSED) + break; + if (hdr->nm_status != NL_MMAP_STATUS_SKIP) + break; + netlink_increment_head(ring); + } while (ring->head != head); +} + +static unsigned int netlink_poll(struct file *file, struct socket *sock, + poll_table *wait) +{ + struct sock *sk = sock->sk; + struct netlink_sock *nlk = nlk_sk(sk); + unsigned int mask; + + mask = datagram_poll(file, sock, wait); + + spin_lock_bh(&sk->sk_receive_queue.lock); + if (nlk->rx_ring.pg_vec) { + netlink_forward_ring(&nlk->rx_ring); + if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED)) + mask |= POLLIN | POLLRDNORM; + } + spin_unlock_bh(&sk->sk_receive_queue.lock); + + spin_lock_bh(&sk->sk_write_queue.lock); + if (nlk->tx_ring.pg_vec) { + if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED)) + mask |= POLLOUT | POLLWRNORM; + } + spin_unlock_bh(&sk->sk_write_queue.lock); + + return mask; +} + +static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb) +{ + return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN); +} + +static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk, + struct netlink_ring *ring, + struct nl_mmap_hdr *hdr) +{ + unsigned int size; + void *data; + + size = ring->frame_size - NL_MMAP_HDRLEN; + data = (void *)hdr + NL_MMAP_HDRLEN; + + skb->head = data; + skb->data = data; + skb_reset_tail_pointer(skb); + skb->end = skb->tail + size; + skb->len = 0; + + skb->destructor = netlink_skb_destructor; + NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED; + NETLINK_CB(skb).sk = sk; +} #else /* CONFIG_NETLINK_MMAP */ +#define netlink_skb_is_mmaped(skb) false #define netlink_mmap sock_no_mmap +#define netlink_poll datagram_poll #endif /* CONFIG_NETLINK_MMAP */ static void netlink_destroy_callback(struct netlink_callback *cb) @@ -350,7 +503,35 @@ static void netlink_consume_callback(struct netlink_callback *cb) static void netlink_skb_destructor(struct sk_buff *skb) { - sock_rfree(skb); +#ifdef CONFIG_NETLINK_MMAP + struct nl_mmap_hdr *hdr; + struct netlink_ring *ring; + struct sock *sk; + + /* If a packet from the kernel to userspace was freed because of an + * error without being delivered to userspace, the kernel must reset + * the status. 
In the direction userspace to kernel, the status is + * always reset here after the packet was processed and freed. + */ + if (netlink_skb_is_mmaped(skb)) { + hdr = netlink_mmap_hdr(skb); + sk = NETLINK_CB(skb).sk; + + if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) { + hdr->nm_len = 0; + netlink_set_status(hdr, NL_MMAP_STATUS_VALID); + } + ring = &nlk_sk(sk)->rx_ring; + + WARN_ON(atomic_read(&ring->pending) == 0); + atomic_dec(&ring->pending); + sock_put(sk); + + skb->data = NULL; + } +#endif + if (skb->sk != NULL) + sock_rfree(skb); } static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk) @@ -2349,7 +2530,7 @@ static const struct proto_ops netlink_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = netlink_getname, - .poll = datagram_poll, + .poll = netlink_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, -- cgit v1.2.3-70-g09d2 From f9c2288837ba072b21dba955f04a4c97eaa77b1e Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Wed, 17 Apr 2013 06:47:04 +0000 Subject: netlink: implement memory mapped recvmsg() Add support for mmap'ed recvmsg(). To allow the kernel to construct messages into the mapped area, a dataless skb is allocated and the data pointer is set to point into the ring frame. This means frames will be delivered to userspace in order of allocation instead of order of transmission. This usually doesn't matter since the order is either not determinable by userspace or message creation/transmission is serialized. The only case where this can have a visible difference is nfnetlink_queue. Userspace can't assume mmap'ed messages have ordered IDs anymore and needs to check this if using batched verdicts. For non-mapped sockets, nothing changes. Signed-off-by: Patrick McHardy Signed-off-by: David S. 
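[Editorial sketch, not part of the patch: a rough userspace consumer of the
mmap'ed RX ring delivered to by the code below. It assumes a bound AF_NETLINK
socket fd, the NETLINK_RX_RING socket option, struct nl_mmap_req/nl_mmap_hdr
and the NL_MMAP_* status values introduced earlier in this series; the frame
and block sizes are arbitrary, and process()/poll_for_more() are hypothetical
helpers. Error checking is omitted.]

	struct nl_mmap_req req = {
		.nm_block_size	= 4096,
		.nm_block_nr	= 64,
		.nm_frame_size	= 2048,
		.nm_frame_nr	= 64 * 4096 / 2048,
	};
	unsigned int ring_size = req.nm_block_nr * req.nm_block_size;
	unsigned int frame_off = 0;
	char buf[8192];
	void *ring;

	setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req));
	ring = mmap(NULL, ring_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	for (;;) {
		struct nl_mmap_hdr *hdr = ring + frame_off;

		if (hdr->nm_status == NL_MMAP_STATUS_VALID) {
			/* message payload follows the per-frame header */
			struct nlmsghdr *nlh = (void *)hdr + NL_MMAP_HDRLEN;

			process(nlh, hdr->nm_len);		/* hypothetical */
		} else if (hdr->nm_status == NL_MMAP_STATUS_COPY) {
			/* frame was too large for the ring: fall back to recv() */
			recv(fd, buf, sizeof(buf), MSG_DONTWAIT);
		} else {
			/* ring empty: wait for more, do not advance */
			poll_for_more(fd);			/* hypothetical */
			continue;
		}

		/* hand the frame back to the kernel and advance */
		hdr->nm_status = NL_MMAP_STATUS_UNUSED;
		frame_off = (frame_off + req.nm_frame_size) % ring_size;
	}

[End of editorial sketch.]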
Miller --- include/linux/netlink.h | 2 + net/netlink/af_netlink.c | 144 ++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 143 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 07c473848dbd..6358da5eeee8 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -64,6 +64,8 @@ extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group) extern void netlink_clear_multicast_users(struct sock *sk, unsigned int group); extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err); extern int netlink_has_listeners(struct sock *sk, unsigned int group); +extern struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size, + u32 dst_portid, gfp_t gfp_mask); extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock); extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid, __u32 group, gfp_t allocation); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 90504a0e42ab..d120b5d4d86a 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -116,6 +116,11 @@ static bool netlink_skb_is_mmaped(const struct sk_buff *skb) return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED; } +static bool netlink_rx_is_mmaped(struct sock *sk) +{ + return nlk_sk(sk)->rx_ring.pg_vec != NULL; +} + static bool netlink_tx_is_mmaped(struct sock *sk) { return nlk_sk(sk)->tx_ring.pg_vec != NULL; @@ -589,8 +594,54 @@ out: mutex_unlock(&nlk->pg_vec_lock); return err; } + +static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb) +{ + struct nl_mmap_hdr *hdr; + + hdr = netlink_mmap_hdr(skb); + hdr->nm_len = skb->len; + hdr->nm_group = NETLINK_CB(skb).dst_group; + hdr->nm_pid = NETLINK_CB(skb).creds.pid; + hdr->nm_uid = NETLINK_CB(skb).creds.uid; + hdr->nm_gid = NETLINK_CB(skb).creds.gid; + netlink_frame_flush_dcache(hdr); + netlink_set_status(hdr, NL_MMAP_STATUS_VALID); + + NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED; + kfree_skb(skb); +} + +static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb) +{ + struct netlink_sock *nlk = nlk_sk(sk); + struct netlink_ring *ring = &nlk->rx_ring; + struct nl_mmap_hdr *hdr; + + spin_lock_bh(&sk->sk_receive_queue.lock); + hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED); + if (hdr == NULL) { + spin_unlock_bh(&sk->sk_receive_queue.lock); + kfree_skb(skb); + sk->sk_err = ENOBUFS; + sk->sk_error_report(sk); + return; + } + netlink_increment_head(ring); + __skb_queue_tail(&sk->sk_receive_queue, skb); + spin_unlock_bh(&sk->sk_receive_queue.lock); + + hdr->nm_len = skb->len; + hdr->nm_group = NETLINK_CB(skb).dst_group; + hdr->nm_pid = NETLINK_CB(skb).creds.pid; + hdr->nm_uid = NETLINK_CB(skb).creds.uid; + hdr->nm_gid = NETLINK_CB(skb).creds.gid; + netlink_set_status(hdr, NL_MMAP_STATUS_COPY); +} + #else /* CONFIG_NETLINK_MMAP */ #define netlink_skb_is_mmaped(skb) false +#define netlink_rx_is_mmaped(sk) false #define netlink_tx_is_mmaped(sk) false #define netlink_mmap sock_no_mmap #define netlink_poll datagram_poll @@ -1381,7 +1432,14 @@ static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb) { int len = skb->len; - skb_queue_tail(&sk->sk_receive_queue, skb); +#ifdef CONFIG_NETLINK_MMAP + if (netlink_skb_is_mmaped(skb)) + netlink_queue_mmaped_skb(sk, skb); + else if (netlink_rx_is_mmaped(sk)) + netlink_ring_set_copied(sk, skb); + else +#endif /* CONFIG_NETLINK_MMAP */ + skb_queue_tail(&sk->sk_receive_queue, 
skb); sk->sk_data_ready(sk, len); return len; } @@ -1492,6 +1550,68 @@ retry: } EXPORT_SYMBOL(netlink_unicast); +struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size, + u32 dst_portid, gfp_t gfp_mask) +{ +#ifdef CONFIG_NETLINK_MMAP + struct sock *sk = NULL; + struct sk_buff *skb; + struct netlink_ring *ring; + struct nl_mmap_hdr *hdr; + unsigned int maxlen; + + sk = netlink_getsockbyportid(ssk, dst_portid); + if (IS_ERR(sk)) + goto out; + + ring = &nlk_sk(sk)->rx_ring; + /* fast-path without atomic ops for common case: non-mmaped receiver */ + if (ring->pg_vec == NULL) + goto out_put; + + skb = alloc_skb_head(gfp_mask); + if (skb == NULL) + goto err1; + + spin_lock_bh(&sk->sk_receive_queue.lock); + /* check again under lock */ + if (ring->pg_vec == NULL) + goto out_free; + + maxlen = ring->frame_size - NL_MMAP_HDRLEN; + if (maxlen < size) + goto out_free; + + netlink_forward_ring(ring); + hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED); + if (hdr == NULL) + goto err2; + netlink_ring_setup_skb(skb, sk, ring, hdr); + netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED); + atomic_inc(&ring->pending); + netlink_increment_head(ring); + + spin_unlock_bh(&sk->sk_receive_queue.lock); + return skb; + +err2: + kfree_skb(skb); + spin_unlock_bh(&sk->sk_receive_queue.lock); +err1: + sock_put(sk); + return NULL; + +out_free: + kfree_skb(skb); + spin_unlock_bh(&sk->sk_receive_queue.lock); +out_put: + sock_put(sk); +out: +#endif + return alloc_skb(size, gfp_mask); +} +EXPORT_SYMBOL_GPL(netlink_alloc_skb); + int netlink_has_listeners(struct sock *sk, unsigned int group) { int res = 0; @@ -2270,9 +2390,13 @@ static int netlink_dump(struct sock *sk) alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE); - skb = sock_rmalloc(sk, alloc_size, 0, GFP_KERNEL); + if (!netlink_rx_is_mmaped(sk) && + atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) + goto errout_skb; + skb = netlink_alloc_skb(sk, alloc_size, nlk->portid, GFP_KERNEL); if (!skb) goto errout_skb; + netlink_skb_set_owner_r(skb, sk); len = cb->dump(skb, cb); @@ -2327,6 +2451,19 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, if (cb == NULL) return -ENOBUFS; + /* Memory mapped dump requests need to be copied to avoid looping + * on the pending state in netlink_mmap_sendmsg() while the CB hold + * a reference to the skb. + */ + if (netlink_skb_is_mmaped(skb)) { + skb = skb_copy(skb, GFP_KERNEL); + if (skb == NULL) { + kfree(cb); + return -ENOBUFS; + } + } else + atomic_inc(&skb->users); + cb->dump = control->dump; cb->done = control->done; cb->nlh = nlh; @@ -2387,7 +2524,8 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err) if (err) payload += nlmsg_len(nlh); - skb = nlmsg_new(payload, GFP_KERNEL); + skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload), + NETLINK_CB(in_skb).portid, GFP_KERNEL); if (!skb) { struct sock *sk; -- cgit v1.2.3-70-g09d2 From ec464e5dc504a164c5dbff4a06812d495e44e34d Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Wed, 17 Apr 2013 06:47:08 +0000 Subject: netfilter: rename netlink related "pid" variables to "portid" Get rid of the confusing mix of pid and portid and use portid consistently for all netlink related socket identities. Signed-off-by: Patrick McHardy Signed-off-by: David S. 
Miller --- include/linux/netfilter/nfnetlink.h | 9 +++++---- include/net/netfilter/nf_conntrack.h | 2 +- include/net/netfilter/nf_conntrack_expect.h | 4 ++-- net/netfilter/nf_conntrack_core.c | 8 ++++---- net/netfilter/nf_conntrack_expect.c | 8 ++++---- net/netfilter/nfnetlink.c | 13 +++++++------ 6 files changed, 23 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index ecbb8e495912..60b164171daf 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -29,10 +29,11 @@ extern int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n); extern int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n); extern int nfnetlink_has_listeners(struct net *net, unsigned int group); -extern int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, - int echo, gfp_t flags); -extern int nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error); -extern int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u_int32_t pid, int flags); +extern int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid, + unsigned int group, int echo, gfp_t flags); +extern int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error); +extern int nfnetlink_unicast(struct sk_buff *skb, struct net *net, + u32 portid, int flags); extern void nfnl_lock(__u8 subsys_id); extern void nfnl_unlock(__u8 subsys_id); diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index caca0c4d6b4b..644d9c223d24 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -184,7 +184,7 @@ extern int nf_conntrack_hash_check_insert(struct nf_conn *ct); extern void nf_ct_delete_from_lists(struct nf_conn *ct); extern void nf_ct_dying_timeout(struct nf_conn *ct); -extern void nf_conntrack_flush_report(struct net *net, u32 pid, int report); +extern void nf_conntrack_flush_report(struct net *net, u32 portid, int report); extern bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff, u_int16_t l3num, diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h index cbbae7621e22..3f3aecbc8632 100644 --- a/include/net/netfilter/nf_conntrack_expect.h +++ b/include/net/netfilter/nf_conntrack_expect.h @@ -88,7 +88,7 @@ nf_ct_find_expectation(struct net *net, u16 zone, const struct nf_conntrack_tuple *tuple); void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp, - u32 pid, int report); + u32 portid, int report); static inline void nf_ct_unlink_expect(struct nf_conntrack_expect *exp) { nf_ct_unlink_expect_report(exp, 0, 0); @@ -106,7 +106,7 @@ void nf_ct_expect_init(struct nf_conntrack_expect *, unsigned int, u_int8_t, u_int8_t, const __be16 *, const __be16 *); void nf_ct_expect_put(struct nf_conntrack_expect *exp); int nf_ct_expect_related_report(struct nf_conntrack_expect *expect, - u32 pid, int report); + u32 portid, int report); static inline int nf_ct_expect_related(struct nf_conntrack_expect *expect) { return nf_ct_expect_related_report(expect, 0, 0); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 007e8c43d19a..54ddc2f8e7c9 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1260,7 +1260,7 @@ void nf_ct_iterate_cleanup(struct net *net, EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup); struct __nf_ct_flush_report { - u32 pid; + u32 
portid; int report; }; @@ -1275,7 +1275,7 @@ static int kill_report(struct nf_conn *i, void *data) /* If we fail to deliver the event, death_by_timeout() will retry */ if (nf_conntrack_event_report(IPCT_DESTROY, i, - fr->pid, fr->report) < 0) + fr->portid, fr->report) < 0) return 1; /* Avoid the delivery of the destroy event in death_by_timeout(). */ @@ -1298,10 +1298,10 @@ void nf_ct_free_hashtable(void *hash, unsigned int size) } EXPORT_SYMBOL_GPL(nf_ct_free_hashtable); -void nf_conntrack_flush_report(struct net *net, u32 pid, int report) +void nf_conntrack_flush_report(struct net *net, u32 portid, int report) { struct __nf_ct_flush_report fr = { - .pid = pid, + .portid = portid, .report = report, }; nf_ct_iterate_cleanup(net, kill_report, &fr); diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 8c10e3db3d9b..0adfdcc68bae 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -40,7 +40,7 @@ static struct kmem_cache *nf_ct_expect_cachep __read_mostly; /* nf_conntrack_expect helper functions */ void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp, - u32 pid, int report) + u32 portid, int report) { struct nf_conn_help *master_help = nfct_help(exp->master); struct net *net = nf_ct_exp_net(exp); @@ -54,7 +54,7 @@ void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp, hlist_del(&exp->lnode); master_help->expecting[exp->class]--; - nf_ct_expect_event_report(IPEXP_DESTROY, exp, pid, report); + nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report); nf_ct_expect_put(exp); NF_CT_STAT_INC(net, expect_delete); @@ -412,7 +412,7 @@ out: } int nf_ct_expect_related_report(struct nf_conntrack_expect *expect, - u32 pid, int report) + u32 portid, int report) { int ret; @@ -425,7 +425,7 @@ int nf_ct_expect_related_report(struct nf_conntrack_expect *expect, if (ret < 0) goto out; spin_unlock_bh(&nf_conntrack_lock); - nf_ct_expect_event_report(IPEXP_NEW, expect, pid, report); + nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report); return ret; out: spin_unlock_bh(&nf_conntrack_lock); diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index bc4c499adb13..1640bd7dcdf4 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -112,22 +112,23 @@ int nfnetlink_has_listeners(struct net *net, unsigned int group) } EXPORT_SYMBOL_GPL(nfnetlink_has_listeners); -int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, +int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid, unsigned int group, int echo, gfp_t flags) { - return nlmsg_notify(net->nfnl, skb, pid, group, echo, flags); + return nlmsg_notify(net->nfnl, skb, portid, group, echo, flags); } EXPORT_SYMBOL_GPL(nfnetlink_send); -int nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error) +int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error) { - return netlink_set_err(net->nfnl, pid, group, error); + return netlink_set_err(net->nfnl, portid, group, error); } EXPORT_SYMBOL_GPL(nfnetlink_set_err); -int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u_int32_t pid, int flags) +int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid, + int flags) { - return netlink_unicast(net->nfnl, skb, pid, flags); + return netlink_unicast(net->nfnl, skb, portid, flags); } EXPORT_SYMBOL_GPL(nfnetlink_unicast); -- cgit v1.2.3-70-g09d2 From 3ab1f683bf8be7aa7869cc3ffb8d1db2ec8c8307 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Wed, 17 Apr 2013 06:47:09 +0000 
Subject: nfnetlink: add support for memory mapped netlink Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- include/linux/netfilter/nfnetlink.h | 2 ++ net/netfilter/nfnetlink.c | 7 +++++++ net/netfilter/nfnetlink_log.c | 10 ++++++---- net/netfilter/nfnetlink_queue_core.c | 3 ++- 4 files changed, 17 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 60b164171daf..cadb7402d7a7 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -29,6 +29,8 @@ extern int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n); extern int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n); extern int nfnetlink_has_listeners(struct net *net, unsigned int group); +extern struct sk_buff *nfnetlink_alloc_skb(struct net *net, unsigned int size, + u32 dst_portid, gfp_t gfp_mask); extern int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid, unsigned int group, int echo, gfp_t flags); extern int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error); diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 1640bd7dcdf4..572d87dc116f 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -112,6 +112,13 @@ int nfnetlink_has_listeners(struct net *net, unsigned int group) } EXPORT_SYMBOL_GPL(nfnetlink_has_listeners); +struct sk_buff *nfnetlink_alloc_skb(struct net *net, unsigned int size, + u32 dst_portid, gfp_t gfp_mask) +{ + return netlink_alloc_skb(net->nfnl, size, dst_portid, gfp_mask); +} +EXPORT_SYMBOL_GPL(nfnetlink_alloc_skb); + int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid, unsigned int group, int echo, gfp_t flags) { diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 50aaf716cd68..d4199eb9b338 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -318,7 +318,7 @@ nfulnl_set_flags(struct nfulnl_instance *inst, u_int16_t flags) } static struct sk_buff * -nfulnl_alloc_skb(unsigned int inst_size, unsigned int pkt_size) +nfulnl_alloc_skb(u32 peer_portid, unsigned int inst_size, unsigned int pkt_size) { struct sk_buff *skb; unsigned int n; @@ -327,13 +327,14 @@ nfulnl_alloc_skb(unsigned int inst_size, unsigned int pkt_size) * message. 
WARNING: has to be <= 128k due to slab restrictions */ n = max(inst_size, pkt_size); - skb = alloc_skb(n, GFP_ATOMIC); + skb = nfnetlink_alloc_skb(&init_net, n, peer_portid, GFP_ATOMIC); if (!skb) { if (n > pkt_size) { /* try to allocate only as much as we need for current * packet */ - skb = alloc_skb(pkt_size, GFP_ATOMIC); + skb = nfnetlink_alloc_skb(&init_net, pkt_size, + peer_portid, GFP_ATOMIC); if (!skb) pr_err("nfnetlink_log: can't even alloc %u bytes\n", pkt_size); @@ -696,7 +697,8 @@ nfulnl_log_packet(u_int8_t pf, } if (!inst->skb) { - inst->skb = nfulnl_alloc_skb(inst->nlbufsiz, size); + inst->skb = nfulnl_alloc_skb(inst->peer_portid, inst->nlbufsiz, + size); if (!inst->skb) goto alloc_failure; } diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index 5e280b3e154f..ef3cdb4bfeea 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c @@ -339,7 +339,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, if (queue->flags & NFQA_CFG_F_CONNTRACK) ct = nfqnl_ct_get(entskb, &size, &ctinfo); - skb = alloc_skb(size, GFP_ATOMIC); + skb = nfnetlink_alloc_skb(&init_net, size, queue->peer_portid, + GFP_ATOMIC); if (!skb) return NULL; -- cgit v1.2.3-70-g09d2 From 6e94d1ef37e439bf45659cc071553574ccb98080 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 16 Apr 2013 01:29:10 +0000 Subject: net: socket: move ktime2ts to ktime header api Currently, ktime2ts is a small helper function that is only used in net/socket.c. Move this helper into the ktime API as a small inline function, so that i) it's maintained together with ktime routines, and ii) also other files can make use of it. The function is named ktime_to_timespec_cond() and placed into the generic part of ktime, since we internally make use of ktime_to_timespec(). ktime_to_timespec() itself does not check the ktime variable for zero, hence, we name this function ktime_to_timespec_cond() for only a conditional conversion, and adapt its users to it. Signed-off-by: Daniel Borkmann Signed-off-by: David S. Miller --- include/linux/ktime.h | 18 ++++++++++++++++++ net/socket.c | 20 ++++---------------- 2 files changed, 22 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ktime.h b/include/linux/ktime.h index e83512f63df5..bbca12804d12 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h @@ -330,6 +330,24 @@ static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec) extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs); +/** + * ktime_to_timespec_cond - convert a ktime_t variable to timespec + * format only if the variable contains data + * @kt: the ktime_t variable to convert + * @ts: the timespec variable to store the result in + * + * Returns true if there was a successful conversion, false if kt was 0. + */ +static inline bool ktime_to_timespec_cond(const ktime_t kt, struct timespec *ts) +{ + if (kt.tv64) { + *ts = ktime_to_timespec(kt); + return true; + } else { + return false; + } +} + /* * The resolution of the clocks. 
The resolution value is returned in * the clock_getres() system call to give application programmers an diff --git a/net/socket.c b/net/socket.c index 36883fea44f3..280283f03ccc 100644 --- a/net/socket.c +++ b/net/socket.c @@ -681,16 +681,6 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg, } EXPORT_SYMBOL(kernel_sendmsg); -static int ktime2ts(ktime_t kt, struct timespec *ts) -{ - if (kt.tv64) { - *ts = ktime_to_timespec(kt); - return 1; - } else { - return 0; - } -} - /* * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP) */ @@ -723,17 +713,15 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, memset(ts, 0, sizeof(ts)); - if (skb->tstamp.tv64 && - sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE)) { - skb_get_timestampns(skb, ts + 0); + if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE) && + ktime_to_timespec_cond(skb->tstamp, ts + 0)) empty = 0; - } if (shhwtstamps) { if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE) && - ktime2ts(shhwtstamps->syststamp, ts + 1)) + ktime_to_timespec_cond(shhwtstamps->syststamp, ts + 1)) empty = 0; if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE) && - ktime2ts(shhwtstamps->hwtstamp, ts + 2)) + ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts + 2)) empty = 0; } if (!empty) -- cgit v1.2.3-70-g09d2 From 9fae27b33785cb6350f21449073d9f7d1a2bbfd6 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Sat, 20 Apr 2013 23:51:41 +0000 Subject: net: vlan: fix dummy function signatures for CONFIG_VLAN=n Fix up some function signatures for CONFIG_VLAN=n that were missed during the 802.1ad support patches. Found by the kbuild robot. Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- include/linux/if_vlan.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index a78f9390da87..52bd03b38962 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -104,7 +104,8 @@ extern void vlan_vids_del_by_dev(struct net_device *dev, extern bool vlan_uses_dev(const struct net_device *dev); #else static inline struct net_device * -__vlan_find_dev_deep(struct net_device *real_dev, u16 vlan_id) +__vlan_find_dev_deep(struct net_device *real_dev, + __be16 vlan_proto, u16 vlan_id) { return NULL; } @@ -131,12 +132,12 @@ static inline struct sk_buff *vlan_untag(struct sk_buff *skb) return skb; } -static inline int vlan_vid_add(struct net_device *dev, unsigned short vid) +static inline int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid) { return 0; } -static inline void vlan_vid_del(struct net_device *dev, unsigned short vid) +static inline void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid) { } -- cgit v1.2.3-70-g09d2 From d998735f443427c1530cac5eeda0a45c8cb60a57 Mon Sep 17 00:00:00 2001 From: Eugenia Emantayev Date: Tue, 23 Apr 2013 06:06:47 +0000 Subject: net/mlx4_core: Add timestamping device capability Add new device capability for timestamping support and query FW to retrieve it. Signed-off-by: Eugenia Emantayev Signed-off-by: Or Gerlitz Signed-off-by: Amir Vadai Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/fw.c | 7 ++++++- include/linux/mlx4/device.h | 3 ++- 2 files changed, 8 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index ab470d991ade..f1e70979b4c1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -130,7 +130,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [1] = "RSS Toeplitz Hash Function support", [2] = "RSS XOR Hash Function support", [3] = "Device manage flow steering support", - [4] = "Automatic mac reassignment support" + [4] = "Automatic MAC reassignment support", + [5] = "Time stamping support" }; int i; @@ -444,6 +445,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET 0x38 #define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b #define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c +#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e #define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f #define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40 #define QUERY_DEV_CAP_FLAGS_OFFSET 0x44 @@ -560,6 +562,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->fs_max_num_qp_per_entry = field; MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET); dev_cap->stat_rate_support = stat_rate; + MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); + if (field & 0x80) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS; MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); dev_cap->flags = flags | (u64)ext_flags << 32; diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 1bc5a750b330..86ae260c2e53 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -152,7 +152,8 @@ enum { MLX4_DEV_CAP_FLAG2_RSS_TOP = 1LL << 1, MLX4_DEV_CAP_FLAG2_RSS_XOR = 1LL << 2, MLX4_DEV_CAP_FLAG2_FS_EN = 1LL << 3, - MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN = 1LL << 4 + MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN = 1LL << 4, + MLX4_DEV_CAP_FLAG2_TS = 1LL << 5 }; enum { -- cgit v1.2.3-70-g09d2 From ddd8a6c12d7e494902a9435a9a7a543ef730b2d8 Mon Sep 17 00:00:00 2001 From: Eugenia Emantayev Date: Tue, 23 Apr 2013 06:06:48 +0000 Subject: net/mlx4_core: Read HCA frequency and map internal clock Read HCA frequency, read PCI clock bar and offset, map internal clock to PCI bar. Signed-off-by: Eugenia Emantayev Signed-off-by: Amir Vadai Signed-off-by: David S. 
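[Editorial sketch, not part of the patch: hca_core_clock is reported in MHz,
i.e. counter ticks per microsecond, so a delta read from the mapped internal
clock converts to nanoseconds roughly as below. The helper name is made up
for illustration; a real implementation would typically go through the
kernel's cyclecounter/timecounter infrastructure rather than open-coding the
division.]

	#include <linux/math64.h>

	static inline u64 example_hca_cycles_to_ns(u64 cycles, u16 hca_core_clock)
	{
		/* hca_core_clock == 0 is rejected at init time (see below),
		 * so the divide is safe here
		 */
		return div_u64(cycles * 1000ULL, hca_core_clock);
	}

[End of editorial sketch.]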
Miller --- drivers/net/ethernet/mellanox/mlx4/fw.c | 11 ++++++ drivers/net/ethernet/mellanox/mlx4/fw.h | 1 + drivers/net/ethernet/mellanox/mlx4/main.c | 57 +++++++++++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx4/mlx4.h | 6 +++- include/linux/mlx4/device.h | 1 + 5 files changed, 75 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index f1e70979b4c1..6776c257bd34 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -1013,6 +1013,9 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev) #define QUERY_FW_COMM_BASE_OFFSET 0x40 #define QUERY_FW_COMM_BAR_OFFSET 0x48 +#define QUERY_FW_CLOCK_OFFSET 0x50 +#define QUERY_FW_CLOCK_BAR 0x58 + mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); @@ -1087,6 +1090,12 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev) fw->comm_bar, fw->comm_base); mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2); + MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET); + MLX4_GET(fw->clock_bar, outbox, QUERY_FW_CLOCK_BAR); + fw->clock_bar = (fw->clock_bar >> 6) * 2; + mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n", + fw->clock_bar, fw->clock_offset); + /* * Round up number of system pages needed in case * MLX4_ICM_PAGE_SIZE < PAGE_SIZE. @@ -1374,6 +1383,7 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, u8 byte_field; #define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04 +#define QUERY_HCA_CORE_CLOCK_OFFSET 0x0c mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) @@ -1388,6 +1398,7 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, goto out; MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET); + MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET); /* QPC/EEC/CQC/EQC/RDMARC attributes */ diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index 151c2bb380a6..fdf41665a059 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -162,6 +162,7 @@ struct mlx4_init_hca_param { u64 global_caps; u16 log_mc_entry_sz; u16 log_mc_hash_sz; + u16 hca_core_clock; /* Internal Clock Frequency (in MHz) */ u8 log_num_qps; u8 log_num_srqs; u8 log_num_cqs; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 16abde20e1fc..e81840faa6c6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -513,6 +513,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz; + dev->caps.hca_core_clock = hca_param.hca_core_clock; + memset(&dev_cap, 0, sizeof(dev_cap)); dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp; err = mlx4_dev_cap(dev, &dev_cap); @@ -1226,8 +1228,31 @@ static void unmap_bf_area(struct mlx4_dev *dev) io_mapping_free(mlx4_priv(dev)->bf_mapping); } +static int map_internal_clock(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + priv->clock_mapping = + ioremap(pci_resource_start(dev->pdev, priv->fw.clock_bar) + + priv->fw.clock_offset, MLX4_CLOCK_SIZE); + + if (!priv->clock_mapping) + return -ENOMEM; + + return 0; +} + +static void unmap_internal_clock(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + if (priv->clock_mapping) + iounmap(priv->clock_mapping); +} + static void mlx4_close_hca(struct mlx4_dev *dev) { + unmap_internal_clock(dev); unmap_bf_area(dev); if (mlx4_is_slave(dev)) 
mlx4_slave_exit(dev); @@ -1445,6 +1470,37 @@ static int mlx4_init_hca(struct mlx4_dev *dev) mlx4_err(dev, "INIT_HCA command failed, aborting.\n"); goto err_free_icm; } + /* + * If TS is supported by FW + * read HCA frequency by QUERY_HCA command + */ + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) { + memset(&init_hca, 0, sizeof(init_hca)); + err = mlx4_QUERY_HCA(dev, &init_hca); + if (err) { + mlx4_err(dev, "QUERY_HCA command failed, disable timestamp.\n"); + dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; + } else { + dev->caps.hca_core_clock = + init_hca.hca_core_clock; + } + + /* In case we got HCA frequency 0 - disable timestamping + * to avoid dividing by zero + */ + if (!dev->caps.hca_core_clock) { + dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; + mlx4_err(dev, + "HCA frequency is 0. Timestamping is not supported."); + } else if (map_internal_clock(dev)) { + /* + * Map internal clock, + * in case of failure disable timestamping + */ + dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; + mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported.\n"); + } + } } else { err = mlx4_init_slave(dev); if (err) { @@ -1478,6 +1534,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) return 0; unmap_bf: + unmap_internal_clock(dev); unmap_bf_area(dev); err_close: diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 252f4ba7f32c..0567f01938ed 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -87,7 +87,8 @@ enum { MLX4_HCR_SIZE = 0x0001c, MLX4_CLR_INT_SIZE = 0x00008, MLX4_SLAVE_COMM_BASE = 0x0, - MLX4_COMM_PAGESIZE = 0x1000 + MLX4_COMM_PAGESIZE = 0x1000, + MLX4_CLOCK_SIZE = 0x00008 }; enum { @@ -403,6 +404,7 @@ struct mlx4_fw { u64 clr_int_base; u64 catas_offset; u64 comm_base; + u64 clock_offset; struct mlx4_icm *fw_icm; struct mlx4_icm *aux_icm; u32 catas_size; @@ -410,6 +412,7 @@ struct mlx4_fw { u8 clr_int_bar; u8 catas_bar; u8 comm_bar; + u8 clock_bar; }; struct mlx4_comm { @@ -826,6 +829,7 @@ struct mlx4_priv { struct list_head bf_list; struct mutex bf_mutex; struct io_mapping *bf_mapping; + void __iomem *clock_mapping; int reserved_mtts; int fs_hash_mode; u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS]; diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 86ae260c2e53..e088290d9792 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -445,6 +445,7 @@ struct mlx4_caps { u8 eqe_factor; u32 userspace_caps; /* userspace must be aware of these */ u32 function_caps; /* VFs must be aware of these */ + u16 hca_core_clock; }; struct mlx4_buf_list { -- cgit v1.2.3-70-g09d2 From ec693d47010e8302e61e0bdf3f47496c5610641a Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Tue, 23 Apr 2013 06:06:49 +0000 Subject: net/mlx4_en: Add HW timestamping (TS) support The patch allows to enable/disable HW timestamping for incoming and/or outgoing packets. It adds and initializes all structs and callbacks needed by kernel TS API. To enable/disable HW timestamping appropriate ioctl should be used. Currently HWTSTAMP_FILTER_ALL/NONE and HWTSAMP_TX_ON/OFF only are supported. When enabling TS on receive flow - VLAN stripping will be disabled. Also were made all relevant changes in RX/TX flows to consider TS request and plant HW timestamps into relevant structures. mlx4_ib was fixed to compile with new mlx4_cq_alloc() signature. Signed-off-by: Eugenia Emantayev Signed-off-by: Amir Vadai Signed-off-by: David S. 
Miller --- drivers/infiniband/hw/mlx4/cq.c | 2 +- drivers/net/ethernet/mellanox/mlx4/Makefile | 2 +- drivers/net/ethernet/mellanox/mlx4/cq.c | 10 +- drivers/net/ethernet/mellanox/mlx4/en_clock.c | 132 ++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx4/en_cq.c | 10 +- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 30 +++++ drivers/net/ethernet/mellanox/mlx4/en_main.c | 5 + drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 75 ++++++++++++ drivers/net/ethernet/mellanox/mlx4/en_resources.c | 3 + drivers/net/ethernet/mellanox/mlx4/en_rx.c | 29 ++++- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 29 ++++- drivers/net/ethernet/mellanox/mlx4/main.c | 22 ++++ drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 21 +++- include/linux/mlx4/cq.h | 16 +++ include/linux/mlx4/device.h | 6 +- 15 files changed, 375 insertions(+), 17 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx4/en_clock.c (limited to 'include/linux') diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index ae67df35dd4d..73b3a7132587 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -228,7 +228,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector vector = dev->eq_table[vector % ibdev->num_comp_vectors]; err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, - cq->db.dma, &cq->mcq, vector, 0); + cq->db.dma, &cq->mcq, vector, 0, 0); if (err) goto err_dbmap; diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile index 293127d28b33..3e9c70f15b42 100644 --- a/drivers/net/ethernet/mellanox/mlx4/Makefile +++ b/drivers/net/ethernet/mellanox/mlx4/Makefile @@ -6,5 +6,5 @@ mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \ obj-$(CONFIG_MLX4_EN) += mlx4_en.o mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \ - en_resources.o en_netdev.o en_selftest.o + en_resources.o en_netdev.o en_selftest.o en_clock.o mlx4_en-$(CONFIG_MLX4_EN_DCB) += en_dcb_nl.o diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index 0706623cfb96..004e4231af67 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -240,9 +240,10 @@ static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn) __mlx4_cq_free_icm(dev, cqn); } -int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, - struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, - unsigned vector, int collapsed) +int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, + struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec, + struct mlx4_cq *cq, unsigned vector, int collapsed, + int timestamp_en) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_cq_table *cq_table = &priv->cq_table; @@ -276,6 +277,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, memset(cq_context, 0, sizeof *cq_context); cq_context->flags = cpu_to_be32(!!collapsed << 18); + if (timestamp_en) + cq_context->flags |= cpu_to_be32(1 << 19); + cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index); cq_context->comp_eqn = priv->eq_table.eq[vector].eqn; cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c new file mode 100644 index 000000000000..501c72f1fbeb --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2012 Mellanox 
Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include + +#include "mlx4_en.h" + +int mlx4_en_timestamp_config(struct net_device *dev, int tx_type, int rx_filter) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int port_up = 0; + int err = 0; + + mutex_lock(&mdev->state_lock); + if (priv->port_up) { + port_up = 1; + mlx4_en_stop_port(dev, 1); + } + + mlx4_en_free_resources(priv); + + en_warn(priv, "Changing Time Stamp configuration\n"); + + priv->hwtstamp_config.tx_type = tx_type; + priv->hwtstamp_config.rx_filter = rx_filter; + + if (rx_filter != HWTSTAMP_FILTER_NONE) + dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; + else + dev->features |= NETIF_F_HW_VLAN_CTAG_RX; + + err = mlx4_en_alloc_resources(priv); + if (err) { + en_err(priv, "Failed reallocating port resources\n"); + goto out; + } + if (port_up) { + err = mlx4_en_start_port(dev); + if (err) + en_err(priv, "Failed starting port\n"); + } + +out: + mutex_unlock(&mdev->state_lock); + netdev_features_change(dev); + return err; +} + +/* mlx4_en_read_clock - read raw cycle counter (to be used by time counter) + */ +static cycle_t mlx4_en_read_clock(const struct cyclecounter *tc) +{ + struct mlx4_en_dev *mdev = + container_of(tc, struct mlx4_en_dev, cycles); + struct mlx4_dev *dev = mdev->dev; + + return mlx4_read_clock(dev) & tc->mask; +} + +u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe) +{ + u64 hi, lo; + struct mlx4_ts_cqe *ts_cqe = (struct mlx4_ts_cqe *)cqe; + + lo = (u64)be16_to_cpu(ts_cqe->timestamp_lo); + hi = ((u64)be32_to_cpu(ts_cqe->timestamp_hi) + !lo) << 16; + + return hi | lo; +} + +void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev, + struct skb_shared_hwtstamps *hwts, + u64 timestamp) +{ + u64 nsec; + + nsec = timecounter_cyc2time(&mdev->clock, timestamp); + + memset(hwts, 0, sizeof(struct skb_shared_hwtstamps)); + hwts->hwtstamp = ns_to_ktime(nsec); +} + +void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev) +{ + struct mlx4_dev *dev = mdev->dev; + + memset(&mdev->cycles, 0, sizeof(mdev->cycles)); + mdev->cycles.read = mlx4_en_read_clock; + mdev->cycles.mask = CLOCKSOURCE_MASK(48); + /* Using shift to make calculation more accurate. 
Since current HW + * clock frequency is 427 MHz, and cycles are given using a 48 bits + * register, the biggest shift when calculating using u64, is 14 + * (max_cycles * multiplier < 2^64) + */ + mdev->cycles.shift = 14; + mdev->cycles.mult = + clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift); + + timecounter_init(&mdev->clock, &mdev->cycles, + ktime_to_ns(ktime_get_real())); +} diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index b8d0854a7ad1..1e6c594d6d04 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -77,6 +77,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, struct mlx4_en_dev *mdev = priv->mdev; int err = 0; char name[25]; + int timestamp_en = 0; struct cpu_rmap *rmap = #ifdef CONFIG_RFS_ACCEL priv->dev->rx_cpu_rmap; @@ -123,8 +124,13 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, if (!cq->is_tx) cq->size = priv->rx_ring[cq->ring].actual_size; - err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar, - cq->wqres.db.dma, &cq->mcq, cq->vector, 0); + if ((cq->is_tx && priv->hwtstamp_config.tx_type) || + (!cq->is_tx && priv->hwtstamp_config.rx_filter)) + timestamp_en = 1; + + err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, + &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq, + cq->vector, 0, timestamp_en); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 00f25b5f297f..bcf4d118e98c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -1147,6 +1147,35 @@ out: return err; } +static int mlx4_en_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int ret; + + ret = ethtool_op_get_ts_info(dev, info); + if (ret) + return ret; + + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) { + info->so_timestamping |= + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->tx_types = + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + + info->rx_filters = + (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + } + + return ret; +} + const struct ethtool_ops mlx4_en_ethtool_ops = { .get_drvinfo = mlx4_en_get_drvinfo, .get_settings = mlx4_en_get_settings, @@ -1173,6 +1202,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = { .set_rxfh_indir = mlx4_en_set_rxfh_indir, .get_channels = mlx4_en_get_channels, .set_channels = mlx4_en_set_channels, + .get_ts_info = mlx4_en_get_ts_info, }; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index fc27800e9c38..a5c9df07a7d0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -300,6 +300,11 @@ static void *mlx4_en_add(struct mlx4_dev *dev) if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) mdev->pndev[i] = NULL; } + + /* Initialize time stamp mechanism */ + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) + mlx4_en_init_timestamp(mdev); + return mdev; err_mr: diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index e7e27842d8d4..4cb9f3203973 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c 
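/* Editorial aside, a sanity check of the "biggest shift ... is 14" reasoning
 * in the en_clock.c comment above, under the assumptions stated there
 * (427 MHz HCA clock, 48-bit cycle counter):
 *
 *   mult = clocksource_khz2mult(427000, 14) = (10^6 << 14) / 427000 ~= 38369
 *   max_cycles * mult = 2^48 * 38369 ~= 1.08e19, which is below 2^64 ~= 1.84e19
 *
 * With shift = 15 the multiplier roughly doubles to ~76738 and
 * 2^48 * 76738 ~= 2.16e19 exceeds 2^64, so the 64-bit product would overflow;
 * 14 is therefore the largest safe shift for this clock frequency.
 */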
@@ -1916,6 +1916,75 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) return 0; } +static int mlx4_en_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + struct hwtstamp_config config; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + /* device doesn't support time stamping */ + if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)) + return -EINVAL; + + /* TX HW timestamp */ + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + /* RX HW timestamp */ + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + if (mlx4_en_timestamp_config(dev, config.tx_type, config.rx_filter)) { + config.tx_type = HWTSTAMP_TX_OFF; + config.rx_filter = HWTSTAMP_FILTER_NONE; + } + + return copy_to_user(ifr->ifr_data, &config, + sizeof(config)) ? -EFAULT : 0; +} + +static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCSHWTSTAMP: + return mlx4_en_hwtstamp_ioctl(dev, ifr); + default: + return -EOPNOTSUPP; + } +} + static int mlx4_en_set_features(struct net_device *netdev, netdev_features_t features) { @@ -1943,6 +2012,7 @@ static const struct net_device_ops mlx4_netdev_ops = { .ndo_set_mac_address = mlx4_en_set_mac, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = mlx4_en_change_mtu, + .ndo_do_ioctl = mlx4_en_ioctl, .ndo_tx_timeout = mlx4_en_tx_timeout, .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, @@ -2054,6 +2124,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, spin_lock_init(&priv->filters_lock); #endif + /* Initialize time stamping config */ + priv->hwtstamp_config.flags = 0; + priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF; + priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + /* Allocate page for receive rings */ err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c index 10c24c784b70..91f2b2c43c12 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c @@ -42,6 +42,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, int user_prio, struct mlx4_qp_context *context) { struct mlx4_en_dev *mdev = priv->mdev; + struct net_device *dev = priv->dev; memset(context, 0, sizeof *context); context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET); @@ -65,6 +66,8 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, context->cqn_send = cpu_to_be32(cqn); context->cqn_recv = 
cpu_to_be32(cqn); context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2); + if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX)) + context->param3 |= cpu_to_be32(1 << 30); } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 4006f8857cb5..02aee1ebd203 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -320,6 +320,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, } ring->buf = ring->wqres.buf.direct.buf; + ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter; + return 0; err_hwq: @@ -554,6 +556,7 @@ static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv, int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget) { struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_cqe *cqe; struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring]; struct mlx4_en_rx_alloc *frags; @@ -565,6 +568,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud int polled = 0; int ip_summed; int factor = priv->cqe_factor; + u64 timestamp; if (!priv->port_up) return 0; @@ -669,8 +673,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud gro_skb->data_len = length; gro_skb->ip_summed = CHECKSUM_UNNECESSARY; - if (cqe->vlan_my_qpn & - cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) { + if ((cqe->vlan_my_qpn & + cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) && + (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { u16 vid = be16_to_cpu(cqe->sl_vid); __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid); @@ -680,8 +685,15 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud gro_skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid); skb_record_rx_queue(gro_skb, cq->ring); - napi_gro_frags(&cq->napi); + if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) { + timestamp = mlx4_en_get_cqe_ts(cqe); + mlx4_en_fill_hwtstamps(mdev, + skb_hwtstamps(gro_skb), + timestamp); + } + + napi_gro_frags(&cq->napi); goto next; } @@ -714,10 +726,17 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud if (dev->features & NETIF_F_RXHASH) skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid); - if (be32_to_cpu(cqe->vlan_my_qpn) & - MLX4_CQE_VLAN_PRESENT_MASK) + if ((be32_to_cpu(cqe->vlan_my_qpn) & + MLX4_CQE_VLAN_PRESENT_MASK) && + (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid)); + if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) { + timestamp = mlx4_en_get_cqe_ts(cqe); + mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb), + timestamp); + } + /* Push it up the stack */ netif_receive_skb(skb); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 49308cc65ee7..b0a2d2b96e43 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -118,6 +118,8 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, } else ring->bf_enabled = true; + ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type; + return 0; err_map: @@ -192,8 +194,9 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, - int index, u8 owner) + int index, u8 owner, u64 timestamp) { + struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; struct mlx4_en_tx_desc *tx_desc = ring->buf + 
index * TXBB_SIZE; struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset; @@ -204,6 +207,12 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, int i; __be32 *ptr = (__be32 *)tx_desc; __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT)); + struct skb_shared_hwtstamps hwts; + + if (timestamp) { + mlx4_en_fill_hwtstamps(mdev, &hwts, timestamp); + skb_tstamp_tx(skb, &hwts); + } /* Optimize the common case when there are no wraparounds */ if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) { @@ -289,7 +298,7 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) while (ring->cons != ring->prod) { ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring, ring->cons & ring->size_mask, - !!(ring->cons & ring->size)); + !!(ring->cons & ring->size), 0); ring->cons += ring->last_nr_txbb; cnt++; } @@ -318,6 +327,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) u32 packets = 0; u32 bytes = 0; int factor = priv->cqe_factor; + u64 timestamp = 0; if (!priv->port_up) return; @@ -341,11 +351,14 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) do { txbbs_skipped += ring->last_nr_txbb; ring_index = (ring_index + ring->last_nr_txbb) & size_mask; + if (ring->tx_info[ring_index].ts_requested) + timestamp = mlx4_en_get_cqe_ts(cqe); + /* free next descriptor */ ring->last_nr_txbb = mlx4_en_free_tx_desc( priv, ring, ring_index, !!((ring->cons + txbbs_skipped) & - ring->size)); + ring->size), timestamp); packets++; bytes += ring->tx_info[ring_index].nr_bytes; } while (ring_index != new_index); @@ -629,6 +642,16 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) tx_info->skb = skb; tx_info->nr_txbb = nr_txbb; + /* + * For timestamping add flag to skb_shinfo and + * set flag for further reference + */ + if (ring->hwtstamp_tx_type == HWTSTAMP_TX_ON && + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_info->ts_requested = 1; + } + /* Prepare ctrl segement apart opcode+ownership, which depends on * whether LSO is used */ tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index e81840faa6c6..0d32a82458bf 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1228,6 +1228,28 @@ static void unmap_bf_area(struct mlx4_dev *dev) io_mapping_free(mlx4_priv(dev)->bf_mapping); } +cycle_t mlx4_read_clock(struct mlx4_dev *dev) +{ + u32 clockhi, clocklo, clockhi1; + cycle_t cycles; + int i; + struct mlx4_priv *priv = mlx4_priv(dev); + + for (i = 0; i < 10; i++) { + clockhi = swab32(readl(priv->clock_mapping)); + clocklo = swab32(readl(priv->clock_mapping + 4)); + clockhi1 = swab32(readl(priv->clock_mapping)); + if (clockhi == clockhi1) + break; + } + + cycles = (u64) clockhi << 32 | (u64) clocklo; + + return cycles; +} +EXPORT_SYMBOL_GPL(mlx4_read_clock); + + static int map_internal_clock(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index d4cb5d3b28a2..85b0754ec8ac 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -40,6 +40,7 @@ #include #include #include +#include #ifdef CONFIG_MLX4_EN_DCB #include #endif @@ -207,6 +208,7 @@ struct mlx4_en_tx_info { u8 linear; u8 data_offset; u8 
inl; + u8 ts_requested; }; @@ -262,6 +264,7 @@ struct mlx4_en_tx_ring { struct mlx4_bf bf; bool bf_enabled; struct netdev_queue *tx_queue; + int hwtstamp_tx_type; }; struct mlx4_en_rx_desc { @@ -288,6 +291,7 @@ struct mlx4_en_rx_ring { unsigned long packets; unsigned long csum_ok; unsigned long csum_none; + int hwtstamp_rx_filter; }; struct mlx4_en_cq { @@ -348,6 +352,9 @@ struct mlx4_en_dev { u32 priv_pdn; spinlock_t uar_lock; u8 mac_removed[MLX4_MAX_PORTS + 1]; + struct cyclecounter cycles; + struct timecounter clock; + unsigned long last_overflow_check; }; @@ -525,6 +532,7 @@ struct mlx4_en_priv { struct device *ddev; int base_tx_qpn; struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE]; + struct hwtstamp_config hwtstamp_config; #ifdef CONFIG_MLX4_EN_DCB struct ieee_ets ets; @@ -639,7 +647,18 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf); u64 mlx4_en_mac_to_u64(u8 *addr); /* - * Globals + * Functions for time stamping + */ +u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe); +void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev, + struct skb_shared_hwtstamps *hwts, + u64 timestamp); +void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev); +int mlx4_en_timestamp_config(struct net_device *dev, + int tx_type, + int rx_filter); + +/* Globals */ extern const struct ethtool_ops mlx4_en_ethtool_ops; diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h index 6f65b2c8bb89..98fa492cf406 100644 --- a/include/linux/mlx4/cq.h +++ b/include/linux/mlx4/cq.h @@ -64,6 +64,22 @@ struct mlx4_err_cqe { u8 owner_sr_opcode; }; +struct mlx4_ts_cqe { + __be32 vlan_my_qpn; + __be32 immed_rss_invalid; + __be32 g_mlpath_rqpn; + __be32 timestamp_hi; + __be16 status; + u8 ipv6_ext_mask; + u8 badfcs_enc; + __be32 byte_cnt; + __be16 wqe_index; + __be16 checksum; + u8 reserved; + __be16 timestamp_lo; + u8 owner_sr_opcode; +} __packed; + enum { MLX4_CQE_VLAN_PRESENT_MASK = 1 << 29, MLX4_CQE_QPN_MASK = 0xffffff, diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index e088290d9792..2fbc1464b53b 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -40,6 +40,8 @@ #include +#include + #define MAX_MSIX_P_PORT 17 #define MAX_MSIX 64 #define MSIX_LEGACY_SZ 4 @@ -840,7 +842,7 @@ void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres, int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, - unsigned vector, int collapsed); + unsigned vector, int collapsed, int timestamp_en); void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base); @@ -1031,4 +1033,6 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, u8 port, int void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid); __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave); +cycle_t mlx4_read_clock(struct mlx4_dev *dev); + #endif /* MLX4_DEVICE_H */ -- cgit v1.2.3-70-g09d2 From 5a8eb24292ffd68604cedeb24ad2b4bc02cfc037 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Thu, 25 Apr 2013 04:42:29 +0000 Subject: pci: Add SRIOV helper function to determine if VFs are assigned to guest This function is meant to add a helper function that will determine if a PF has any VFs that are currently assigned to a guest. We currently have been implementing this function per driver, and going forward I would like to avoid that by making this function generic and using this helper. 
v2: Removed extern from declaration of pci_vfs_assigned in pci.h and return 0 if SR-IOV is disabled with is inline with other PCI SRIOV functions. Signed-off-by: Alexander Duyck Acked-by: Bjorn Helgaas Signed-off-by: Jeff Kirsher --- drivers/pci/iov.c | 41 +++++++++++++++++++++++++++++++++++++++++ include/linux/pci.h | 5 +++++ 2 files changed, 46 insertions(+) (limited to 'include/linux') diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index ee599f274f05..c93071d428f5 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -728,6 +728,47 @@ int pci_num_vf(struct pci_dev *dev) } EXPORT_SYMBOL_GPL(pci_num_vf); +/** + * pci_vfs_assigned - returns number of VFs are assigned to a guest + * @dev: the PCI device + * + * Returns number of VFs belonging to this device that are assigned to a guest. + * If device is not a physical function returns -ENODEV. + */ +int pci_vfs_assigned(struct pci_dev *dev) +{ + struct pci_dev *vfdev; + unsigned int vfs_assigned = 0; + unsigned short dev_id; + + /* only search if we are a PF */ + if (!dev->is_physfn) + return 0; + + /* + * determine the device ID for the VFs, the vendor ID will be the + * same as the PF so there is no need to check for that one + */ + pci_read_config_word(dev, dev->sriov->pos + PCI_SRIOV_VF_DID, &dev_id); + + /* loop through all the VFs to see if we own any that are assigned */ + vfdev = pci_get_device(dev->vendor, dev_id, NULL); + while (vfdev) { + /* + * It is considered assigned if it is a virtual function with + * our dev as the physical function and the assigned bit is set + */ + if (vfdev->is_virtfn && (vfdev->physfn == dev) && + (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)) + vfs_assigned++; + + vfdev = pci_get_device(dev->vendor, dev_id, vfdev); + } + + return vfs_assigned; +} +EXPORT_SYMBOL_GPL(pci_vfs_assigned); + /** * pci_sriov_set_totalvfs -- reduce the TotalVFs available * @dev: the PCI PF device diff --git a/include/linux/pci.h b/include/linux/pci.h index 710067f3618c..43e45ac36458 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1644,6 +1644,7 @@ extern int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn); extern void pci_disable_sriov(struct pci_dev *dev); extern irqreturn_t pci_sriov_migration(struct pci_dev *dev); extern int pci_num_vf(struct pci_dev *dev); +int pci_vfs_assigned(struct pci_dev *dev); extern int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs); extern int pci_sriov_get_totalvfs(struct pci_dev *dev); #else @@ -1662,6 +1663,10 @@ static inline int pci_num_vf(struct pci_dev *dev) { return 0; } +static inline int pci_vfs_assigned(struct pci_dev *dev) +{ + return 0; +} static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs) { return 0; -- cgit v1.2.3-70-g09d2 From 8f7ba3ca12f6f16526fa4a8aaf2cae91563eee69 Mon Sep 17 00:00:00 2001 From: Rony Efraim Date: Thu, 25 Apr 2013 05:22:27 +0000 Subject: net/mlx4: Add set VF mac address support Add ndo_set_vf_mac support which allows to set the MAC address for mlx4 VF Ethernet NICs from the host. Signed-off-by: Rony Efraim Signed-off-by: Or Gerlitz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/cmd.c | 31 +++++++++++++++++++ drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 42 +++++++++++++++++++++++++- include/linux/mlx4/cmd.h | 1 + 3 files changed, 73 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 0a301e1a0635..a029124fe2f8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -2016,3 +2016,34 @@ u32 mlx4_comm_get_version(void) { return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER; } + +static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf) +{ + if ((vf < 0) || (vf >= dev->num_vfs)) { + mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n", vf, dev->num_vfs); + return -EINVAL; + } + + return vf+1; +} + +int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_vport_state *s_info; + int slave; + + if (!mlx4_is_master(dev)) + return -EPROTONOSUPPORT; + + slave = mlx4_get_slave_indx(dev, vf); + if (slave < 0) + return -EINVAL; + + s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; + s_info->mac = mac; + mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n", + vf, port, s_info->mac); + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_set_vf_mac); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 05c7c13bdbde..8293a92bf151 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2024,6 +2024,19 @@ static int mlx4_en_set_features(struct net_device *netdev, } +static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac) +{ + struct mlx4_en_priv *en_priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = en_priv->mdev; + u64 mac_u64 = mlx4_en_mac_to_u64(mac); + + if (!is_valid_ether_addr(mac)) + return -EINVAL; + + return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64); +} + + static const struct net_device_ops mlx4_netdev_ops = { .ndo_open = mlx4_en_open, .ndo_stop = mlx4_en_close, @@ -2048,6 +2061,30 @@ static const struct net_device_ops mlx4_netdev_ops = { #endif }; +static const struct net_device_ops mlx4_netdev_ops_master = { + .ndo_open = mlx4_en_open, + .ndo_stop = mlx4_en_close, + .ndo_start_xmit = mlx4_en_xmit, + .ndo_select_queue = mlx4_en_select_queue, + .ndo_get_stats = mlx4_en_get_stats, + .ndo_set_rx_mode = mlx4_en_set_rx_mode, + .ndo_set_mac_address = mlx4_en_set_mac, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = mlx4_en_change_mtu, + .ndo_tx_timeout = mlx4_en_tx_timeout, + .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, + .ndo_set_vf_mac = mlx4_en_set_vf_mac, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = mlx4_en_netpoll, +#endif + .ndo_set_features = mlx4_en_set_features, + .ndo_setup_tc = mlx4_en_setup_tc, +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = mlx4_en_filter_rfs, +#endif +}; + int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, struct mlx4_en_port_profile *prof) { @@ -2164,7 +2201,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, /* * Initialize netdev entry points */ - dev->netdev_ops = &mlx4_netdev_ops; + if (mlx4_is_master(priv->mdev->dev)) + dev->netdev_ops = &mlx4_netdev_ops_master; + else + dev->netdev_ops = &mlx4_netdev_ops; dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT; 
netif_set_real_num_tx_queues(dev, priv->tx_ring_num); netif_set_real_num_rx_queues(dev, priv->rx_ring_num); diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 260695186256..f21ddc6203bd 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -232,6 +232,7 @@ struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev); void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox); u32 mlx4_comm_get_version(void); +int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac); #define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8) -- cgit v1.2.3-70-g09d2 From 3f7fb021d081c8aaac1d0cf69a288d21625e872e Mon Sep 17 00:00:00 2001 From: Rony Efraim Date: Thu, 25 Apr 2013 05:22:28 +0000 Subject: net/mlx4: Add set VF default vlan ID and priority support Add support to ndo_set_vf_vlan in the driver. Once this call is used the vport is considered to be in VST mode. In this mode, the PPF driver configures Ethernet QPs created by this VF to use this vlan id and priority. Currently RoCE isn't supported on that mode. The special values of VID=4095 or VID=0,UP=0 are considered as VGT. Signed-off-by: Rony Efraim Signed-off-by: Or Gerlitz Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/cmd.c | 72 ++++++++++++++++++++-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 9 +++ drivers/net/ethernet/mellanox/mlx4/fw.c | 5 ++ drivers/net/ethernet/mellanox/mlx4/mlx4.h | 2 + drivers/net/ethernet/mellanox/mlx4/port.c | 4 +- .../net/ethernet/mellanox/mlx4/resource_tracker.c | 36 +++++++++++ include/linux/mlx4/cmd.h | 2 + include/linux/mlx4/device.h | 3 +- 8 files changed, 126 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index a029124fe2f8..aad6f8dbfb4c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -1492,14 +1492,48 @@ out: static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave) { - int port; + int port, err; + struct mlx4_vport_state *vp_admin; + struct mlx4_vport_oper_state *vp_oper; + for (port = 1; port <= MLX4_MAX_PORTS; port++) { - priv->mfunc.master.vf_oper[slave].vport[port].state = - priv->mfunc.master.vf_admin[slave].vport[port]; + vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; + vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; + vp_oper->state = *vp_admin; + if (MLX4_VGT != vp_admin->default_vlan) { + err = __mlx4_register_vlan(&priv->dev, port, + vp_admin->default_vlan, &(vp_oper->vlan_idx)); + if (err) { + vp_oper->vlan_idx = NO_INDX; + mlx4_warn((&priv->dev), + "No vlan resorces slave %d, port %d\n", + slave, port); + return err; + } + mlx4_dbg((&(priv->dev)), "alloc vlan %d idx %d slave %d port %d\n", + (int)(vp_oper->state.default_vlan), + vp_oper->vlan_idx, slave, port); + } } return 0; } +static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave) +{ + int port; + struct mlx4_vport_oper_state *vp_oper; + + for (port = 1; port <= MLX4_MAX_PORTS; port++) { + vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; + if (NO_INDX != vp_oper->vlan_idx) { + __mlx4_unregister_vlan(&priv->dev, + port, vp_oper->vlan_idx); + vp_oper->vlan_idx = NO_INDX; + } + } + return; +} + static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, u16 param, u8 toggle) { @@ -1520,6 +1554,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, if (cmd == 
MLX4_COMM_CMD_RESET) { mlx4_warn(dev, "Received reset from slave:%d\n", slave); slave_state[slave].active = false; + mlx4_master_deactivate_admin_state(priv, slave); for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) { slave_state[slave].event_eq[i].eqn = -1; slave_state[slave].event_eq[i].token = 0; @@ -1566,7 +1601,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2) goto reset_slave; slave_state[slave].vhcr_dma |= param; - mlx4_master_activate_admin_state(priv, slave); + if (mlx4_master_activate_admin_state(priv, slave)) + goto reset_slave; slave_state[slave].active = true; mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave); break; @@ -1776,6 +1812,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev) } INIT_LIST_HEAD(&s_state->mcast_filters[port]); priv->mfunc.master.vf_admin[i].vport[port].default_vlan = MLX4_VGT; + priv->mfunc.master.vf_oper[i].vport[port].state.default_vlan = MLX4_VGT; priv->mfunc.master.vf_oper[i].vport[port].vlan_idx = NO_INDX; priv->mfunc.master.vf_oper[i].vport[port].mac_idx = NO_INDX; } @@ -2047,3 +2084,30 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) return 0; } EXPORT_SYMBOL_GPL(mlx4_set_vf_mac); + +int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_vport_state *s_info; + int slave; + + if ((!mlx4_is_master(dev)) || + !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL)) + return -EPROTONOSUPPORT; + + if ((vlan > 4095) || (qos > 7)) + return -EINVAL; + + slave = mlx4_get_slave_indx(dev, vf); + if (slave < 0) + return -EINVAL; + + s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; + if ((0 == vlan) && (0 == qos)) + s_info->default_vlan = MLX4_VGT; + else + s_info->default_vlan = vlan; + s_info->default_qos = qos; + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 8293a92bf151..c1f2c5b34d95 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2036,6 +2036,14 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac) return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64); } +static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos) +{ + struct mlx4_en_priv *en_priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = en_priv->mdev; + + return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos); +} + static const struct net_device_ops mlx4_netdev_ops = { .ndo_open = mlx4_en_open, @@ -2075,6 +2083,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = { .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, .ndo_set_vf_mac = mlx4_en_set_vf_mac, + .ndo_set_vf_vlan = mlx4_en_set_vf_vlan, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = mlx4_en_netpoll, #endif diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 70d44adddc39..d2d30c940790 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -468,6 +468,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66 #define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67 #define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68 +#define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET 0x70 #define 
QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76 #define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77 #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 @@ -655,6 +656,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) MLX4_GET(dev_cap->max_counters, outbox, QUERY_DEV_CAP_MAX_COUNTERS_OFFSET); + MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); + if (field32 & (1 << 26)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL; + if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { for (i = 1; i <= dev_cap->num_ports; ++i) { MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 7e1d10059eda..eac3dae10efe 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -1157,6 +1157,8 @@ int mlx4_change_port_types(struct mlx4_dev *dev, void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); +void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index); +int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz); /* resource tracker functions*/ diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index d3408add8742..946e0af5faef 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -310,7 +310,7 @@ int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx) } EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan); -static int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, +int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) { struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; @@ -384,7 +384,7 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) } EXPORT_SYMBOL_GPL(mlx4_register_vlan); -static void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) +void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) { struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index f2d64435d8ef..5083d4babfe3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -353,6 +353,39 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox, } } +static int update_vport_qp_param(struct mlx4_dev *dev, + struct mlx4_cmd_mailbox *inbox, + u8 slave) +{ + struct mlx4_qp_context *qpc = inbox->buf + 8; + struct mlx4_vport_oper_state *vp_oper; + struct mlx4_priv *priv; + u32 qp_type; + int port; + + port = (qpc->pri_path.sched_queue & 0x40) ? 
2 : 1; + priv = mlx4_priv(dev); + vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; + + if (MLX4_VGT != vp_oper->state.default_vlan) { + qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff; + if (MLX4_QP_ST_RC == qp_type) + return -EINVAL; + + qpc->pri_path.vlan_index = vp_oper->vlan_idx; + qpc->pri_path.fl = (1 << 6) | (1 << 2); /* set cv bit and hide_cqe_vlan bit*/ + qpc->pri_path.feup |= 1 << 3; /* set fvl bit */ + qpc->pri_path.sched_queue &= 0xC7; + qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3; + mlx4_dbg(dev, "qp %d port %d Q 0x%x set vlan to %d vidx %d feup %x fl %x\n", + be32_to_cpu(qpc->local_qpn) & 0xffffff, port, + (int)(qpc->pri_path.sched_queue), vp_oper->state.default_vlan, + vp_oper->vlan_idx, (int)(qpc->pri_path.feup), + (int)(qpc->pri_path.fl)); + } + return 0; +} + static int mpt_mask(struct mlx4_dev *dev) { return dev->caps.num_mpts - 1; @@ -2798,6 +2831,9 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, update_pkey_index(dev, slave, inbox); update_gid(dev, inbox, (u8)slave); adjust_proxy_tun_qkey(dev, vhcr, qpc); + err = update_vport_qp_param(dev, inbox, slave); + if (err) + return err; return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); } diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index f21ddc6203bd..7daead75a6f5 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -233,6 +233,8 @@ void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbo u32 mlx4_comm_get_version(void); int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac); +int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos); + #define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8) diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 2fbc1464b53b..6606d8f5862a 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -155,7 +155,8 @@ enum { MLX4_DEV_CAP_FLAG2_RSS_XOR = 1LL << 2, MLX4_DEV_CAP_FLAG2_FS_EN = 1LL << 3, MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN = 1LL << 4, - MLX4_DEV_CAP_FLAG2_TS = 1LL << 5 + MLX4_DEV_CAP_FLAG2_TS = 1LL << 5, + MLX4_DEV_CAP_FLAG2_VLAN_CONTROL = 1LL << 6 }; enum { -- cgit v1.2.3-70-g09d2 From e6b6a2316379feebebacec0979b3ebc5743e7502 Mon Sep 17 00:00:00 2001 From: Rony Efraim Date: Thu, 25 Apr 2013 05:22:29 +0000 Subject: net/mlx4: Add VF MAC spoof checking support Add ndo_set_vf_spoofchk support Signed-off-by: Rony Efraim Signed-off-by: Or Gerlitz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/cmd.c | 40 ++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 9 +++++ drivers/net/ethernet/mellanox/mlx4/fw.c | 2 ++ .../net/ethernet/mellanox/mlx4/resource_tracker.c | 8 +++++ include/linux/mlx4/cmd.h | 1 + include/linux/mlx4/device.h | 3 +- 6 files changed, 62 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index aad6f8dbfb4c..eda347eeb5dc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -1514,6 +1514,21 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave) (int)(vp_oper->state.default_vlan), vp_oper->vlan_idx, slave, port); } + if (vp_admin->spoofchk) { + vp_oper->mac_idx = __mlx4_register_mac(&priv->dev, + port, + vp_admin->mac); + if (0 > vp_oper->mac_idx) { + err = vp_oper->mac_idx; + vp_oper->mac_idx = NO_INDX; + mlx4_warn((&priv->dev), + "No mac resorces slave %d, port %d\n", + slave, port); + return err; + } + mlx4_dbg((&(priv->dev)), "alloc mac %llx idx %d slave %d port %d\n", + vp_oper->state.mac, vp_oper->mac_idx, slave, port); + } } return 0; } @@ -1530,6 +1545,10 @@ static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave port, vp_oper->vlan_idx); vp_oper->vlan_idx = NO_INDX; } + if (NO_INDX != vp_oper->mac_idx) { + __mlx4_unregister_mac(&priv->dev, port, vp_oper->mac_idx); + vp_oper->mac_idx = NO_INDX; + } } return; } @@ -2111,3 +2130,24 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) return 0; } EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan); + +int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_vport_state *s_info; + int slave; + + if ((!mlx4_is_master(dev)) || + !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM)) + return -EPROTONOSUPPORT; + + slave = mlx4_get_slave_indx(dev, vf); + if (slave < 0) + return -EINVAL; + + s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; + s_info->spoofchk = setting; + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index c1f2c5b34d95..8d197b41242e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2044,6 +2044,14 @@ static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos) return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos); } +static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting) +{ + struct mlx4_en_priv *en_priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = en_priv->mdev; + + return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting); +} + static const struct net_device_ops mlx4_netdev_ops = { .ndo_open = mlx4_en_open, @@ -2084,6 +2092,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = { .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, .ndo_set_vf_mac = mlx4_en_set_vf_mac, .ndo_set_vf_vlan = mlx4_en_set_vf_vlan, + .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = mlx4_en_netpoll, #endif diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index d2d30c940790..b147bdd40768 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -659,6 +659,8 @@ 
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); if (field32 & (1 << 26)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL; + if (field32 & (1 << 20)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM; if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { for (i = 1; i <= dev_cap->num_ports; ++i) { diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 5083d4babfe3..e12e0d2e0ee0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -383,6 +383,14 @@ static int update_vport_qp_param(struct mlx4_dev *dev, vp_oper->vlan_idx, (int)(qpc->pri_path.feup), (int)(qpc->pri_path.fl)); } + if (vp_oper->state.spoofchk) { + qpc->pri_path.feup |= 1 << 5; /* set fsm bit */; + qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx; + mlx4_dbg(dev, "spoof qp %d port %d feup 0x%x, myLmc 0x%x mindx %d\n", + be32_to_cpu(qpc->local_qpn) & 0xffffff, port, + (int)qpc->pri_path.feup, (int)qpc->pri_path.grh_mylmc, + vp_oper->mac_idx); + } return 0; } diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 7daead75a6f5..95c3223719fa 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -234,6 +234,7 @@ void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbo u32 mlx4_comm_get_version(void); int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac); int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos); +int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting); #define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8) diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 6606d8f5862a..53acaf64189f 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -156,7 +156,8 @@ enum { MLX4_DEV_CAP_FLAG2_FS_EN = 1LL << 3, MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN = 1LL << 4, MLX4_DEV_CAP_FLAG2_TS = 1LL << 5, - MLX4_DEV_CAP_FLAG2_VLAN_CONTROL = 1LL << 6 + MLX4_DEV_CAP_FLAG2_VLAN_CONTROL = 1LL << 6, + MLX4_DEV_CAP_FLAG2_FSM = 1LL << 7 }; enum { -- cgit v1.2.3-70-g09d2 From 2cccb9e4f3476da916146c2ec571c4f3eff738b1 Mon Sep 17 00:00:00 2001 From: Rony Efraim Date: Thu, 25 Apr 2013 05:22:30 +0000 Subject: net/mlx4: Add support to get VF config Support getting VF config. Signed-off-by: Rony Efraim Signed-off-by: Or Gerlitz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/cmd.c | 33 ++++++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 8 +++++++ include/linux/mlx4/cmd.h | 2 ++ 3 files changed, 43 insertions(+) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index eda347eeb5dc..1df56cc50ee9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -2151,3 +2151,36 @@ int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting) return 0; } EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk); + +int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_vport_state *s_info; + int slave; + + if (!mlx4_is_master(dev)) + return -EPROTONOSUPPORT; + + slave = mlx4_get_slave_indx(dev, vf); + if (slave < 0) + return -EINVAL; + + s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; + ivf->vf = vf; + + /* need to convert it to a func */ + ivf->mac[0] = ((s_info->mac >> (5*8)) & 0xff); + ivf->mac[1] = ((s_info->mac >> (4*8)) & 0xff); + ivf->mac[2] = ((s_info->mac >> (3*8)) & 0xff); + ivf->mac[3] = ((s_info->mac >> (2*8)) & 0xff); + ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff); + ivf->mac[5] = ((s_info->mac) & 0xff); + + ivf->vlan = s_info->default_vlan; + ivf->qos = s_info->default_qos; + ivf->tx_rate = s_info->tx_rate; + ivf->spoofchk = s_info->spoofchk; + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_get_vf_config); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 8d197b41242e..a69a908614e6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2052,6 +2052,13 @@ static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting) return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting); } +static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf) +{ + struct mlx4_en_priv *en_priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = en_priv->mdev; + + return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf); +} static const struct net_device_ops mlx4_netdev_ops = { .ndo_open = mlx4_en_open, @@ -2093,6 +2100,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = { .ndo_set_vf_mac = mlx4_en_set_vf_mac, .ndo_set_vf_vlan = mlx4_en_set_vf_vlan, .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk, + .ndo_get_vf_config = mlx4_en_get_vf_config, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = mlx4_en_netpoll, #endif diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 95c3223719fa..adf6e0648f20 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -34,6 +34,7 @@ #define MLX4_CMD_H #include +#include enum { /* initialization and general commands */ @@ -235,6 +236,7 @@ u32 mlx4_comm_get_version(void); int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac); int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos); int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting); +int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf); #define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8) -- cgit v1.2.3-70-g09d2 From e8d9612c181b1a68ba5f71384629343466f1bd13 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Thu, 25 Apr 2013 06:53:54 +0000 Subject: sock_diag: allow to dump 
bpf filters This patch allows to dump BPF filters attached to a socket with SO_ATTACH_FILTER. Note that we check CAP_SYS_ADMIN before allowing to dump this info. For now, only AF_PACKET sockets use this feature. Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller --- include/linux/sock_diag.h | 3 +++ include/uapi/linux/packet_diag.h | 2 ++ net/core/sock_diag.c | 33 +++++++++++++++++++++++++++++++++ net/packet/diag.c | 4 ++++ 4 files changed, 42 insertions(+) (limited to 'include/linux') diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h index e8d702e0fd89..54f91d35e5fd 100644 --- a/include/linux/sock_diag.h +++ b/include/linux/sock_diag.h @@ -1,6 +1,7 @@ #ifndef __SOCK_DIAG_H__ #define __SOCK_DIAG_H__ +#include #include struct sk_buff; @@ -22,5 +23,7 @@ int sock_diag_check_cookie(void *sk, __u32 *cookie); void sock_diag_save_cookie(void *sk, __u32 *cookie); int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr); +int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk, + struct sk_buff *skb, int attrtype); #endif diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h index c0802c18c8ad..b2cc0cd9c4d9 100644 --- a/include/uapi/linux/packet_diag.h +++ b/include/uapi/linux/packet_diag.h @@ -17,6 +17,7 @@ struct packet_diag_req { #define PACKET_SHOW_RING_CFG 0x00000004 /* Rings configuration parameters */ #define PACKET_SHOW_FANOUT 0x00000008 #define PACKET_SHOW_MEMINFO 0x00000010 +#define PACKET_SHOW_FILTER 0x00000020 struct packet_diag_msg { __u8 pdiag_family; @@ -35,6 +36,7 @@ enum { PACKET_DIAG_FANOUT, PACKET_DIAG_UID, PACKET_DIAG_MEMINFO, + PACKET_DIAG_FILTER, __PACKET_DIAG_MAX, }; diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c index a29e90cf36b7..d5bef0b0f639 100644 --- a/net/core/sock_diag.c +++ b/net/core/sock_diag.c @@ -49,6 +49,39 @@ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype) } EXPORT_SYMBOL_GPL(sock_diag_put_meminfo); +int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk, + struct sk_buff *skb, int attrtype) +{ + struct nlattr *attr; + struct sk_filter *filter; + unsigned int len; + int err = 0; + + if (!ns_capable(user_ns, CAP_NET_ADMIN)) { + nla_reserve(skb, attrtype, 0); + return 0; + } + + rcu_read_lock(); + + filter = rcu_dereference(sk->sk_filter); + len = filter ? filter->len * sizeof(struct sock_filter) : 0; + + attr = nla_reserve(skb, attrtype, len); + if (attr == NULL) { + err = -EMSGSIZE; + goto out; + } + + if (filter) + memcpy(nla_data(attr), filter->insns, len); + +out: + rcu_read_unlock(); + return err; +} +EXPORT_SYMBOL(sock_diag_put_filterinfo); + void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)) { mutex_lock(&sock_diag_table_mutex); diff --git a/net/packet/diag.c b/net/packet/diag.c index 822fe9b33a49..a9584a2f6d69 100644 --- a/net/packet/diag.c +++ b/net/packet/diag.c @@ -170,6 +170,10 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, sock_diag_put_meminfo(sk, skb, PACKET_DIAG_MEMINFO)) goto out_nlmsg_trim; + if ((req->pdiag_show & PACKET_SHOW_FILTER) && + sock_diag_put_filterinfo(user_ns, sk, skb, PACKET_DIAG_FILTER)) + goto out_nlmsg_trim; + return nlmsg_end(skb, nlh); out_nlmsg_trim: -- cgit v1.2.3-70-g09d2 From 5f5624cf156283687e11ea329c7a0523c677ea0e Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Thu, 25 Apr 2013 11:08:30 +0000 Subject: ipv6: Kill ipv6 dependency of icmpv6_send(). Following patch adds icmp-registration module for ipv6. 
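The shape of that registration layer, as a minimal sketch: a single function-pointer slot that one module claims atomically and that callers dereference on every send. The snippet below is a hypothetical userspace model using C11 atomics in place of the kernel's cmpxchg()/RCU machinery (names such as register_sender() and demo_icmpv6_send() are invented for illustration); the real code added by this patch is the new net/ipv6/ip6_icmp.c further down.

/* Sketch: one registration slot, filled at most once, called indirectly. */
#include <stdatomic.h>
#include <stdio.h>
#include <errno.h>

typedef void icmp6_send_fn(int type, int code, unsigned int info);

static _Atomic(icmp6_send_fn *) sender;	/* models the ip6_icmp_send pointer */

static int register_sender(icmp6_send_fn *fn)
{
	icmp6_send_fn *expected = NULL;

	/* Succeed only if no sender is registered yet (kernel: cmpxchg()). */
	return atomic_compare_exchange_strong(&sender, &expected, fn) ? 0 : -EBUSY;
}

static int unregister_sender(icmp6_send_fn *fn)
{
	icmp6_send_fn *expected = fn;

	return atomic_compare_exchange_strong(&sender, &expected, NULL) ? 0 : -EINVAL;
}

/* Caller side: look the sender up each time, do nothing if none is set.
 * (The kernel version wraps this in rcu_read_lock()/rcu_dereference().) */
static void demo_icmpv6_send(int type, int code, unsigned int info)
{
	icmp6_send_fn *fn = atomic_load(&sender);

	if (fn)
		fn(type, code, info);
}

static void real_sender(int type, int code, unsigned int info)
{
	printf("icmp6 type=%d code=%d info=%u\n", type, code, info);
}

int main(void)
{
	demo_icmpv6_send(1, 0, 0);	/* no-op: nothing registered yet     */
	register_sender(real_sender);	/* e.g. done at icmpv6_init() time   */
	demo_icmpv6_send(1, 0, 0);	/* now dispatches through the slot   */
	unregister_sender(real_sender);
	return 0;
}
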
It allows ipv6 protocol to register icmp_sender which is used for sending ipv6 icmp msgs. This extra layer allows us to kill ipv6 dependency for sending icmp packets. This patch also fixes ip_tunnel compilation problem when ip_tunnel is statically compiled in kernel but ipv6 is module Signed-off-by: Pravin B Shelar Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/icmpv6.h | 18 +++++++++++++++--- net/ipv6/Makefile | 2 +- net/ipv6/icmp.c | 35 ++++++++++++++++++++--------------- net/ipv6/ip6_icmp.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 83 insertions(+), 19 deletions(-) create mode 100644 net/ipv6/ip6_icmp.c (limited to 'include/linux') diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h index b4f6c29caced..630f45335c73 100644 --- a/include/linux/icmpv6.h +++ b/include/linux/icmpv6.h @@ -11,9 +11,21 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb) #include -extern void icmpv6_send(struct sk_buff *skb, - u8 type, u8 code, - __u32 info); +#if IS_ENABLED(CONFIG_IPV6) +extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info); + +typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info); +extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn); +extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn); + +#else + +static inline void icmpv6_send(struct sk_buff *skb, + u8 type, u8 code, __u32 info) +{ + +} +#endif extern int icmpv6_init(void); extern int icmpv6_err_convert(u8 type, u8 code, diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile index 309af19a0a0a..9af088d2cdaa 100644 --- a/net/ipv6/Makefile +++ b/net/ipv6/Makefile @@ -40,7 +40,7 @@ obj-$(CONFIG_IPV6_SIT) += sit.o obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o obj-$(CONFIG_IPV6_GRE) += ip6_gre.o -obj-y += addrconf_core.o exthdrs_core.o ip6_checksum.o +obj-y += addrconf_core.o exthdrs_core.o ip6_checksum.o ip6_icmp.o obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6-offload) obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 71b900c3f4ff..2a53a790514d 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -123,15 +123,6 @@ static __inline__ void icmpv6_xmit_unlock(struct sock *sk) spin_unlock_bh(&sk->sk_lock.slock); } -/* - * Slightly more convenient version of icmpv6_send. - */ -void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos) -{ - icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos); - kfree_skb(skb); -} - /* * Figure out, may we reply to this packet with icmp error. * @@ -332,7 +323,7 @@ static struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *sk * anycast. */ if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) { - LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: acast source\n"); + LIMIT_NETDEBUG(KERN_DEBUG "icmp6_send: acast source\n"); dst_release(dst); return ERR_PTR(-EINVAL); } @@ -381,7 +372,7 @@ relookup_failed: /* * Send an ICMP message in response to a packet in error */ -void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) +static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) { struct net *net = dev_net(skb->dev); struct inet6_dev *idev = NULL; @@ -406,7 +397,7 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) /* * Make sure we respect the rules * i.e. RFC 1885 2.4(e) - * Rule (e.1) is enforced by not using icmpv6_send + * Rule (e.1) is enforced by not using icmp6_send * in any code that processes icmp errors. 
*/ addr_type = ipv6_addr_type(&hdr->daddr); @@ -444,7 +435,7 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) * and anycast addresses will be checked later. */ if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) { - LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: addr_any/mcast source\n"); + LIMIT_NETDEBUG(KERN_DEBUG "icmp6_send: addr_any/mcast source\n"); return; } @@ -452,7 +443,7 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) * Never answer to a ICMP packet. */ if (is_ineligible(skb)) { - LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: no reply to icmp error\n"); + LIMIT_NETDEBUG(KERN_DEBUG "icmp6_send: no reply to icmp error\n"); return; } @@ -529,7 +520,14 @@ out_dst_release: out: icmpv6_xmit_unlock(sk); } -EXPORT_SYMBOL(icmpv6_send); + +/* Slightly more convenient version of icmp6_send. + */ +void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos) +{ + icmp6_send(skb, ICMPV6_PARAMPROB, code, pos); + kfree_skb(skb); +} static void icmpv6_echo_reply(struct sk_buff *skb) { @@ -885,8 +883,14 @@ int __init icmpv6_init(void) err = -EAGAIN; if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0) goto fail; + + err = inet6_register_icmp_sender(icmp6_send); + if (err) + goto sender_reg_err; return 0; +sender_reg_err: + inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6); fail: pr_err("Failed to register ICMP6 protocol\n"); unregister_pernet_subsys(&icmpv6_sk_ops); @@ -895,6 +899,7 @@ fail: void icmpv6_cleanup(void) { + inet6_unregister_icmp_sender(icmp6_send); unregister_pernet_subsys(&icmpv6_sk_ops); inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6); } diff --git a/net/ipv6/ip6_icmp.c b/net/ipv6/ip6_icmp.c new file mode 100644 index 000000000000..4578e23834f7 --- /dev/null +++ b/net/ipv6/ip6_icmp.c @@ -0,0 +1,47 @@ +#include +#include +#include +#include +#include + +#include + +#if IS_ENABLED(CONFIG_IPV6) + +static ip6_icmp_send_t __rcu *ip6_icmp_send; + +int inet6_register_icmp_sender(ip6_icmp_send_t *fn) +{ + return (cmpxchg((ip6_icmp_send_t **)&ip6_icmp_send, NULL, fn) == NULL) ? + 0 : -EBUSY; +} +EXPORT_SYMBOL(inet6_register_icmp_sender); + +int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn) +{ + int ret; + + ret = (cmpxchg((ip6_icmp_send_t **)&ip6_icmp_send, fn, NULL) == fn) ? 
+ 0 : -EINVAL; + + synchronize_net(); + + return ret; +} +EXPORT_SYMBOL(inet6_unregister_icmp_sender); + +void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) +{ + ip6_icmp_send_t *send; + + rcu_read_lock(); + send = rcu_dereference(ip6_icmp_send); + + if (!send) + goto out; + send(skb, type, code, info); +out: + rcu_read_unlock(); +} +EXPORT_SYMBOL(icmpv6_send); +#endif -- cgit v1.2.3-70-g09d2 From 43c56e595bb81319230affd545392536c933317e Mon Sep 17 00:00:00 2001 From: Jozsef Kadlecsik Date: Mon, 8 Apr 2013 21:51:25 +0200 Subject: netfilter: ipset: Make possible to test elements marked with nomatch Signed-off-by: Jozsef Kadlecsik Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/ipset/ip_set.h | 8 ++++++++ net/netfilter/ipset/ip_set_hash_ipportnet.c | 14 ++++++++------ net/netfilter/ipset/ip_set_hash_net.c | 14 ++++++++------ net/netfilter/ipset/ip_set_hash_netiface.c | 14 ++++++++------ net/netfilter/ipset/ip_set_hash_netport.c | 14 ++++++++------ 5 files changed, 40 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 7958e84a65af..970187187f5b 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -200,6 +200,14 @@ ip_set_eexist(int ret, u32 flags) return ret == -IPSET_ERR_EXIST && (flags & IPSET_FLAG_EXIST); } +/* Match elements marked with nomatch */ +static inline bool +ip_set_enomatch(int ret, u32 flags, enum ipset_adt adt) +{ + return adt == IPSET_TEST && + ret == -ENOTEMPTY && ((flags >> 16) & IPSET_FLAG_NOMATCH); +} + /* Check the NLA_F_NET_BYTEORDER flag */ static inline bool ip_set_attr_netorder(struct nlattr *tb[], int type) diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c index 10a30b4fc7db..b4836c81fbb7 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportnet.c +++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c @@ -279,10 +279,10 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); } - if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) { + if (tb[IPSET_ATTR_CADT_FLAGS]) { u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); if (cadt_flags & IPSET_FLAG_NOMATCH) - flags |= (cadt_flags << 16); + flags |= (IPSET_FLAG_NOMATCH << 16); } with_ports = with_ports && tb[IPSET_ATTR_PORT_TO]; @@ -292,7 +292,8 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], data.ip = htonl(ip); data.ip2 = htonl(ip2_from & ip_set_hostmask(data.cidr + 1)); ret = adtfn(set, &data, timeout, flags); - return ip_set_eexist(ret, flags) ? 0 : ret; + return ip_set_enomatch(ret, flags, adt) ? 1 : + ip_set_eexist(ret, flags) ? 0 : ret; } ip_to = ip; @@ -610,15 +611,16 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[], timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); } - if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) { + if (tb[IPSET_ATTR_CADT_FLAGS]) { u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); if (cadt_flags & IPSET_FLAG_NOMATCH) - flags |= (cadt_flags << 16); + flags |= (IPSET_FLAG_NOMATCH << 16); } if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { ret = adtfn(set, &data, timeout, flags); - return ip_set_eexist(ret, flags) ? 0 : ret; + return ip_set_enomatch(ret, flags, adt) ? 1 : + ip_set_eexist(ret, flags) ? 
0 : ret; } port = ntohs(data.port); diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c index d6a59154d710..6dbe0afc5a8d 100644 --- a/net/netfilter/ipset/ip_set_hash_net.c +++ b/net/netfilter/ipset/ip_set_hash_net.c @@ -225,16 +225,17 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[], timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); } - if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) { + if (tb[IPSET_ATTR_CADT_FLAGS]) { u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); if (cadt_flags & IPSET_FLAG_NOMATCH) - flags |= (cadt_flags << 16); + flags |= (IPSET_FLAG_NOMATCH << 16); } if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) { data.ip = htonl(ip & ip_set_hostmask(data.cidr)); ret = adtfn(set, &data, timeout, flags); - return ip_set_eexist(ret, flags) ? 0 : ret; + return ip_set_enomatch(ret, flags, adt) ? 1 : + ip_set_eexist(ret, flags) ? 0 : ret; } ip_to = ip; @@ -466,15 +467,16 @@ hash_net6_uadt(struct ip_set *set, struct nlattr *tb[], timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); } - if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) { + if (tb[IPSET_ATTR_CADT_FLAGS]) { u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); if (cadt_flags & IPSET_FLAG_NOMATCH) - flags |= (cadt_flags << 16); + flags |= (IPSET_FLAG_NOMATCH << 16); } ret = adtfn(set, &data, timeout, flags); - return ip_set_eexist(ret, flags) ? 0 : ret; + return ip_set_enomatch(ret, flags, adt) ? 1 : + ip_set_eexist(ret, flags) ? 0 : ret; } /* Create hash:ip type of sets */ diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c index f2b0a3c30130..248162020d80 100644 --- a/net/netfilter/ipset/ip_set_hash_netiface.c +++ b/net/netfilter/ipset/ip_set_hash_netiface.c @@ -396,13 +396,14 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[], u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); if (cadt_flags & IPSET_FLAG_PHYSDEV) data.physdev = 1; - if (adt == IPSET_ADD && (cadt_flags & IPSET_FLAG_NOMATCH)) - flags |= (cadt_flags << 16); + if (cadt_flags & IPSET_FLAG_NOMATCH) + flags |= (IPSET_FLAG_NOMATCH << 16); } if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) { data.ip = htonl(ip & ip_set_hostmask(data.cidr)); ret = adtfn(set, &data, timeout, flags); - return ip_set_eexist(ret, flags) ? 0 : ret; + return ip_set_enomatch(ret, flags, adt) ? 1 : + ip_set_eexist(ret, flags) ? 0 : ret; } if (tb[IPSET_ATTR_IP_TO]) { @@ -704,13 +705,14 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[], u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); if (cadt_flags & IPSET_FLAG_PHYSDEV) data.physdev = 1; - if (adt == IPSET_ADD && (cadt_flags & IPSET_FLAG_NOMATCH)) - flags |= (cadt_flags << 16); + if (cadt_flags & IPSET_FLAG_NOMATCH) + flags |= (IPSET_FLAG_NOMATCH << 16); } ret = adtfn(set, &data, timeout, flags); - return ip_set_eexist(ret, flags) ? 0 : ret; + return ip_set_enomatch(ret, flags, adt) ? 1 : + ip_set_eexist(ret, flags) ? 
0 : ret; } /* Create hash:ip type of sets */ diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c index 349deb672a2d..57b0550a0b0d 100644 --- a/net/netfilter/ipset/ip_set_hash_netport.c +++ b/net/netfilter/ipset/ip_set_hash_netport.c @@ -272,16 +272,17 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[], with_ports = with_ports && tb[IPSET_ATTR_PORT_TO]; - if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) { + if (tb[IPSET_ATTR_CADT_FLAGS]) { u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); if (cadt_flags & IPSET_FLAG_NOMATCH) - flags |= (cadt_flags << 16); + flags |= (IPSET_FLAG_NOMATCH << 16); } if (adt == IPSET_TEST || !(with_ports || tb[IPSET_ATTR_IP_TO])) { data.ip = htonl(ip & ip_set_hostmask(data.cidr + 1)); ret = adtfn(set, &data, timeout, flags); - return ip_set_eexist(ret, flags) ? 0 : ret; + return ip_set_enomatch(ret, flags, adt) ? 1 : + ip_set_eexist(ret, flags) ? 0 : ret; } port = port_to = ntohs(data.port); @@ -561,15 +562,16 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[], timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); } - if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) { + if (tb[IPSET_ATTR_CADT_FLAGS]) { u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); if (cadt_flags & IPSET_FLAG_NOMATCH) - flags |= (cadt_flags << 16); + flags |= (IPSET_FLAG_NOMATCH << 16); } if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { ret = adtfn(set, &data, timeout, flags); - return ip_set_eexist(ret, flags) ? 0 : ret; + return ip_set_enomatch(ret, flags, adt) ? 1 : + ip_set_eexist(ret, flags) ? 0 : ret; } port = ntohs(data.port); -- cgit v1.2.3-70-g09d2 From 8672d4d1a00b59057bb1f9659259967d2a19e086 Mon Sep 17 00:00:00 2001 From: Jozsef Kadlecsik Date: Mon, 8 Apr 2013 20:54:37 +0200 Subject: netfilter: ipset: Move often used IPv6 address masking function to header file Signed-off-by: Jozsef Kadlecsik Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/ipset/pfxlen.h | 9 +++++++++ net/netfilter/ipset/ip_set_hash_ip.c | 9 --------- net/netfilter/ipset/ip_set_hash_ipportnet.c | 9 --------- net/netfilter/ipset/ip_set_hash_net.c | 9 --------- net/netfilter/ipset/ip_set_hash_netiface.c | 9 --------- net/netfilter/ipset/ip_set_hash_netport.c | 9 --------- 6 files changed, 9 insertions(+), 45 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter/ipset/pfxlen.h b/include/linux/netfilter/ipset/pfxlen.h index 199fd11fedc0..1afbb94b4b65 100644 --- a/include/linux/netfilter/ipset/pfxlen.h +++ b/include/linux/netfilter/ipset/pfxlen.h @@ -41,4 +41,13 @@ do { \ to = from | ~ip_set_hostmask(cidr); \ } while (0) +static inline void +ip6_netmask(union nf_inet_addr *ip, u8 prefix) +{ + ip->ip6[0] &= ip_set_netmask6(prefix)[0]; + ip->ip6[1] &= ip_set_netmask6(prefix)[1]; + ip->ip6[2] &= ip_set_netmask6(prefix)[2]; + ip->ip6[3] &= ip_set_netmask6(prefix)[3]; +} + #endif /*_PFXLEN_H */ diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c index b7d4cb475ae6..c486ad2d43b7 100644 --- a/net/netfilter/ipset/ip_set_hash_ip.c +++ b/net/netfilter/ipset/ip_set_hash_ip.c @@ -255,15 +255,6 @@ hash_ip6_data_zero_out(struct hash_ip6_elem *elem) ipv6_addr_set(&elem->ip.in6, 0, 0, 0, 0); } -static inline void -ip6_netmask(union nf_inet_addr *ip, u8 prefix) -{ - ip->ip6[0] &= ip_set_netmask6(prefix)[0]; - ip->ip6[1] &= ip_set_netmask6(prefix)[1]; - ip->ip6[2] &= ip_set_netmask6(prefix)[2]; - ip->ip6[3] &= ip_set_netmask6(prefix)[3]; -} - static bool 
hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *data) { diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c index b4836c81fbb7..352f8b4a63e5 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportnet.c +++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c @@ -445,15 +445,6 @@ hash_ipportnet6_data_zero_out(struct hash_ipportnet6_elem *elem) elem->proto = 0; } -static inline void -ip6_netmask(union nf_inet_addr *ip, u8 prefix) -{ - ip->ip6[0] &= ip_set_netmask6(prefix)[0]; - ip->ip6[1] &= ip_set_netmask6(prefix)[1]; - ip->ip6[2] &= ip_set_netmask6(prefix)[2]; - ip->ip6[3] &= ip_set_netmask6(prefix)[3]; -} - static inline void hash_ipportnet6_data_netmask(struct hash_ipportnet6_elem *elem, u8 cidr) { diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c index 6dbe0afc5a8d..370947c65b20 100644 --- a/net/netfilter/ipset/ip_set_hash_net.c +++ b/net/netfilter/ipset/ip_set_hash_net.c @@ -342,15 +342,6 @@ hash_net6_data_zero_out(struct hash_net6_elem *elem) elem->cidr = 0; } -static inline void -ip6_netmask(union nf_inet_addr *ip, u8 prefix) -{ - ip->ip6[0] &= ip_set_netmask6(prefix)[0]; - ip->ip6[1] &= ip_set_netmask6(prefix)[1]; - ip->ip6[2] &= ip_set_netmask6(prefix)[2]; - ip->ip6[3] &= ip_set_netmask6(prefix)[3]; -} - static inline void hash_net6_data_netmask(struct hash_net6_elem *elem, u8 cidr) { diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c index 248162020d80..dd9843e3b06c 100644 --- a/net/netfilter/ipset/ip_set_hash_netiface.c +++ b/net/netfilter/ipset/ip_set_hash_netiface.c @@ -528,15 +528,6 @@ hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem) elem->elem = 0; } -static inline void -ip6_netmask(union nf_inet_addr *ip, u8 prefix) -{ - ip->ip6[0] &= ip_set_netmask6(prefix)[0]; - ip->ip6[1] &= ip_set_netmask6(prefix)[1]; - ip->ip6[2] &= ip_set_netmask6(prefix)[2]; - ip->ip6[3] &= ip_set_netmask6(prefix)[3]; -} - static inline void hash_netiface6_data_netmask(struct hash_netiface6_elem *elem, u8 cidr) { diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c index 57b0550a0b0d..cd1d3c1df72a 100644 --- a/net/netfilter/ipset/ip_set_hash_netport.c +++ b/net/netfilter/ipset/ip_set_hash_netport.c @@ -406,15 +406,6 @@ hash_netport6_data_zero_out(struct hash_netport6_elem *elem) elem->proto = 0; } -static inline void -ip6_netmask(union nf_inet_addr *ip, u8 prefix) -{ - ip->ip6[0] &= ip_set_netmask6(prefix)[0]; - ip->ip6[1] &= ip_set_netmask6(prefix)[1]; - ip->ip6[2] &= ip_set_netmask6(prefix)[2]; - ip->ip6[3] &= ip_set_netmask6(prefix)[3]; -} - static inline void hash_netport6_data_netmask(struct hash_netport6_elem *elem, u8 cidr) { -- cgit v1.2.3-70-g09d2 From 075e64c041b5d3c29651965608e1e76505e01d54 Mon Sep 17 00:00:00 2001 From: Jozsef Kadlecsik Date: Sat, 27 Apr 2013 14:28:55 +0200 Subject: netfilter: ipset: Introduce extensions to elements in the core Introduce extensions to elements in the core and prepare timeout as the first one. This patch also modifies the em_ipset classifier to use the new extension struct layout. 
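The idea behind the extension layout, as a hedged illustration: each set records which extensions its elements carry (SET_WITH_TIMEOUT()) and at which byte offset each extension sits inside a stored element, so generic add/del/test code can reach, say, the timeout field without knowing the concrete element struct. The snippet below is a simplified userspace model of that offset scheme, mirroring the ext_timeout()/offset[IPSET_OFFSET_TIMEOUT] helpers used by the generic set code later in this series; struct demo_set and the other names are hypothetical, not the kernel implementation.

/* Sketch (hypothetical, userspace): per-set extension offsets. */
#include <stdio.h>
#include <string.h>

enum demo_extension {			/* mirrors enum ip_set_extension */
	DEMO_EXT_BIT_TIMEOUT = 1,
	DEMO_EXT_TIMEOUT = (1 << DEMO_EXT_BIT_TIMEOUT),
};

enum demo_offset {			/* mirrors enum ip_set_offset */
	DEMO_OFFSET_TIMEOUT = 0,
	DEMO_OFFSET_MAX,
};

struct demo_set {
	unsigned char extensions;		/* which extensions are present */
	size_t offset[DEMO_OFFSET_MAX];		/* byte offset of each extension */
	size_t dsize;				/* size of one stored element */
	unsigned char elem[4][32];		/* flat element storage */
};

/* Locate the timeout extension of element 'id' (cf. get_ext()/ext_timeout()). */
static unsigned long *demo_ext_timeout(struct demo_set *s, unsigned int id)
{
	unsigned char *e = &s->elem[0][0] + s->dsize * id;

	return (unsigned long *)(e + s->offset[DEMO_OFFSET_TIMEOUT]);
}

int main(void)
{
	struct demo_set s = {
		.extensions = DEMO_EXT_TIMEOUT,
		.dsize = 32,
	};

	/* Pretend the type-specific data takes the first 8 bytes of an element,
	 * so the timeout extension is laid out directly behind it. */
	s.offset[DEMO_OFFSET_TIMEOUT] = 8;
	memset(s.elem, 0, sizeof(s.elem));

	if (s.extensions & DEMO_EXT_TIMEOUT)	/* SET_WITH_TIMEOUT() analogue */
		*demo_ext_timeout(&s, 2) = 4242;	/* fake expiry for element #2 */

	printf("element 2 timeout (stored at offset %zu): %lu\n",
	       s.offset[DEMO_OFFSET_TIMEOUT], *demo_ext_timeout(&s, 2));
	return 0;
}

The point of the indirection is that generic code manipulates extensions purely through the per-set offset table, so the element structs of the individual set types do not each have to know about every possible extension.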
Signed-off-by: Jozsef Kadlecsik Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/ipset/ip_set.h | 46 +++++++++-- include/linux/netfilter/ipset/ip_set_timeout.h | 102 ++++++------------------- net/netfilter/ipset/ip_set_core.c | 24 ++++-- net/netfilter/xt_set.c | 24 ++---- net/sched/em_ipset.c | 2 +- 5 files changed, 87 insertions(+), 111 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 970187187f5b..bf0220cbf46a 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000-2002 Joakim Axelsson * Patrick Schaaf * Martin Josefsson - * Copyright (C) 2003-2011 Jozsef Kadlecsik + * Copyright (C) 2003-2013 Jozsef Kadlecsik * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -47,10 +47,30 @@ enum ip_set_feature { IPSET_DUMP_LAST = (1 << IPSET_DUMP_LAST_FLAG), }; +/* Set extensions */ +enum ip_set_extension { + IPSET_EXT_NONE = 0, + IPSET_EXT_BIT_TIMEOUT = 1, + IPSET_EXT_TIMEOUT = (1 << IPSET_EXT_BIT_TIMEOUT), +}; + +/* Extension offsets */ +enum ip_set_offset { + IPSET_OFFSET_TIMEOUT = 0, + IPSET_OFFSET_MAX, +}; + +#define SET_WITH_TIMEOUT(s) ((s)->extensions & IPSET_EXT_TIMEOUT) + +struct ip_set_ext { + unsigned long timeout; +}; + struct ip_set; typedef int (*ipset_adtfn)(struct ip_set *set, void *value, - u32 timeout, u32 flags); + const struct ip_set_ext *ext, + struct ip_set_ext *mext, u32 flags); /* Kernel API function options */ struct ip_set_adt_opt { @@ -58,7 +78,7 @@ struct ip_set_adt_opt { u8 dim; /* Dimension of match/target */ u8 flags; /* Direction and negation flags */ u32 cmdflags; /* Command-like flags */ - u32 timeout; /* Timeout value */ + struct ip_set_ext ext; /* Extensions */ }; /* Set type, variant-specific part */ @@ -69,7 +89,7 @@ struct ip_set_type_variant { * positive for matching element */ int (*kadt)(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, - enum ipset_adt adt, const struct ip_set_adt_opt *opt); + enum ipset_adt adt, struct ip_set_adt_opt *opt); /* Userspace: test/add/del entries * returns negative error code, @@ -151,6 +171,8 @@ struct ip_set { u8 family; /* The type revision */ u8 revision; + /* Extensions */ + u8 extensions; /* The type specific data */ void *data; }; @@ -167,19 +189,21 @@ extern void ip_set_nfnl_put(ip_set_id_t index); extern int ip_set_add(ip_set_id_t id, const struct sk_buff *skb, const struct xt_action_param *par, - const struct ip_set_adt_opt *opt); + struct ip_set_adt_opt *opt); extern int ip_set_del(ip_set_id_t id, const struct sk_buff *skb, const struct xt_action_param *par, - const struct ip_set_adt_opt *opt); + struct ip_set_adt_opt *opt); extern int ip_set_test(ip_set_id_t id, const struct sk_buff *skb, const struct xt_action_param *par, - const struct ip_set_adt_opt *opt); + struct ip_set_adt_opt *opt); /* Utility functions */ extern void *ip_set_alloc(size_t size); extern void ip_set_free(void *members); extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr); extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr); +extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], + struct ip_set_ext *ext); static inline int ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr) @@ -292,4 +316,12 @@ bitmap_bytes(u32 a, u32 b) return 4 * ((((b - a + 8) / 8) + 3) / 4); } +#include + 
+#define IP_SET_INIT_KEXT(skb, opt, map) \ + { .timeout = ip_set_adt_opt_timeout(opt, map) } + +#define IP_SET_INIT_UEXT(map) \ + { .timeout = (map)->timeout } + #endif /*_IP_SET_H */ diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h index 41d9cfa08167..3aac04167ca7 100644 --- a/include/linux/netfilter/ipset/ip_set_timeout.h +++ b/include/linux/netfilter/ipset/ip_set_timeout.h @@ -1,7 +1,7 @@ #ifndef _IP_SET_TIMEOUT_H #define _IP_SET_TIMEOUT_H -/* Copyright (C) 2003-2011 Jozsef Kadlecsik +/* Copyright (C) 2003-2013 Jozsef Kadlecsik * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -17,13 +17,14 @@ #define IPSET_GC_PERIOD(timeout) \ ((timeout/3) ? min_t(u32, (timeout)/3, IPSET_GC_TIME) : 1) -/* Set is defined without timeout support: timeout value may be 0 */ -#define IPSET_NO_TIMEOUT UINT_MAX +/* Entry is set with no timeout value */ +#define IPSET_ELEM_PERMANENT 0 -#define with_timeout(timeout) ((timeout) != IPSET_NO_TIMEOUT) +/* Set is defined with timeout support: timeout value may be 0 */ +#define IPSET_NO_TIMEOUT UINT_MAX -#define opt_timeout(opt, map) \ - (with_timeout((opt)->timeout) ? (opt)->timeout : (map)->timeout) +#define ip_set_adt_opt_timeout(opt, map) \ +((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (map)->timeout) static inline unsigned int ip_set_timeout_uget(struct nlattr *tb) @@ -38,61 +39,6 @@ ip_set_timeout_uget(struct nlattr *tb) return timeout == IPSET_NO_TIMEOUT ? IPSET_NO_TIMEOUT - 1 : timeout; } -#ifdef IP_SET_BITMAP_TIMEOUT - -/* Bitmap specific timeout constants and macros for the entries */ - -/* Bitmap entry is unset */ -#define IPSET_ELEM_UNSET 0 -/* Bitmap entry is set with no timeout value */ -#define IPSET_ELEM_PERMANENT (UINT_MAX/2) - -static inline bool -ip_set_timeout_test(unsigned long timeout) -{ - return timeout != IPSET_ELEM_UNSET && - (timeout == IPSET_ELEM_PERMANENT || - time_is_after_jiffies(timeout)); -} - -static inline bool -ip_set_timeout_expired(unsigned long timeout) -{ - return timeout != IPSET_ELEM_UNSET && - timeout != IPSET_ELEM_PERMANENT && - time_is_before_jiffies(timeout); -} - -static inline unsigned long -ip_set_timeout_set(u32 timeout) -{ - unsigned long t; - - if (!timeout) - return IPSET_ELEM_PERMANENT; - - t = msecs_to_jiffies(timeout * 1000) + jiffies; - if (t == IPSET_ELEM_UNSET || t == IPSET_ELEM_PERMANENT) - /* Bingo! */ - t++; - - return t; -} - -static inline u32 -ip_set_timeout_get(unsigned long timeout) -{ - return timeout == IPSET_ELEM_PERMANENT ? 
0 : - jiffies_to_msecs(timeout - jiffies)/1000; -} - -#else - -/* Hash specific timeout constants and macros for the entries */ - -/* Hash entry is set with no timeout value */ -#define IPSET_ELEM_PERMANENT 0 - static inline bool ip_set_timeout_test(unsigned long timeout) { @@ -101,36 +47,32 @@ ip_set_timeout_test(unsigned long timeout) } static inline bool -ip_set_timeout_expired(unsigned long timeout) +ip_set_timeout_expired(unsigned long *timeout) { - return timeout != IPSET_ELEM_PERMANENT && - time_is_before_jiffies(timeout); + return *timeout != IPSET_ELEM_PERMANENT && + time_is_before_jiffies(*timeout); } -static inline unsigned long -ip_set_timeout_set(u32 timeout) +static inline void +ip_set_timeout_set(unsigned long *timeout, u32 t) { - unsigned long t; - - if (!timeout) - return IPSET_ELEM_PERMANENT; + if (!t) { + *timeout = IPSET_ELEM_PERMANENT; + return; + } - t = msecs_to_jiffies(timeout * 1000) + jiffies; - if (t == IPSET_ELEM_PERMANENT) + *timeout = msecs_to_jiffies(t * 1000) + jiffies; + if (*timeout == IPSET_ELEM_PERMANENT) /* Bingo! :-) */ - t++; - - return t; + (*timeout)--; } static inline u32 -ip_set_timeout_get(unsigned long timeout) +ip_set_timeout_get(unsigned long *timeout) { - return timeout == IPSET_ELEM_PERMANENT ? 0 : - jiffies_to_msecs(timeout - jiffies)/1000; + return *timeout == IPSET_ELEM_PERMANENT ? 0 : + jiffies_to_msecs(*timeout - jiffies)/1000; } -#endif /* ! IP_SET_BITMAP_TIMEOUT */ #endif /* __KERNEL__ */ - #endif /* _IP_SET_TIMEOUT_H */ diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 86f5e26f39d3..4486285d10da 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -1,6 +1,6 @@ /* Copyright (C) 2000-2002 Joakim Axelsson * Patrick Schaaf - * Copyright (C) 2003-2011 Jozsef Kadlecsik + * Copyright (C) 2003-2013 Jozsef Kadlecsik * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -315,6 +315,19 @@ ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr) } EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6); +int +ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], + struct ip_set_ext *ext) +{ + if (tb[IPSET_ATTR_TIMEOUT]) { + if (!(set->extensions & IPSET_EXT_TIMEOUT)) + return -IPSET_ERR_TIMEOUT; + ext->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); + } + return 0; +} +EXPORT_SYMBOL_GPL(ip_set_get_extensions); + /* * Creating/destroying/renaming/swapping affect the existence and * the properties of a set. 
All of these can be executed from userspace @@ -365,8 +378,7 @@ ip_set_rcu_get(ip_set_id_t index) int ip_set_test(ip_set_id_t index, const struct sk_buff *skb, - const struct xt_action_param *par, - const struct ip_set_adt_opt *opt) + const struct xt_action_param *par, struct ip_set_adt_opt *opt) { struct ip_set *set = ip_set_rcu_get(index); int ret = 0; @@ -404,8 +416,7 @@ EXPORT_SYMBOL_GPL(ip_set_test); int ip_set_add(ip_set_id_t index, const struct sk_buff *skb, - const struct xt_action_param *par, - const struct ip_set_adt_opt *opt) + const struct xt_action_param *par, struct ip_set_adt_opt *opt) { struct ip_set *set = ip_set_rcu_get(index); int ret; @@ -427,8 +438,7 @@ EXPORT_SYMBOL_GPL(ip_set_add); int ip_set_del(ip_set_id_t index, const struct sk_buff *skb, - const struct xt_action_param *par, - const struct ip_set_adt_opt *opt) + const struct xt_action_param *par, struct ip_set_adt_opt *opt) { struct ip_set *set = ip_set_rcu_get(index); int ret = 0; diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c index 865a9e54f3ad..636c5199ff35 100644 --- a/net/netfilter/xt_set.c +++ b/net/netfilter/xt_set.c @@ -1,7 +1,7 @@ /* Copyright (C) 2000-2002 Joakim Axelsson * Patrick Schaaf * Martin Josefsson - * Copyright (C) 2003-2011 Jozsef Kadlecsik + * Copyright (C) 2003-2013 Jozsef Kadlecsik * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -30,7 +30,7 @@ MODULE_ALIAS("ip6t_SET"); static inline int match_set(ip_set_id_t index, const struct sk_buff *skb, const struct xt_action_param *par, - const struct ip_set_adt_opt *opt, int inv) + struct ip_set_adt_opt *opt, int inv) { if (ip_set_test(index, skb, par, opt)) inv = !inv; @@ -38,20 +38,12 @@ match_set(ip_set_id_t index, const struct sk_buff *skb, } #define ADT_OPT(n, f, d, fs, cfs, t) \ -const struct ip_set_adt_opt n = { \ - .family = f, \ - .dim = d, \ - .flags = fs, \ - .cmdflags = cfs, \ - .timeout = t, \ -} -#define ADT_MOPT(n, f, d, fs, cfs, t) \ struct ip_set_adt_opt n = { \ .family = f, \ .dim = d, \ .flags = fs, \ .cmdflags = cfs, \ - .timeout = t, \ + .ext.timeout = t, \ } /* Revision 0 interface: backward compatible with netfilter/iptables */ @@ -305,15 +297,15 @@ static unsigned int set_target_v2(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_set_info_target_v2 *info = par->targinfo; - ADT_MOPT(add_opt, par->family, info->add_set.dim, - info->add_set.flags, info->flags, info->timeout); + ADT_OPT(add_opt, par->family, info->add_set.dim, + info->add_set.flags, info->flags, info->timeout); ADT_OPT(del_opt, par->family, info->del_set.dim, info->del_set.flags, 0, UINT_MAX); /* Normalize to fit into jiffies */ - if (add_opt.timeout != IPSET_NO_TIMEOUT && - add_opt.timeout > UINT_MAX/MSEC_PER_SEC) - add_opt.timeout = UINT_MAX/MSEC_PER_SEC; + if (add_opt.ext.timeout != IPSET_NO_TIMEOUT && + add_opt.ext.timeout > UINT_MAX/MSEC_PER_SEC) + add_opt.ext.timeout = UINT_MAX/MSEC_PER_SEC; if (info->add_set.index != IPSET_INVALID_ID) ip_set_add(info->add_set.index, skb, par, &add_opt); if (info->del_set.index != IPSET_INVALID_ID) diff --git a/net/sched/em_ipset.c b/net/sched/em_ipset.c index 3130320997e2..938b7cbf5627 100644 --- a/net/sched/em_ipset.c +++ b/net/sched/em_ipset.c @@ -83,7 +83,7 @@ static int em_ipset_match(struct sk_buff *skb, struct tcf_ematch *em, opt.dim = set->dim; opt.flags = set->flags; opt.cmdflags = 0; - opt.timeout = ~0u; + opt.ext.timeout = ~0u; network_offset = skb_network_offset(skb); 
skb_pull(skb, network_offset); -- cgit v1.2.3-70-g09d2 From 4d73de38c256623b324474098f7d2bb4e97f7cf0 Mon Sep 17 00:00:00 2001 From: Jozsef Kadlecsik Date: Mon, 8 Apr 2013 21:00:52 +0200 Subject: netfilter: ipset: Unified bitmap type generation Signed-off-by: Jozsef Kadlecsik Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/ipset/ip_set_bitmap.h | 6 + net/netfilter/ipset/ip_set_bitmap_gen.h | 265 ++++++++++++++++++++++++++ 2 files changed, 271 insertions(+) create mode 100644 net/netfilter/ipset/ip_set_bitmap_gen.h (limited to 'include/linux') diff --git a/include/linux/netfilter/ipset/ip_set_bitmap.h b/include/linux/netfilter/ipset/ip_set_bitmap.h index 1a30646d5be8..5e4662a71e01 100644 --- a/include/linux/netfilter/ipset/ip_set_bitmap.h +++ b/include/linux/netfilter/ipset/ip_set_bitmap.h @@ -5,6 +5,12 @@ #define IPSET_BITMAP_MAX_RANGE 0x0000FFFF +enum { + IPSET_ADD_FAILED = 1, + IPSET_ADD_STORE_PLAIN_TIMEOUT, + IPSET_ADD_START_STORED_TIMEOUT, +}; + /* Common functions */ static inline u32 diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h new file mode 100644 index 000000000000..b9931591cbe9 --- /dev/null +++ b/net/netfilter/ipset/ip_set_bitmap_gen.h @@ -0,0 +1,265 @@ +/* Copyright (C) 2013 Jozsef Kadlecsik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __IP_SET_BITMAP_IP_GEN_H +#define __IP_SET_BITMAP_IP_GEN_H + +#define CONCAT(a, b) a##b +#define TOKEN(a,b) CONCAT(a, b) + +#define mtype_do_test TOKEN(MTYPE, _do_test) +#define mtype_gc_test TOKEN(MTYPE, _gc_test) +#define mtype_is_filled TOKEN(MTYPE, _is_filled) +#define mtype_do_add TOKEN(MTYPE, _do_add) +#define mtype_do_del TOKEN(MTYPE, _do_del) +#define mtype_do_list TOKEN(MTYPE, _do_list) +#define mtype_do_head TOKEN(MTYPE, _do_head) +#define mtype_adt_elem TOKEN(MTYPE, _adt_elem) +#define mtype_add_timeout TOKEN(MTYPE, _add_timeout) +#define mtype_gc_init TOKEN(MTYPE, _gc_init) +#define mtype_kadt TOKEN(MTYPE, _kadt) +#define mtype_uadt TOKEN(MTYPE, _uadt) +#define mtype_destroy TOKEN(MTYPE, _destroy) +#define mtype_flush TOKEN(MTYPE, _flush) +#define mtype_head TOKEN(MTYPE, _head) +#define mtype_same_set TOKEN(MTYPE, _same_set) +#define mtype_elem TOKEN(MTYPE, _elem) +#define mtype_test TOKEN(MTYPE, _test) +#define mtype_add TOKEN(MTYPE, _add) +#define mtype_del TOKEN(MTYPE, _del) +#define mtype_list TOKEN(MTYPE, _list) +#define mtype_gc TOKEN(MTYPE, _gc) +#define mtype MTYPE + +#define ext_timeout(e, m) \ + (unsigned long *)((e) + (m)->offset[IPSET_OFFSET_TIMEOUT]) +#define get_ext(map, id) ((map)->extensions + (map)->dsize * (id)) + +static void +mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set)) +{ + struct mtype *map = set->data; + + init_timer(&map->gc); + map->gc.data = (unsigned long) set; + map->gc.function = gc; + map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ; + add_timer(&map->gc); +} + +static void +mtype_destroy(struct ip_set *set) +{ + struct mtype *map = set->data; + + if (SET_WITH_TIMEOUT(set)) + del_timer_sync(&map->gc); + + ip_set_free(map->members); + if (map->dsize) + ip_set_free(map->extensions); + kfree(map); + + set->data = NULL; +} + +static void +mtype_flush(struct ip_set *set) +{ + struct mtype *map = set->data; + + memset(map->members, 0, map->memsize); +} + +static int +mtype_head(struct ip_set *set, struct sk_buff *skb) +{ + const struct mtype *map = 
set->data; + struct nlattr *nested; + + nested = ipset_nest_start(skb, IPSET_ATTR_DATA); + if (!nested) + goto nla_put_failure; + if (mtype_do_head(skb, map) || + nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || + nla_put_net32(skb, IPSET_ATTR_MEMSIZE, + htonl(sizeof(*map) + + map->memsize + + map->dsize * map->elements)) || + (SET_WITH_TIMEOUT(set) && + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout)))) + goto nla_put_failure; + ipset_nest_end(skb, nested); + + return 0; +nla_put_failure: + return -EMSGSIZE; +} + +static int +mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext, + struct ip_set_ext *mext, u32 flags) +{ + struct mtype *map = set->data; + const struct mtype_adt_elem *e = value; + void *x = get_ext(map, e->id); + int ret = mtype_do_test(e, map); + + if (ret <= 0) + return ret; + if (SET_WITH_TIMEOUT(set) && + ip_set_timeout_expired(ext_timeout(x, map))) + return 0; + return 1; +} + +static int +mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext, + struct ip_set_ext *mext, u32 flags) +{ + struct mtype *map = set->data; + const struct mtype_adt_elem *e = value; + void *x = get_ext(map, e->id); + int ret = mtype_do_add(e, map, flags); + + if (ret == IPSET_ADD_FAILED) { + if (SET_WITH_TIMEOUT(set) && + ip_set_timeout_expired(ext_timeout(x, map))) + ret = 0; + else if (!(flags & IPSET_FLAG_EXIST)) + return -IPSET_ERR_EXIST; + } + + if (SET_WITH_TIMEOUT(set)) +#ifdef IP_SET_BITMAP_STORED_TIMEOUT + mtype_add_timeout(ext_timeout(x, map), e, ext, map, ret); +#else + ip_set_timeout_set(ext_timeout(x, map), ext->timeout); +#endif + + return 0; +} + +static int +mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext, + struct ip_set_ext *mext, u32 flags) +{ + struct mtype *map = set->data; + const struct mtype_adt_elem *e = value; + const void *x = get_ext(map, e->id); + + if (mtype_do_del(e, map) || + (SET_WITH_TIMEOUT(set) && + ip_set_timeout_expired(ext_timeout(x, map)))) + return -IPSET_ERR_EXIST; + + return 0; +} + +static int +mtype_list(const struct ip_set *set, + struct sk_buff *skb, struct netlink_callback *cb) +{ + struct mtype *map = set->data; + struct nlattr *adt, *nested; + void *x; + u32 id, first = cb->args[2]; + + adt = ipset_nest_start(skb, IPSET_ATTR_ADT); + if (!adt) + return -EMSGSIZE; + for (; cb->args[2] < map->elements; cb->args[2]++) { + id = cb->args[2]; + x = get_ext(map, id); + if (!test_bit(id, map->members) || + (SET_WITH_TIMEOUT(set) && +#ifdef IP_SET_BITMAP_STORED_TIMEOUT + mtype_is_filled((const struct mtype_elem *) x) && +#endif + ip_set_timeout_expired(ext_timeout(x, map)))) + continue; + nested = ipset_nest_start(skb, IPSET_ATTR_DATA); + if (!nested) { + if (id == first) { + nla_nest_cancel(skb, adt); + return -EMSGSIZE; + } else + goto nla_put_failure; + } + if (mtype_do_list(skb, map, id)) + goto nla_put_failure; + if (SET_WITH_TIMEOUT(set)) { +#ifdef IP_SET_BITMAP_STORED_TIMEOUT + if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_stored(map, id, + ext_timeout(x, map))))) + goto nla_put_failure; +#else + if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get( + ext_timeout(x, map))))) + goto nla_put_failure; +#endif + } + ipset_nest_end(skb, nested); + } + ipset_nest_end(skb, adt); + + /* Set listing finished */ + cb->args[2] = 0; + + return 0; + +nla_put_failure: + nla_nest_cancel(skb, nested); + ipset_nest_end(skb, adt); + if (unlikely(id == first)) { + cb->args[2] = 0; + return -EMSGSIZE; + } + return 0; +} + +static void 
+mtype_gc(unsigned long ul_set) +{ + struct ip_set *set = (struct ip_set *) ul_set; + struct mtype *map = set->data; + const void *x; + u32 id; + + /* We run parallel with other readers (test element) + * but adding/deleting new entries is locked out */ + read_lock_bh(&set->lock); + for (id = 0; id < map->elements; id++) + if (mtype_gc_test(id, map)) { + x = get_ext(map, id); + if (ip_set_timeout_expired(ext_timeout(x, map))) + clear_bit(id, map->members); + } + read_unlock_bh(&set->lock); + + map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ; + add_timer(&map->gc); +} + +static const struct ip_set_type_variant mtype = { + .kadt = mtype_kadt, + .uadt = mtype_uadt, + .adt = { + [IPSET_ADD] = mtype_add, + [IPSET_DEL] = mtype_del, + [IPSET_TEST] = mtype_test, + }, + .destroy = mtype_destroy, + .flush = mtype_flush, + .head = mtype_head, + .list = mtype_list, + .same_set = mtype_same_set, +}; + +#endif /* __IP_SET_BITMAP_IP_GEN_H */ -- cgit v1.2.3-70-g09d2 From 1feab10d7e6ddb5e13d6a4bd93a1b877390262ec Mon Sep 17 00:00:00 2001 From: Jozsef Kadlecsik Date: Mon, 8 Apr 2013 21:05:44 +0200 Subject: netfilter: ipset: Unified hash type generation Signed-off-by: Jozsef Kadlecsik Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/ipset/ip_set_ahash.h | 1241 -------------------------- net/netfilter/ipset/ip_set_hash_gen.h | 1039 +++++++++++++++++++++ 2 files changed, 1039 insertions(+), 1241 deletions(-) delete mode 100644 include/linux/netfilter/ipset/ip_set_ahash.h create mode 100644 net/netfilter/ipset/ip_set_hash_gen.h (limited to 'include/linux') diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h deleted file mode 100644 index 0214c4c146fa..000000000000 --- a/include/linux/netfilter/ipset/ip_set_ahash.h +++ /dev/null @@ -1,1241 +0,0 @@ -#ifndef _IP_SET_AHASH_H -#define _IP_SET_AHASH_H - -#include -#include -#include - -#define CONCAT(a, b, c) a##b##c -#define TOKEN(a, b, c) CONCAT(a, b, c) - -#define type_pf_next TOKEN(TYPE, PF, _elem) - -/* Hashing which uses arrays to resolve clashing. The hash table is resized - * (doubled) when searching becomes too long. - * Internally jhash is used with the assumption that the size of the - * stored data is a multiple of sizeof(u32). If storage supports timeout, - * the timeout field must be the last one in the data structure - that field - * is ignored when computing the hash key. - * - * Readers and resizing - * - * Resizing can be triggered by userspace command only, and those - * are serialized by the nfnl mutex. During resizing the set is - * read-locked, so the only possible concurrent operations are - * the kernel side readers. Those must be protected by proper RCU locking. - */ - -/* Number of elements to store in an initial array block */ -#define AHASH_INIT_SIZE 4 -/* Max number of elements to store in an array block */ -#define AHASH_MAX_SIZE (3*AHASH_INIT_SIZE) - -/* Max number of elements can be tuned */ -#ifdef IP_SET_HASH_WITH_MULTI -#define AHASH_MAX(h) ((h)->ahash_max) - -static inline u8 -tune_ahash_max(u8 curr, u32 multi) -{ - u32 n; - - if (multi < curr) - return curr; - - n = curr + AHASH_INIT_SIZE; - /* Currently, at listing one hash bucket must fit into a message. - * Therefore we have a hard limit here. - */ - return n > curr && n <= 64 ? 
n : curr; -} -#define TUNE_AHASH_MAX(h, multi) \ - ((h)->ahash_max = tune_ahash_max((h)->ahash_max, multi)) -#else -#define AHASH_MAX(h) AHASH_MAX_SIZE -#define TUNE_AHASH_MAX(h, multi) -#endif - -/* A hash bucket */ -struct hbucket { - void *value; /* the array of the values */ - u8 size; /* size of the array */ - u8 pos; /* position of the first free entry */ -}; - -/* The hash table: the table size stored here in order to make resizing easy */ -struct htable { - u8 htable_bits; /* size of hash table == 2^htable_bits */ - struct hbucket bucket[0]; /* hashtable buckets */ -}; - -#define hbucket(h, i) (&((h)->bucket[i])) - -/* Book-keeping of the prefixes added to the set */ -struct ip_set_hash_nets { - u8 cidr; /* the different cidr values in the set */ - u32 nets; /* number of elements per cidr */ -}; - -/* The generic ip_set hash structure */ -struct ip_set_hash { - struct htable *table; /* the hash table */ - u32 maxelem; /* max elements in the hash */ - u32 elements; /* current element (vs timeout) */ - u32 initval; /* random jhash init value */ - u32 timeout; /* timeout value, if enabled */ - struct timer_list gc; /* garbage collection when timeout enabled */ - struct type_pf_next next; /* temporary storage for uadd */ -#ifdef IP_SET_HASH_WITH_MULTI - u8 ahash_max; /* max elements in an array block */ -#endif -#ifdef IP_SET_HASH_WITH_NETMASK - u8 netmask; /* netmask value for subnets to store */ -#endif -#ifdef IP_SET_HASH_WITH_RBTREE - struct rb_root rbtree; -#endif -#ifdef IP_SET_HASH_WITH_NETS - struct ip_set_hash_nets nets[0]; /* book-keeping of prefixes */ -#endif -}; - -static size_t -htable_size(u8 hbits) -{ - size_t hsize; - - /* We must fit both into u32 in jhash and size_t */ - if (hbits > 31) - return 0; - hsize = jhash_size(hbits); - if ((((size_t)-1) - sizeof(struct htable))/sizeof(struct hbucket) - < hsize) - return 0; - - return hsize * sizeof(struct hbucket) + sizeof(struct htable); -} - -/* Compute htable_bits from the user input parameter hashsize */ -static u8 -htable_bits(u32 hashsize) -{ - /* Assume that hashsize == 2^htable_bits */ - u8 bits = fls(hashsize - 1); - if (jhash_size(bits) != hashsize) - /* Round up to the first 2^n value */ - bits = fls(hashsize); - - return bits; -} - -#ifdef IP_SET_HASH_WITH_NETS -#ifdef IP_SET_HASH_WITH_NETS_PACKED -/* When cidr is packed with nomatch, cidr - 1 is stored in the entry */ -#define CIDR(cidr) (cidr + 1) -#else -#define CIDR(cidr) (cidr) -#endif - -#define SET_HOST_MASK(family) (family == AF_INET ? 
32 : 128) -#ifdef IP_SET_HASH_WITH_MULTI -#define NETS_LENGTH(family) (SET_HOST_MASK(family) + 1) -#else -#define NETS_LENGTH(family) SET_HOST_MASK(family) -#endif - -/* Network cidr size book keeping when the hash stores different - * sized networks */ -static void -add_cidr(struct ip_set_hash *h, u8 cidr, u8 nets_length) -{ - int i, j; - - /* Add in increasing prefix order, so larger cidr first */ - for (i = 0, j = -1; i < nets_length && h->nets[i].nets; i++) { - if (j != -1) - continue; - else if (h->nets[i].cidr < cidr) - j = i; - else if (h->nets[i].cidr == cidr) { - h->nets[i].nets++; - return; - } - } - if (j != -1) { - for (; i > j; i--) { - h->nets[i].cidr = h->nets[i - 1].cidr; - h->nets[i].nets = h->nets[i - 1].nets; - } - } - h->nets[i].cidr = cidr; - h->nets[i].nets = 1; -} - -static void -del_cidr(struct ip_set_hash *h, u8 cidr, u8 nets_length) -{ - u8 i, j; - - for (i = 0; i < nets_length - 1 && h->nets[i].cidr != cidr; i++) - ; - h->nets[i].nets--; - - if (h->nets[i].nets != 0) - return; - - for (j = i; j < nets_length - 1 && h->nets[j].nets; j++) { - h->nets[j].cidr = h->nets[j + 1].cidr; - h->nets[j].nets = h->nets[j + 1].nets; - } -} -#else -#define NETS_LENGTH(family) 0 -#endif - -/* Destroy the hashtable part of the set */ -static void -ahash_destroy(struct htable *t) -{ - struct hbucket *n; - u32 i; - - for (i = 0; i < jhash_size(t->htable_bits); i++) { - n = hbucket(t, i); - if (n->size) - /* FIXME: use slab cache */ - kfree(n->value); - } - - ip_set_free(t); -} - -/* Calculate the actual memory size of the set data */ -static size_t -ahash_memsize(const struct ip_set_hash *h, size_t dsize, u8 nets_length) -{ - u32 i; - struct htable *t = h->table; - size_t memsize = sizeof(*h) - + sizeof(*t) -#ifdef IP_SET_HASH_WITH_NETS - + sizeof(struct ip_set_hash_nets) * nets_length -#endif - + jhash_size(t->htable_bits) * sizeof(struct hbucket); - - for (i = 0; i < jhash_size(t->htable_bits); i++) - memsize += t->bucket[i].size * dsize; - - return memsize; -} - -/* Flush a hash type of set: destroy all elements */ -static void -ip_set_hash_flush(struct ip_set *set) -{ - struct ip_set_hash *h = set->data; - struct htable *t = h->table; - struct hbucket *n; - u32 i; - - for (i = 0; i < jhash_size(t->htable_bits); i++) { - n = hbucket(t, i); - if (n->size) { - n->size = n->pos = 0; - /* FIXME: use slab cache */ - kfree(n->value); - } - } -#ifdef IP_SET_HASH_WITH_NETS - memset(h->nets, 0, sizeof(struct ip_set_hash_nets) - * NETS_LENGTH(set->family)); -#endif - h->elements = 0; -} - -/* Destroy a hash type of set */ -static void -ip_set_hash_destroy(struct ip_set *set) -{ - struct ip_set_hash *h = set->data; - - if (with_timeout(h->timeout)) - del_timer_sync(&h->gc); - - ahash_destroy(h->table); -#ifdef IP_SET_HASH_WITH_RBTREE - rbtree_destroy(&h->rbtree); -#endif - kfree(h); - - set->data = NULL; -} - -#endif /* _IP_SET_AHASH_H */ - -#ifndef HKEY_DATALEN -#define HKEY_DATALEN sizeof(struct type_pf_elem) -#endif - -#define HKEY(data, initval, htable_bits) \ -(jhash2((u32 *)(data), HKEY_DATALEN/sizeof(u32), initval) \ - & jhash_mask(htable_bits)) - -/* Type/family dependent function prototypes */ - -#define type_pf_data_equal TOKEN(TYPE, PF, _data_equal) -#define type_pf_data_isnull TOKEN(TYPE, PF, _data_isnull) -#define type_pf_data_copy TOKEN(TYPE, PF, _data_copy) -#define type_pf_data_zero_out TOKEN(TYPE, PF, _data_zero_out) -#define type_pf_data_netmask TOKEN(TYPE, PF, _data_netmask) -#define type_pf_data_list TOKEN(TYPE, PF, _data_list) -#define type_pf_data_tlist TOKEN(TYPE, 
PF, _data_tlist) -#define type_pf_data_next TOKEN(TYPE, PF, _data_next) -#define type_pf_data_flags TOKEN(TYPE, PF, _data_flags) -#define type_pf_data_reset_flags TOKEN(TYPE, PF, _data_reset_flags) -#ifdef IP_SET_HASH_WITH_NETS -#define type_pf_data_match TOKEN(TYPE, PF, _data_match) -#else -#define type_pf_data_match(d) 1 -#endif - -#define type_pf_elem TOKEN(TYPE, PF, _elem) -#define type_pf_telem TOKEN(TYPE, PF, _telem) -#define type_pf_data_timeout TOKEN(TYPE, PF, _data_timeout) -#define type_pf_data_expired TOKEN(TYPE, PF, _data_expired) -#define type_pf_data_timeout_set TOKEN(TYPE, PF, _data_timeout_set) - -#define type_pf_elem_add TOKEN(TYPE, PF, _elem_add) -#define type_pf_add TOKEN(TYPE, PF, _add) -#define type_pf_del TOKEN(TYPE, PF, _del) -#define type_pf_test_cidrs TOKEN(TYPE, PF, _test_cidrs) -#define type_pf_test TOKEN(TYPE, PF, _test) - -#define type_pf_elem_tadd TOKEN(TYPE, PF, _elem_tadd) -#define type_pf_del_telem TOKEN(TYPE, PF, _ahash_del_telem) -#define type_pf_expire TOKEN(TYPE, PF, _expire) -#define type_pf_tadd TOKEN(TYPE, PF, _tadd) -#define type_pf_tdel TOKEN(TYPE, PF, _tdel) -#define type_pf_ttest_cidrs TOKEN(TYPE, PF, _ahash_ttest_cidrs) -#define type_pf_ttest TOKEN(TYPE, PF, _ahash_ttest) - -#define type_pf_resize TOKEN(TYPE, PF, _resize) -#define type_pf_tresize TOKEN(TYPE, PF, _tresize) -#define type_pf_flush ip_set_hash_flush -#define type_pf_destroy ip_set_hash_destroy -#define type_pf_head TOKEN(TYPE, PF, _head) -#define type_pf_list TOKEN(TYPE, PF, _list) -#define type_pf_tlist TOKEN(TYPE, PF, _tlist) -#define type_pf_same_set TOKEN(TYPE, PF, _same_set) -#define type_pf_kadt TOKEN(TYPE, PF, _kadt) -#define type_pf_uadt TOKEN(TYPE, PF, _uadt) -#define type_pf_gc TOKEN(TYPE, PF, _gc) -#define type_pf_gc_init TOKEN(TYPE, PF, _gc_init) -#define type_pf_variant TOKEN(TYPE, PF, _variant) -#define type_pf_tvariant TOKEN(TYPE, PF, _tvariant) - -/* Flavour without timeout */ - -/* Get the ith element from the array block n */ -#define ahash_data(n, i) \ - ((struct type_pf_elem *)((n)->value) + (i)) - -/* Add an element to the hash table when resizing the set: - * we spare the maintenance of the internal counters. */ -static int -type_pf_elem_add(struct hbucket *n, const struct type_pf_elem *value, - u8 ahash_max, u32 cadt_flags) -{ - struct type_pf_elem *data; - - if (n->pos >= n->size) { - void *tmp; - - if (n->size >= ahash_max) - /* Trigger rehashing */ - return -EAGAIN; - - tmp = kzalloc((n->size + AHASH_INIT_SIZE) - * sizeof(struct type_pf_elem), - GFP_ATOMIC); - if (!tmp) - return -ENOMEM; - if (n->size) { - memcpy(tmp, n->value, - sizeof(struct type_pf_elem) * n->size); - kfree(n->value); - } - n->value = tmp; - n->size += AHASH_INIT_SIZE; - } - data = ahash_data(n, n->pos++); - type_pf_data_copy(data, value); -#ifdef IP_SET_HASH_WITH_NETS - /* Resizing won't overwrite stored flags */ - if (cadt_flags) - type_pf_data_flags(data, cadt_flags); -#endif - return 0; -} - -/* Resize a hash: create a new hash table with doubling the hashsize - * and inserting the elements to it. Repeat until we succeed or - * fail due to memory pressures. 
*/ -static int -type_pf_resize(struct ip_set *set, bool retried) -{ - struct ip_set_hash *h = set->data; - struct htable *t, *orig = h->table; - u8 htable_bits = orig->htable_bits; - struct type_pf_elem *data; - struct hbucket *n, *m; - u32 i, j, flags = 0; - int ret; - -retry: - ret = 0; - htable_bits++; - pr_debug("attempt to resize set %s from %u to %u, t %p\n", - set->name, orig->htable_bits, htable_bits, orig); - if (!htable_bits) { - /* In case we have plenty of memory :-) */ - pr_warning("Cannot increase the hashsize of set %s further\n", - set->name); - return -IPSET_ERR_HASH_FULL; - } - t = ip_set_alloc(sizeof(*t) - + jhash_size(htable_bits) * sizeof(struct hbucket)); - if (!t) - return -ENOMEM; - t->htable_bits = htable_bits; - - read_lock_bh(&set->lock); - for (i = 0; i < jhash_size(orig->htable_bits); i++) { - n = hbucket(orig, i); - for (j = 0; j < n->pos; j++) { - data = ahash_data(n, j); -#ifdef IP_SET_HASH_WITH_NETS - flags = 0; - type_pf_data_reset_flags(data, &flags); -#endif - m = hbucket(t, HKEY(data, h->initval, htable_bits)); - ret = type_pf_elem_add(m, data, AHASH_MAX(h), flags); - if (ret < 0) { -#ifdef IP_SET_HASH_WITH_NETS - type_pf_data_flags(data, flags); -#endif - read_unlock_bh(&set->lock); - ahash_destroy(t); - if (ret == -EAGAIN) - goto retry; - return ret; - } - } - } - - rcu_assign_pointer(h->table, t); - read_unlock_bh(&set->lock); - - /* Give time to other readers of the set */ - synchronize_rcu_bh(); - - pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name, - orig->htable_bits, orig, t->htable_bits, t); - ahash_destroy(orig); - - return 0; -} - -static inline void -type_pf_data_next(struct ip_set_hash *h, const struct type_pf_elem *d); - -/* Add an element to a hash and update the internal counters when succeeded, - * otherwise report the proper error code. */ -static int -type_pf_add(struct ip_set *set, void *value, u32 timeout, u32 flags) -{ - struct ip_set_hash *h = set->data; - struct htable *t; - const struct type_pf_elem *d = value; - struct hbucket *n; - int i, ret = 0; - u32 key, multi = 0; - u32 cadt_flags = flags >> 16; - - if (h->elements >= h->maxelem) { - if (net_ratelimit()) - pr_warning("Set %s is full, maxelem %u reached\n", - set->name, h->maxelem); - return -IPSET_ERR_HASH_FULL; - } - - rcu_read_lock_bh(); - t = rcu_dereference_bh(h->table); - key = HKEY(value, h->initval, t->htable_bits); - n = hbucket(t, key); - for (i = 0; i < n->pos; i++) - if (type_pf_data_equal(ahash_data(n, i), d, &multi)) { -#ifdef IP_SET_HASH_WITH_NETS - if (flags & IPSET_FLAG_EXIST) - /* Support overwriting just the flags */ - type_pf_data_flags(ahash_data(n, i), - cadt_flags); -#endif - ret = -IPSET_ERR_EXIST; - goto out; - } - TUNE_AHASH_MAX(h, multi); - ret = type_pf_elem_add(n, value, AHASH_MAX(h), cadt_flags); - if (ret != 0) { - if (ret == -EAGAIN) - type_pf_data_next(h, d); - goto out; - } - -#ifdef IP_SET_HASH_WITH_NETS - add_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family)); -#endif - h->elements++; -out: - rcu_read_unlock_bh(); - return ret; -} - -/* Delete an element from the hash: swap it with the last element - * and free up space if possible. 
- */ -static int -type_pf_del(struct ip_set *set, void *value, u32 timeout, u32 flags) -{ - struct ip_set_hash *h = set->data; - struct htable *t = h->table; - const struct type_pf_elem *d = value; - struct hbucket *n; - int i; - struct type_pf_elem *data; - u32 key, multi = 0; - - key = HKEY(value, h->initval, t->htable_bits); - n = hbucket(t, key); - for (i = 0; i < n->pos; i++) { - data = ahash_data(n, i); - if (!type_pf_data_equal(data, d, &multi)) - continue; - if (i != n->pos - 1) - /* Not last one */ - type_pf_data_copy(data, ahash_data(n, n->pos - 1)); - - n->pos--; - h->elements--; -#ifdef IP_SET_HASH_WITH_NETS - del_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family)); -#endif - if (n->pos + AHASH_INIT_SIZE < n->size) { - void *tmp = kzalloc((n->size - AHASH_INIT_SIZE) - * sizeof(struct type_pf_elem), - GFP_ATOMIC); - if (!tmp) - return 0; - n->size -= AHASH_INIT_SIZE; - memcpy(tmp, n->value, - n->size * sizeof(struct type_pf_elem)); - kfree(n->value); - n->value = tmp; - } - return 0; - } - - return -IPSET_ERR_EXIST; -} - -#ifdef IP_SET_HASH_WITH_NETS - -/* Special test function which takes into account the different network - * sizes added to the set */ -static int -type_pf_test_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout) -{ - struct ip_set_hash *h = set->data; - struct htable *t = h->table; - struct hbucket *n; - const struct type_pf_elem *data; - int i, j = 0; - u32 key, multi = 0; - u8 nets_length = NETS_LENGTH(set->family); - - pr_debug("test by nets\n"); - for (; j < nets_length && h->nets[j].nets && !multi; j++) { - type_pf_data_netmask(d, h->nets[j].cidr); - key = HKEY(d, h->initval, t->htable_bits); - n = hbucket(t, key); - for (i = 0; i < n->pos; i++) { - data = ahash_data(n, i); - if (type_pf_data_equal(data, d, &multi)) - return type_pf_data_match(data); - } - } - return 0; -} -#endif - -/* Test whether the element is added to the set */ -static int -type_pf_test(struct ip_set *set, void *value, u32 timeout, u32 flags) -{ - struct ip_set_hash *h = set->data; - struct htable *t = h->table; - struct type_pf_elem *d = value; - struct hbucket *n; - const struct type_pf_elem *data; - int i; - u32 key, multi = 0; - -#ifdef IP_SET_HASH_WITH_NETS - /* If we test an IP address and not a network address, - * try all possible network sizes */ - if (CIDR(d->cidr) == SET_HOST_MASK(set->family)) - return type_pf_test_cidrs(set, d, timeout); -#endif - - key = HKEY(d, h->initval, t->htable_bits); - n = hbucket(t, key); - for (i = 0; i < n->pos; i++) { - data = ahash_data(n, i); - if (type_pf_data_equal(data, d, &multi)) - return type_pf_data_match(data); - } - return 0; -} - -/* Reply a HEADER request: fill out the header part of the set */ -static int -type_pf_head(struct ip_set *set, struct sk_buff *skb) -{ - const struct ip_set_hash *h = set->data; - struct nlattr *nested; - size_t memsize; - - read_lock_bh(&set->lock); - memsize = ahash_memsize(h, with_timeout(h->timeout) - ? 
sizeof(struct type_pf_telem) - : sizeof(struct type_pf_elem), - NETS_LENGTH(set->family)); - read_unlock_bh(&set->lock); - - nested = ipset_nest_start(skb, IPSET_ATTR_DATA); - if (!nested) - goto nla_put_failure; - if (nla_put_net32(skb, IPSET_ATTR_HASHSIZE, - htonl(jhash_size(h->table->htable_bits))) || - nla_put_net32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem))) - goto nla_put_failure; -#ifdef IP_SET_HASH_WITH_NETMASK - if (h->netmask != HOST_MASK && - nla_put_u8(skb, IPSET_ATTR_NETMASK, h->netmask)) - goto nla_put_failure; -#endif - if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || - nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) || - (with_timeout(h->timeout) && - nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout)))) - goto nla_put_failure; - ipset_nest_end(skb, nested); - - return 0; -nla_put_failure: - return -EMSGSIZE; -} - -/* Reply a LIST/SAVE request: dump the elements of the specified set */ -static int -type_pf_list(const struct ip_set *set, - struct sk_buff *skb, struct netlink_callback *cb) -{ - const struct ip_set_hash *h = set->data; - const struct htable *t = h->table; - struct nlattr *atd, *nested; - const struct hbucket *n; - const struct type_pf_elem *data; - u32 first = cb->args[2]; - /* We assume that one hash bucket fills into one page */ - void *incomplete; - int i; - - atd = ipset_nest_start(skb, IPSET_ATTR_ADT); - if (!atd) - return -EMSGSIZE; - pr_debug("list hash set %s\n", set->name); - for (; cb->args[2] < jhash_size(t->htable_bits); cb->args[2]++) { - incomplete = skb_tail_pointer(skb); - n = hbucket(t, cb->args[2]); - pr_debug("cb->args[2]: %lu, t %p n %p\n", cb->args[2], t, n); - for (i = 0; i < n->pos; i++) { - data = ahash_data(n, i); - pr_debug("list hash %lu hbucket %p i %u, data %p\n", - cb->args[2], n, i, data); - nested = ipset_nest_start(skb, IPSET_ATTR_DATA); - if (!nested) { - if (cb->args[2] == first) { - nla_nest_cancel(skb, atd); - return -EMSGSIZE; - } else - goto nla_put_failure; - } - if (type_pf_data_list(skb, data)) - goto nla_put_failure; - ipset_nest_end(skb, nested); - } - } - ipset_nest_end(skb, atd); - /* Set listing finished */ - cb->args[2] = 0; - - return 0; - -nla_put_failure: - nlmsg_trim(skb, incomplete); - ipset_nest_end(skb, atd); - if (unlikely(first == cb->args[2])) { - pr_warning("Can't list set %s: one bucket does not fit into " - "a message. 
Please report it!\n", set->name); - cb->args[2] = 0; - return -EMSGSIZE; - } - return 0; -} - -static int -type_pf_kadt(struct ip_set *set, const struct sk_buff *skb, - const struct xt_action_param *par, - enum ipset_adt adt, const struct ip_set_adt_opt *opt); -static int -type_pf_uadt(struct ip_set *set, struct nlattr *tb[], - enum ipset_adt adt, u32 *lineno, u32 flags, bool retried); - -static const struct ip_set_type_variant type_pf_variant = { - .kadt = type_pf_kadt, - .uadt = type_pf_uadt, - .adt = { - [IPSET_ADD] = type_pf_add, - [IPSET_DEL] = type_pf_del, - [IPSET_TEST] = type_pf_test, - }, - .destroy = type_pf_destroy, - .flush = type_pf_flush, - .head = type_pf_head, - .list = type_pf_list, - .resize = type_pf_resize, - .same_set = type_pf_same_set, -}; - -/* Flavour with timeout support */ - -#define ahash_tdata(n, i) \ - (struct type_pf_elem *)((struct type_pf_telem *)((n)->value) + (i)) - -static inline u32 -type_pf_data_timeout(const struct type_pf_elem *data) -{ - const struct type_pf_telem *tdata = - (const struct type_pf_telem *) data; - - return tdata->timeout; -} - -static inline bool -type_pf_data_expired(const struct type_pf_elem *data) -{ - const struct type_pf_telem *tdata = - (const struct type_pf_telem *) data; - - return ip_set_timeout_expired(tdata->timeout); -} - -static inline void -type_pf_data_timeout_set(struct type_pf_elem *data, u32 timeout) -{ - struct type_pf_telem *tdata = (struct type_pf_telem *) data; - - tdata->timeout = ip_set_timeout_set(timeout); -} - -static int -type_pf_elem_tadd(struct hbucket *n, const struct type_pf_elem *value, - u8 ahash_max, u32 cadt_flags, u32 timeout) -{ - struct type_pf_elem *data; - - if (n->pos >= n->size) { - void *tmp; - - if (n->size >= ahash_max) - /* Trigger rehashing */ - return -EAGAIN; - - tmp = kzalloc((n->size + AHASH_INIT_SIZE) - * sizeof(struct type_pf_telem), - GFP_ATOMIC); - if (!tmp) - return -ENOMEM; - if (n->size) { - memcpy(tmp, n->value, - sizeof(struct type_pf_telem) * n->size); - kfree(n->value); - } - n->value = tmp; - n->size += AHASH_INIT_SIZE; - } - data = ahash_tdata(n, n->pos++); - type_pf_data_copy(data, value); - type_pf_data_timeout_set(data, timeout); -#ifdef IP_SET_HASH_WITH_NETS - /* Resizing won't overwrite stored flags */ - if (cadt_flags) - type_pf_data_flags(data, cadt_flags); -#endif - return 0; -} - -/* Delete expired elements from the hashtable */ -static void -type_pf_expire(struct ip_set_hash *h, u8 nets_length) -{ - struct htable *t = h->table; - struct hbucket *n; - struct type_pf_elem *data; - u32 i; - int j; - - for (i = 0; i < jhash_size(t->htable_bits); i++) { - n = hbucket(t, i); - for (j = 0; j < n->pos; j++) { - data = ahash_tdata(n, j); - if (type_pf_data_expired(data)) { - pr_debug("expired %u/%u\n", i, j); -#ifdef IP_SET_HASH_WITH_NETS - del_cidr(h, CIDR(data->cidr), nets_length); -#endif - if (j != n->pos - 1) - /* Not last one */ - type_pf_data_copy(data, - ahash_tdata(n, n->pos - 1)); - n->pos--; - h->elements--; - } - } - if (n->pos + AHASH_INIT_SIZE < n->size) { - void *tmp = kzalloc((n->size - AHASH_INIT_SIZE) - * sizeof(struct type_pf_telem), - GFP_ATOMIC); - if (!tmp) - /* Still try to delete expired elements */ - continue; - n->size -= AHASH_INIT_SIZE; - memcpy(tmp, n->value, - n->size * sizeof(struct type_pf_telem)); - kfree(n->value); - n->value = tmp; - } - } -} - -static int -type_pf_tresize(struct ip_set *set, bool retried) -{ - struct ip_set_hash *h = set->data; - struct htable *t, *orig = h->table; - u8 htable_bits = orig->htable_bits; - struct 
type_pf_elem *data; - struct hbucket *n, *m; - u32 i, j, flags = 0; - int ret; - - /* Try to cleanup once */ - if (!retried) { - i = h->elements; - write_lock_bh(&set->lock); - type_pf_expire(set->data, NETS_LENGTH(set->family)); - write_unlock_bh(&set->lock); - if (h->elements < i) - return 0; - } - -retry: - ret = 0; - htable_bits++; - pr_debug("attempt to resize set %s from %u to %u, t %p\n", - set->name, orig->htable_bits, htable_bits, orig); - if (!htable_bits) { - /* In case we have plenty of memory :-) */ - pr_warning("Cannot increase the hashsize of set %s further\n", - set->name); - return -IPSET_ERR_HASH_FULL; - } - t = ip_set_alloc(sizeof(*t) - + jhash_size(htable_bits) * sizeof(struct hbucket)); - if (!t) - return -ENOMEM; - t->htable_bits = htable_bits; - - read_lock_bh(&set->lock); - for (i = 0; i < jhash_size(orig->htable_bits); i++) { - n = hbucket(orig, i); - for (j = 0; j < n->pos; j++) { - data = ahash_tdata(n, j); -#ifdef IP_SET_HASH_WITH_NETS - flags = 0; - type_pf_data_reset_flags(data, &flags); -#endif - m = hbucket(t, HKEY(data, h->initval, htable_bits)); - ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), flags, - ip_set_timeout_get(type_pf_data_timeout(data))); - if (ret < 0) { -#ifdef IP_SET_HASH_WITH_NETS - type_pf_data_flags(data, flags); -#endif - read_unlock_bh(&set->lock); - ahash_destroy(t); - if (ret == -EAGAIN) - goto retry; - return ret; - } - } - } - - rcu_assign_pointer(h->table, t); - read_unlock_bh(&set->lock); - - /* Give time to other readers of the set */ - synchronize_rcu_bh(); - - ahash_destroy(orig); - - return 0; -} - -static int -type_pf_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags) -{ - struct ip_set_hash *h = set->data; - struct htable *t = h->table; - const struct type_pf_elem *d = value; - struct hbucket *n; - struct type_pf_elem *data; - int ret = 0, i, j = AHASH_MAX(h) + 1; - bool flag_exist = flags & IPSET_FLAG_EXIST; - u32 key, multi = 0; - u32 cadt_flags = flags >> 16; - - if (h->elements >= h->maxelem) - /* FIXME: when set is full, we slow down here */ - type_pf_expire(h, NETS_LENGTH(set->family)); - if (h->elements >= h->maxelem) { - if (net_ratelimit()) - pr_warning("Set %s is full, maxelem %u reached\n", - set->name, h->maxelem); - return -IPSET_ERR_HASH_FULL; - } - - rcu_read_lock_bh(); - t = rcu_dereference_bh(h->table); - key = HKEY(d, h->initval, t->htable_bits); - n = hbucket(t, key); - for (i = 0; i < n->pos; i++) { - data = ahash_tdata(n, i); - if (type_pf_data_equal(data, d, &multi)) { - if (type_pf_data_expired(data) || flag_exist) - /* Just timeout value may be updated */ - j = i; - else { - ret = -IPSET_ERR_EXIST; - goto out; - } - } else if (j == AHASH_MAX(h) + 1 && - type_pf_data_expired(data)) - j = i; - } - if (j != AHASH_MAX(h) + 1) { - data = ahash_tdata(n, j); -#ifdef IP_SET_HASH_WITH_NETS - del_cidr(h, CIDR(data->cidr), NETS_LENGTH(set->family)); - add_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family)); -#endif - type_pf_data_copy(data, d); - type_pf_data_timeout_set(data, timeout); -#ifdef IP_SET_HASH_WITH_NETS - type_pf_data_flags(data, cadt_flags); -#endif - goto out; - } - TUNE_AHASH_MAX(h, multi); - ret = type_pf_elem_tadd(n, d, AHASH_MAX(h), cadt_flags, timeout); - if (ret != 0) { - if (ret == -EAGAIN) - type_pf_data_next(h, d); - goto out; - } - -#ifdef IP_SET_HASH_WITH_NETS - add_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family)); -#endif - h->elements++; -out: - rcu_read_unlock_bh(); - return ret; -} - -static int -type_pf_tdel(struct ip_set *set, void *value, u32 timeout, u32 flags) -{ - 
struct ip_set_hash *h = set->data; - struct htable *t = h->table; - const struct type_pf_elem *d = value; - struct hbucket *n; - int i; - struct type_pf_elem *data; - u32 key, multi = 0; - - key = HKEY(value, h->initval, t->htable_bits); - n = hbucket(t, key); - for (i = 0; i < n->pos; i++) { - data = ahash_tdata(n, i); - if (!type_pf_data_equal(data, d, &multi)) - continue; - if (type_pf_data_expired(data)) - return -IPSET_ERR_EXIST; - if (i != n->pos - 1) - /* Not last one */ - type_pf_data_copy(data, ahash_tdata(n, n->pos - 1)); - - n->pos--; - h->elements--; -#ifdef IP_SET_HASH_WITH_NETS - del_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family)); -#endif - if (n->pos + AHASH_INIT_SIZE < n->size) { - void *tmp = kzalloc((n->size - AHASH_INIT_SIZE) - * sizeof(struct type_pf_telem), - GFP_ATOMIC); - if (!tmp) - return 0; - n->size -= AHASH_INIT_SIZE; - memcpy(tmp, n->value, - n->size * sizeof(struct type_pf_telem)); - kfree(n->value); - n->value = tmp; - } - return 0; - } - - return -IPSET_ERR_EXIST; -} - -#ifdef IP_SET_HASH_WITH_NETS -static int -type_pf_ttest_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout) -{ - struct ip_set_hash *h = set->data; - struct htable *t = h->table; - struct type_pf_elem *data; - struct hbucket *n; - int i, j = 0; - u32 key, multi = 0; - u8 nets_length = NETS_LENGTH(set->family); - - for (; j < nets_length && h->nets[j].nets && !multi; j++) { - type_pf_data_netmask(d, h->nets[j].cidr); - key = HKEY(d, h->initval, t->htable_bits); - n = hbucket(t, key); - for (i = 0; i < n->pos; i++) { - data = ahash_tdata(n, i); -#ifdef IP_SET_HASH_WITH_MULTI - if (type_pf_data_equal(data, d, &multi)) { - if (!type_pf_data_expired(data)) - return type_pf_data_match(data); - multi = 0; - } -#else - if (type_pf_data_equal(data, d, &multi) && - !type_pf_data_expired(data)) - return type_pf_data_match(data); -#endif - } - } - return 0; -} -#endif - -static int -type_pf_ttest(struct ip_set *set, void *value, u32 timeout, u32 flags) -{ - struct ip_set_hash *h = set->data; - struct htable *t = h->table; - struct type_pf_elem *data, *d = value; - struct hbucket *n; - int i; - u32 key, multi = 0; - -#ifdef IP_SET_HASH_WITH_NETS - if (CIDR(d->cidr) == SET_HOST_MASK(set->family)) - return type_pf_ttest_cidrs(set, d, timeout); -#endif - key = HKEY(d, h->initval, t->htable_bits); - n = hbucket(t, key); - for (i = 0; i < n->pos; i++) { - data = ahash_tdata(n, i); - if (type_pf_data_equal(data, d, &multi) && - !type_pf_data_expired(data)) - return type_pf_data_match(data); - } - return 0; -} - -static int -type_pf_tlist(const struct ip_set *set, - struct sk_buff *skb, struct netlink_callback *cb) -{ - const struct ip_set_hash *h = set->data; - const struct htable *t = h->table; - struct nlattr *atd, *nested; - const struct hbucket *n; - const struct type_pf_elem *data; - u32 first = cb->args[2]; - /* We assume that one hash bucket fills into one page */ - void *incomplete; - int i; - - atd = ipset_nest_start(skb, IPSET_ATTR_ADT); - if (!atd) - return -EMSGSIZE; - for (; cb->args[2] < jhash_size(t->htable_bits); cb->args[2]++) { - incomplete = skb_tail_pointer(skb); - n = hbucket(t, cb->args[2]); - for (i = 0; i < n->pos; i++) { - data = ahash_tdata(n, i); - pr_debug("list %p %u\n", n, i); - if (type_pf_data_expired(data)) - continue; - pr_debug("do list %p %u\n", n, i); - nested = ipset_nest_start(skb, IPSET_ATTR_DATA); - if (!nested) { - if (cb->args[2] == first) { - nla_nest_cancel(skb, atd); - return -EMSGSIZE; - } else - goto nla_put_failure; - } - if (type_pf_data_tlist(skb, 
data)) - goto nla_put_failure; - ipset_nest_end(skb, nested); - } - } - ipset_nest_end(skb, atd); - /* Set listing finished */ - cb->args[2] = 0; - - return 0; - -nla_put_failure: - nlmsg_trim(skb, incomplete); - ipset_nest_end(skb, atd); - if (unlikely(first == cb->args[2])) { - pr_warning("Can't list set %s: one bucket does not fit into " - "a message. Please report it!\n", set->name); - cb->args[2] = 0; - return -EMSGSIZE; - } - return 0; -} - -static const struct ip_set_type_variant type_pf_tvariant = { - .kadt = type_pf_kadt, - .uadt = type_pf_uadt, - .adt = { - [IPSET_ADD] = type_pf_tadd, - [IPSET_DEL] = type_pf_tdel, - [IPSET_TEST] = type_pf_ttest, - }, - .destroy = type_pf_destroy, - .flush = type_pf_flush, - .head = type_pf_head, - .list = type_pf_tlist, - .resize = type_pf_tresize, - .same_set = type_pf_same_set, -}; - -static void -type_pf_gc(unsigned long ul_set) -{ - struct ip_set *set = (struct ip_set *) ul_set; - struct ip_set_hash *h = set->data; - - pr_debug("called\n"); - write_lock_bh(&set->lock); - type_pf_expire(h, NETS_LENGTH(set->family)); - write_unlock_bh(&set->lock); - - h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ; - add_timer(&h->gc); -} - -static void -type_pf_gc_init(struct ip_set *set) -{ - struct ip_set_hash *h = set->data; - - init_timer(&h->gc); - h->gc.data = (unsigned long) set; - h->gc.function = type_pf_gc; - h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ; - add_timer(&h->gc); - pr_debug("gc initialized, run in every %u\n", - IPSET_GC_PERIOD(h->timeout)); -} - -#undef HKEY_DATALEN -#undef HKEY -#undef type_pf_data_equal -#undef type_pf_data_isnull -#undef type_pf_data_copy -#undef type_pf_data_zero_out -#undef type_pf_data_netmask -#undef type_pf_data_list -#undef type_pf_data_tlist -#undef type_pf_data_next -#undef type_pf_data_flags -#undef type_pf_data_reset_flags -#undef type_pf_data_match - -#undef type_pf_elem -#undef type_pf_telem -#undef type_pf_data_timeout -#undef type_pf_data_expired -#undef type_pf_data_timeout_set - -#undef type_pf_elem_add -#undef type_pf_add -#undef type_pf_del -#undef type_pf_test_cidrs -#undef type_pf_test - -#undef type_pf_elem_tadd -#undef type_pf_del_telem -#undef type_pf_expire -#undef type_pf_tadd -#undef type_pf_tdel -#undef type_pf_ttest_cidrs -#undef type_pf_ttest - -#undef type_pf_resize -#undef type_pf_tresize -#undef type_pf_flush -#undef type_pf_destroy -#undef type_pf_head -#undef type_pf_list -#undef type_pf_tlist -#undef type_pf_same_set -#undef type_pf_kadt -#undef type_pf_uadt -#undef type_pf_gc -#undef type_pf_gc_init -#undef type_pf_variant -#undef type_pf_tvariant diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h new file mode 100644 index 000000000000..2ba7d4e76cde --- /dev/null +++ b/net/netfilter/ipset/ip_set_hash_gen.h @@ -0,0 +1,1039 @@ +/* Copyright (C) 2013 Jozsef Kadlecsik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _IP_SET_HASH_GEN_H +#define _IP_SET_HASH_GEN_H + +#include +#include +#include +#ifndef rcu_dereference_bh +#define rcu_dereference_bh(p) rcu_dereference(p) +#endif + +#define CONCAT(a, b) a##b +#define TOKEN(a, b) CONCAT(a, b) + +/* Hashing which uses arrays to resolve clashing. The hash table is resized + * (doubled) when searching becomes too long. 
+ * Internally jhash is used with the assumption that the size of the + * stored data is a multiple of sizeof(u32). If storage supports timeout, + * the timeout field must be the last one in the data structure - that field + * is ignored when computing the hash key. + * + * Readers and resizing + * + * Resizing can be triggered by userspace command only, and those + * are serialized by the nfnl mutex. During resizing the set is + * read-locked, so the only possible concurrent operations are + * the kernel side readers. Those must be protected by proper RCU locking. + */ + +/* Number of elements to store in an initial array block */ +#define AHASH_INIT_SIZE 4 +/* Max number of elements to store in an array block */ +#define AHASH_MAX_SIZE (3*AHASH_INIT_SIZE) + +/* Max number of elements can be tuned */ +#ifdef IP_SET_HASH_WITH_MULTI +#define AHASH_MAX(h) ((h)->ahash_max) + +static inline u8 +tune_ahash_max(u8 curr, u32 multi) +{ + u32 n; + + if (multi < curr) + return curr; + + n = curr + AHASH_INIT_SIZE; + /* Currently, at listing one hash bucket must fit into a message. + * Therefore we have a hard limit here. + */ + return n > curr && n <= 64 ? n : curr; +} +#define TUNE_AHASH_MAX(h, multi) \ + ((h)->ahash_max = tune_ahash_max((h)->ahash_max, multi)) +#else +#define AHASH_MAX(h) AHASH_MAX_SIZE +#define TUNE_AHASH_MAX(h, multi) +#endif + +/* A hash bucket */ +struct hbucket { + void *value; /* the array of the values */ + u8 size; /* size of the array */ + u8 pos; /* position of the first free entry */ +}; + +/* The hash table: the table size stored here in order to make resizing easy */ +struct htable { + u8 htable_bits; /* size of hash table == 2^htable_bits */ + struct hbucket bucket[0]; /* hashtable buckets */ +}; + +#define hbucket(h, i) (&((h)->bucket[i])) + +/* Book-keeping of the prefixes added to the set */ +struct net_prefixes { + u8 cidr; /* the different cidr values in the set */ + u32 nets; /* number of elements per cidr */ +}; + +/* Compute the hash table size */ +static size_t +htable_size(u8 hbits) +{ + size_t hsize; + + /* We must fit both into u32 in jhash and size_t */ + if (hbits > 31) + return 0; + hsize = jhash_size(hbits); + if ((((size_t)-1) - sizeof(struct htable))/sizeof(struct hbucket) + < hsize) + return 0; + + return hsize * sizeof(struct hbucket) + sizeof(struct htable); +} + +/* Compute htable_bits from the user input parameter hashsize */ +static u8 +htable_bits(u32 hashsize) +{ + /* Assume that hashsize == 2^htable_bits */ + u8 bits = fls(hashsize - 1); + if (jhash_size(bits) != hashsize) + /* Round up to the first 2^n value */ + bits = fls(hashsize); + + return bits; +} + +/* Destroy the hashtable part of the set */ +static void +ahash_destroy(struct htable *t) +{ + struct hbucket *n; + u32 i; + + for (i = 0; i < jhash_size(t->htable_bits); i++) { + n = hbucket(t, i); + if (n->size) + /* FIXME: use slab cache */ + kfree(n->value); + } + + ip_set_free(t); +} + +static int +hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize) +{ + if (n->pos >= n->size) { + void *tmp; + + if (n->size >= ahash_max) + /* Trigger rehashing */ + return -EAGAIN; + + tmp = kzalloc((n->size + AHASH_INIT_SIZE) * dsize, + GFP_ATOMIC); + if (!tmp) + return -ENOMEM; + if (n->size) { + memcpy(tmp, n->value, n->size * dsize); + kfree(n->value); + } + n->value = tmp; + n->size += AHASH_INIT_SIZE; + } + return 0; +} + +#ifdef IP_SET_HASH_WITH_NETS +#ifdef IP_SET_HASH_WITH_NETS_PACKED +/* When cidr is packed with nomatch, cidr - 1 is stored in the entry */ +#define CIDR(cidr) 
(cidr + 1) +#else +#define CIDR(cidr) (cidr) +#endif + +#define SET_HOST_MASK(family) (family == AF_INET ? 32 : 128) + +#ifdef IP_SET_HASH_WITH_MULTI +#define NETS_LENGTH(family) (SET_HOST_MASK(family) + 1) +#else +#define NETS_LENGTH(family) SET_HOST_MASK(family) +#endif + +#else +#define NETS_LENGTH(family) 0 +#endif /* IP_SET_HASH_WITH_NETS */ + +#define ext_timeout(e, h) \ +(unsigned long *)(((void *)(e)) + (h)->offset[IPSET_OFFSET_TIMEOUT]) + +#endif /* _IP_SET_HASH_GEN_H */ + +/* Family dependent templates */ + +#undef ahash_data +#undef mtype_data_equal +#undef mtype_do_data_match +#undef mtype_data_set_flags +#undef mtype_data_reset_flags +#undef mtype_data_netmask +#undef mtype_data_list +#undef mtype_data_next +#undef mtype_elem + +#undef mtype_add_cidr +#undef mtype_del_cidr +#undef mtype_ahash_memsize +#undef mtype_flush +#undef mtype_destroy +#undef mtype_gc_init +#undef mtype_same_set +#undef mtype_kadt +#undef mtype_uadt +#undef mtype + +#undef mtype_add +#undef mtype_del +#undef mtype_test_cidrs +#undef mtype_test +#undef mtype_expire +#undef mtype_resize +#undef mtype_head +#undef mtype_list +#undef mtype_gc +#undef mtype_gc_init +#undef mtype_variant +#undef mtype_data_match + +#undef HKEY + +#define mtype_data_equal TOKEN(MTYPE, _data_equal) +#ifdef IP_SET_HASH_WITH_NETS +#define mtype_do_data_match TOKEN(MTYPE, _do_data_match) +#else +#define mtype_do_data_match(d) 1 +#endif +#define mtype_data_set_flags TOKEN(MTYPE, _data_set_flags) +#define mtype_data_reset_flags TOKEN(MTYPE, _data_reset_flags) +#define mtype_data_netmask TOKEN(MTYPE, _data_netmask) +#define mtype_data_list TOKEN(MTYPE, _data_list) +#define mtype_data_next TOKEN(MTYPE, _data_next) +#define mtype_elem TOKEN(MTYPE, _elem) +#define mtype_add_cidr TOKEN(MTYPE, _add_cidr) +#define mtype_del_cidr TOKEN(MTYPE, _del_cidr) +#define mtype_ahash_memsize TOKEN(MTYPE, _ahash_memsize) +#define mtype_flush TOKEN(MTYPE, _flush) +#define mtype_destroy TOKEN(MTYPE, _destroy) +#define mtype_gc_init TOKEN(MTYPE, _gc_init) +#define mtype_same_set TOKEN(MTYPE, _same_set) +#define mtype_kadt TOKEN(MTYPE, _kadt) +#define mtype_uadt TOKEN(MTYPE, _uadt) +#define mtype MTYPE + +#define mtype_elem TOKEN(MTYPE, _elem) +#define mtype_add TOKEN(MTYPE, _add) +#define mtype_del TOKEN(MTYPE, _del) +#define mtype_test_cidrs TOKEN(MTYPE, _test_cidrs) +#define mtype_test TOKEN(MTYPE, _test) +#define mtype_expire TOKEN(MTYPE, _expire) +#define mtype_resize TOKEN(MTYPE, _resize) +#define mtype_head TOKEN(MTYPE, _head) +#define mtype_list TOKEN(MTYPE, _list) +#define mtype_gc TOKEN(MTYPE, _gc) +#define mtype_variant TOKEN(MTYPE, _variant) +#define mtype_data_match TOKEN(MTYPE, _data_match) + +#ifndef HKEY_DATALEN +#define HKEY_DATALEN sizeof(struct mtype_elem) +#endif + +#define HKEY(data, initval, htable_bits) \ +(jhash2((u32 *)(data), HKEY_DATALEN/sizeof(u32), initval) \ + & jhash_mask(htable_bits)) + +#ifndef htype +#define htype HTYPE + +/* The generic hash structure */ +struct htype { + struct htable *table; /* the hash table */ + u32 maxelem; /* max elements in the hash */ + u32 elements; /* current element (vs timeout) */ + u32 initval; /* random jhash init value */ + u32 timeout; /* timeout value, if enabled */ + size_t dsize; /* data struct size */ + size_t offset[IPSET_OFFSET_MAX]; /* Offsets to extensions */ + struct timer_list gc; /* garbage collection when timeout enabled */ + struct mtype_elem next; /* temporary storage for uadd */ +#ifdef IP_SET_HASH_WITH_MULTI + u8 ahash_max; /* max elements in an array block */ +#endif 
+#ifdef IP_SET_HASH_WITH_NETMASK + u8 netmask; /* netmask value for subnets to store */ +#endif +#ifdef IP_SET_HASH_WITH_RBTREE + struct rb_root rbtree; +#endif +#ifdef IP_SET_HASH_WITH_NETS + struct net_prefixes nets[0]; /* book-keeping of prefixes */ +#endif +}; +#endif + +#ifdef IP_SET_HASH_WITH_NETS +/* Network cidr size book keeping when the hash stores different + * sized networks */ +static void +mtype_add_cidr(struct htype *h, u8 cidr, u8 nets_length) +{ + int i, j; + + /* Add in increasing prefix order, so larger cidr first */ + for (i = 0, j = -1; i < nets_length && h->nets[i].nets; i++) { + if (j != -1) + continue; + else if (h->nets[i].cidr < cidr) + j = i; + else if (h->nets[i].cidr == cidr) { + h->nets[i].nets++; + return; + } + } + if (j != -1) { + for (; i > j; i--) { + h->nets[i].cidr = h->nets[i - 1].cidr; + h->nets[i].nets = h->nets[i - 1].nets; + } + } + h->nets[i].cidr = cidr; + h->nets[i].nets = 1; +} + +static void +mtype_del_cidr(struct htype *h, u8 cidr, u8 nets_length) +{ + u8 i, j; + + for (i = 0; i < nets_length - 1 && h->nets[i].cidr != cidr; i++) + ; + h->nets[i].nets--; + + if (h->nets[i].nets != 0) + return; + + for (j = i; j < nets_length - 1 && h->nets[j].nets; j++) { + h->nets[j].cidr = h->nets[j + 1].cidr; + h->nets[j].nets = h->nets[j + 1].nets; + } +} +#endif + +/* Calculate the actual memory size of the set data */ +static size_t +mtype_ahash_memsize(const struct htype *h, u8 nets_length) +{ + u32 i; + struct htable *t = h->table; + size_t memsize = sizeof(*h) + + sizeof(*t) +#ifdef IP_SET_HASH_WITH_NETS + + sizeof(struct net_prefixes) * nets_length +#endif + + jhash_size(t->htable_bits) * sizeof(struct hbucket); + + for (i = 0; i < jhash_size(t->htable_bits); i++) + memsize += t->bucket[i].size * h->dsize; + + return memsize; +} + +/* Flush a hash type of set: destroy all elements */ +static void +mtype_flush(struct ip_set *set) +{ + struct htype *h = set->data; + struct htable *t = h->table; + struct hbucket *n; + u32 i; + + for (i = 0; i < jhash_size(t->htable_bits); i++) { + n = hbucket(t, i); + if (n->size) { + n->size = n->pos = 0; + /* FIXME: use slab cache */ + kfree(n->value); + } + } +#ifdef IP_SET_HASH_WITH_NETS + memset(h->nets, 0, sizeof(struct net_prefixes) + * NETS_LENGTH(set->family)); +#endif + h->elements = 0; +} + +/* Destroy a hash type of set */ +static void +mtype_destroy(struct ip_set *set) +{ + struct htype *h = set->data; + + if (set->extensions & IPSET_EXT_TIMEOUT) + del_timer_sync(&h->gc); + + ahash_destroy(h->table); +#ifdef IP_SET_HASH_WITH_RBTREE + rbtree_destroy(&h->rbtree); +#endif + kfree(h); + + set->data = NULL; +} + +static void +mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set)) +{ + struct htype *h = set->data; + + init_timer(&h->gc); + h->gc.data = (unsigned long) set; + h->gc.function = gc; + h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ; + add_timer(&h->gc); + pr_debug("gc initialized, run in every %u\n", + IPSET_GC_PERIOD(h->timeout)); +} + +static bool +mtype_same_set(const struct ip_set *a, const struct ip_set *b) +{ + const struct htype *x = a->data; + const struct htype *y = b->data; + + /* Resizing changes htable_bits, so we ignore it */ + return x->maxelem == y->maxelem && + x->timeout == y->timeout && +#ifdef IP_SET_HASH_WITH_NETMASK + x->netmask == y->netmask && +#endif + a->extensions == b->extensions; +} + +/* Get the ith element from the array block n */ +#define ahash_data(n, i, dsize) \ + ((struct mtype_elem *)((n)->value + ((i) * (dsize)))) + +/* Delete expired 
elements from the hashtable */ +static void +mtype_expire(struct htype *h, u8 nets_length, size_t dsize) +{ + struct htable *t = h->table; + struct hbucket *n; + struct mtype_elem *data; + u32 i; + int j; + + for (i = 0; i < jhash_size(t->htable_bits); i++) { + n = hbucket(t, i); + for (j = 0; j < n->pos; j++) { + data = ahash_data(n, j, dsize); + if (ip_set_timeout_expired(ext_timeout(data, h))) { + pr_debug("expired %u/%u\n", i, j); +#ifdef IP_SET_HASH_WITH_NETS + mtype_del_cidr(h, CIDR(data->cidr), + nets_length); +#endif + if (j != n->pos - 1) + /* Not last one */ + memcpy(data, + ahash_data(n, n->pos - 1, dsize), + dsize); + n->pos--; + h->elements--; + } + } + if (n->pos + AHASH_INIT_SIZE < n->size) { + void *tmp = kzalloc((n->size - AHASH_INIT_SIZE) + * dsize, + GFP_ATOMIC); + if (!tmp) + /* Still try to delete expired elements */ + continue; + n->size -= AHASH_INIT_SIZE; + memcpy(tmp, n->value, n->size * dsize); + kfree(n->value); + n->value = tmp; + } + } +} + +static void +mtype_gc(unsigned long ul_set) +{ + struct ip_set *set = (struct ip_set *) ul_set; + struct htype *h = set->data; + + pr_debug("called\n"); + write_lock_bh(&set->lock); + mtype_expire(h, NETS_LENGTH(set->family), h->dsize); + write_unlock_bh(&set->lock); + + h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ; + add_timer(&h->gc); +} + +/* Resize a hash: create a new hash table with doubling the hashsize + * and inserting the elements to it. Repeat until we succeed or + * fail due to memory pressures. */ +static int +mtype_resize(struct ip_set *set, bool retried) +{ + struct htype *h = set->data; + struct htable *t, *orig = h->table; + u8 htable_bits = orig->htable_bits; +#ifdef IP_SET_HASH_WITH_NETS + u8 flags; +#endif + struct mtype_elem *data; + struct mtype_elem *d; + struct hbucket *n, *m; + u32 i, j; + int ret; + + /* Try to cleanup once */ + if (SET_WITH_TIMEOUT(set) && !retried) { + i = h->elements; + write_lock_bh(&set->lock); + mtype_expire(set->data, NETS_LENGTH(set->family), + h->dsize); + write_unlock_bh(&set->lock); + if (h->elements < i) + return 0; + } + +retry: + ret = 0; + htable_bits++; + pr_debug("attempt to resize set %s from %u to %u, t %p\n", + set->name, orig->htable_bits, htable_bits, orig); + if (!htable_bits) { + /* In case we have plenty of memory :-) */ + pr_warning("Cannot increase the hashsize of set %s further\n", + set->name); + return -IPSET_ERR_HASH_FULL; + } + t = ip_set_alloc(sizeof(*t) + + jhash_size(htable_bits) * sizeof(struct hbucket)); + if (!t) + return -ENOMEM; + t->htable_bits = htable_bits; + + read_lock_bh(&set->lock); + for (i = 0; i < jhash_size(orig->htable_bits); i++) { + n = hbucket(orig, i); + for (j = 0; j < n->pos; j++) { + data = ahash_data(n, j, h->dsize); +#ifdef IP_SET_HASH_WITH_NETS + flags = 0; + mtype_data_reset_flags(data, &flags); +#endif + m = hbucket(t, HKEY(data, h->initval, htable_bits)); + ret = hbucket_elem_add(m, AHASH_MAX(h), h->dsize); + if (ret < 0) { +#ifdef IP_SET_HASH_WITH_NETS + mtype_data_reset_flags(data, &flags); +#endif + read_unlock_bh(&set->lock); + ahash_destroy(t); + if (ret == -EAGAIN) + goto retry; + return ret; + } + d = ahash_data(m, m->pos++, h->dsize); + memcpy(d, data, h->dsize); +#ifdef IP_SET_HASH_WITH_NETS + mtype_data_reset_flags(d, &flags); +#endif + } + } + + rcu_assign_pointer(h->table, t); + read_unlock_bh(&set->lock); + + /* Give time to other readers of the set */ + synchronize_rcu_bh(); + + pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name, + orig->htable_bits, orig, t->htable_bits, t); + 
ahash_destroy(orig); + + return 0; +} + +/* Add an element to a hash and update the internal counters when succeeded, + * otherwise report the proper error code. */ +static int +mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext, + struct ip_set_ext *mext, u32 flags) +{ + struct htype *h = set->data; + struct htable *t; + const struct mtype_elem *d = value; + struct mtype_elem *data; + struct hbucket *n; + int i, ret = 0; + int j = AHASH_MAX(h) + 1; + bool flag_exist = flags & IPSET_FLAG_EXIST; + u32 key, multi = 0; + + if (SET_WITH_TIMEOUT(set) && h->elements >= h->maxelem) + /* FIXME: when set is full, we slow down here */ + mtype_expire(h, NETS_LENGTH(set->family), h->dsize); + + if (h->elements >= h->maxelem) { + if (net_ratelimit()) + pr_warning("Set %s is full, maxelem %u reached\n", + set->name, h->maxelem); + return -IPSET_ERR_HASH_FULL; + } + + rcu_read_lock_bh(); + t = rcu_dereference_bh(h->table); + key = HKEY(value, h->initval, t->htable_bits); + n = hbucket(t, key); + for (i = 0; i < n->pos; i++) { + data = ahash_data(n, i, h->dsize); + if (mtype_data_equal(data, d, &multi)) { + if (flag_exist || + (SET_WITH_TIMEOUT(set) && + ip_set_timeout_expired(ext_timeout(data, h)))) { + /* Just the extensions could be overwritten */ + j = i; + goto reuse_slot; + } else { + ret = -IPSET_ERR_EXIST; + goto out; + } + } + /* Reuse first timed out entry */ + if (SET_WITH_TIMEOUT(set) && + ip_set_timeout_expired(ext_timeout(data, h)) && + j != AHASH_MAX(h) + 1) + j = i; + } +reuse_slot: + if (j != AHASH_MAX(h) + 1) { + /* Fill out reused slot */ + data = ahash_data(n, j, h->dsize); +#ifdef IP_SET_HASH_WITH_NETS + mtype_del_cidr(h, CIDR(data->cidr), NETS_LENGTH(set->family)); + mtype_add_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family)); +#endif + } else { + /* Use/create a new slot */ + TUNE_AHASH_MAX(h, multi); + ret = hbucket_elem_add(n, AHASH_MAX(h), h->dsize); + if (ret != 0) { + if (ret == -EAGAIN) + mtype_data_next(&h->next, d); + goto out; + } + data = ahash_data(n, n->pos++, h->dsize); +#ifdef IP_SET_HASH_WITH_NETS + mtype_add_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family)); +#endif + h->elements++; + } + memcpy(data, d, sizeof(struct mtype_elem)); +#ifdef IP_SET_HASH_WITH_NETS + mtype_data_set_flags(data, flags); +#endif + if (SET_WITH_TIMEOUT(set)) + ip_set_timeout_set(ext_timeout(data, h), ext->timeout); + +out: + rcu_read_unlock_bh(); + return ret; +} + +/* Delete an element from the hash: swap it with the last element + * and free up space if possible. 
+ */ +static int +mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext, + struct ip_set_ext *mext, u32 flags) +{ + struct htype *h = set->data; + struct htable *t = h->table; + const struct mtype_elem *d = value; + struct mtype_elem *data; + struct hbucket *n; + int i; + u32 key, multi = 0; + + key = HKEY(value, h->initval, t->htable_bits); + n = hbucket(t, key); + for (i = 0; i < n->pos; i++) { + data = ahash_data(n, i, h->dsize); + if (!mtype_data_equal(data, d, &multi)) + continue; + if (SET_WITH_TIMEOUT(set) && + ip_set_timeout_expired(ext_timeout(data, h))) + return -IPSET_ERR_EXIST; + if (i != n->pos - 1) + /* Not last one */ + memcpy(data, ahash_data(n, n->pos - 1, h->dsize), + h->dsize); + + n->pos--; + h->elements--; +#ifdef IP_SET_HASH_WITH_NETS + mtype_del_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family)); +#endif + if (n->pos + AHASH_INIT_SIZE < n->size) { + void *tmp = kzalloc((n->size - AHASH_INIT_SIZE) + * h->dsize, + GFP_ATOMIC); + if (!tmp) + return 0; + n->size -= AHASH_INIT_SIZE; + memcpy(tmp, n->value, n->size * h->dsize); + kfree(n->value); + n->value = tmp; + } + return 0; + } + + return -IPSET_ERR_EXIST; +} + +static inline int +mtype_data_match(struct mtype_elem *data, const struct ip_set_ext *ext, + struct ip_set_ext *mext, struct ip_set *set, u32 flags) +{ + return mtype_do_data_match(data); +} + +#ifdef IP_SET_HASH_WITH_NETS +/* Special test function which takes into account the different network + * sizes added to the set */ +static int +mtype_test_cidrs(struct ip_set *set, struct mtype_elem *d, + const struct ip_set_ext *ext, + struct ip_set_ext *mext, u32 flags) +{ + struct htype *h = set->data; + struct htable *t = h->table; + struct hbucket *n; + struct mtype_elem *data; + int i, j = 0; + u32 key, multi = 0; + u8 nets_length = NETS_LENGTH(set->family); + + pr_debug("test by nets\n"); + for (; j < nets_length && h->nets[j].nets && !multi; j++) { + mtype_data_netmask(d, h->nets[j].cidr); + key = HKEY(d, h->initval, t->htable_bits); + n = hbucket(t, key); + for (i = 0; i < n->pos; i++) { + data = ahash_data(n, i, h->dsize); + if (!mtype_data_equal(data, d, &multi)) + continue; + if (SET_WITH_TIMEOUT(set)) { + if (!ip_set_timeout_expired( + ext_timeout(data, h))) + return mtype_data_match(data, ext, + mext, set, + flags); +#ifdef IP_SET_HASH_WITH_MULTI + multi = 0; +#endif + } else + return mtype_data_match(data, ext, + mext, set, flags); + } + } + return 0; +} +#endif + +/* Test whether the element is added to the set */ +static int +mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext, + struct ip_set_ext *mext, u32 flags) +{ + struct htype *h = set->data; + struct htable *t = h->table; + struct mtype_elem *d = value; + struct hbucket *n; + struct mtype_elem *data; + int i; + u32 key, multi = 0; + +#ifdef IP_SET_HASH_WITH_NETS + /* If we test an IP address and not a network address, + * try all possible network sizes */ + if (CIDR(d->cidr) == SET_HOST_MASK(set->family)) + return mtype_test_cidrs(set, d, ext, mext, flags); +#endif + + key = HKEY(d, h->initval, t->htable_bits); + n = hbucket(t, key); + for (i = 0; i < n->pos; i++) { + data = ahash_data(n, i, h->dsize); + if (mtype_data_equal(data, d, &multi) && + !(SET_WITH_TIMEOUT(set) && + ip_set_timeout_expired(ext_timeout(data, h)))) + return mtype_data_match(data, ext, mext, set, flags); + } + return 0; +} + +/* Reply a HEADER request: fill out the header part of the set */ +static int +mtype_head(struct ip_set *set, struct sk_buff *skb) +{ + const struct htype *h = 
set->data; + struct nlattr *nested; + size_t memsize; + + read_lock_bh(&set->lock); + memsize = mtype_ahash_memsize(h, NETS_LENGTH(set->family)); + read_unlock_bh(&set->lock); + + nested = ipset_nest_start(skb, IPSET_ATTR_DATA); + if (!nested) + goto nla_put_failure; + if (nla_put_net32(skb, IPSET_ATTR_HASHSIZE, + htonl(jhash_size(h->table->htable_bits))) || + nla_put_net32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem))) + goto nla_put_failure; +#ifdef IP_SET_HASH_WITH_NETMASK + if (h->netmask != HOST_MASK && + nla_put_u8(skb, IPSET_ATTR_NETMASK, h->netmask)) + goto nla_put_failure; +#endif + if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || + nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) || + ((set->extensions & IPSET_EXT_TIMEOUT) && + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout)))) + goto nla_put_failure; + ipset_nest_end(skb, nested); + + return 0; +nla_put_failure: + return -EMSGSIZE; +} + +/* Reply a LIST/SAVE request: dump the elements of the specified set */ +static int +mtype_list(const struct ip_set *set, + struct sk_buff *skb, struct netlink_callback *cb) +{ + const struct htype *h = set->data; + const struct htable *t = h->table; + struct nlattr *atd, *nested; + const struct hbucket *n; + const struct mtype_elem *e; + u32 first = cb->args[2]; + /* We assume that one hash bucket fills into one page */ + void *incomplete; + int i; + + atd = ipset_nest_start(skb, IPSET_ATTR_ADT); + if (!atd) + return -EMSGSIZE; + pr_debug("list hash set %s\n", set->name); + for (; cb->args[2] < jhash_size(t->htable_bits); cb->args[2]++) { + incomplete = skb_tail_pointer(skb); + n = hbucket(t, cb->args[2]); + pr_debug("cb->args[2]: %lu, t %p n %p\n", cb->args[2], t, n); + for (i = 0; i < n->pos; i++) { + e = ahash_data(n, i, h->dsize); + if (SET_WITH_TIMEOUT(set) && + ip_set_timeout_expired(ext_timeout(e, h))) + continue; + pr_debug("list hash %lu hbucket %p i %u, data %p\n", + cb->args[2], n, i, e); + nested = ipset_nest_start(skb, IPSET_ATTR_DATA); + if (!nested) { + if (cb->args[2] == first) { + nla_nest_cancel(skb, atd); + return -EMSGSIZE; + } else + goto nla_put_failure; + } + if (mtype_data_list(skb, e)) + goto nla_put_failure; + if (SET_WITH_TIMEOUT(set) && + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get( + ext_timeout(e, h))))) + goto nla_put_failure; + ipset_nest_end(skb, nested); + } + } + ipset_nest_end(skb, atd); + /* Set listing finished */ + cb->args[2] = 0; + + return 0; + +nla_put_failure: + nlmsg_trim(skb, incomplete); + ipset_nest_end(skb, atd); + if (unlikely(first == cb->args[2])) { + pr_warning("Can't list set %s: one bucket does not fit into " + "a message. 
Please report it!\n", set->name); + cb->args[2] = 0; + return -EMSGSIZE; + } + return 0; +} + +static int +TOKEN(MTYPE, _kadt)(struct ip_set *set, const struct sk_buff *skb, + const struct xt_action_param *par, + enum ipset_adt adt, struct ip_set_adt_opt *opt); + +static int +TOKEN(MTYPE, _uadt)(struct ip_set *set, struct nlattr *tb[], + enum ipset_adt adt, u32 *lineno, u32 flags, bool retried); + +static const struct ip_set_type_variant mtype_variant = { + .kadt = mtype_kadt, + .uadt = mtype_uadt, + .adt = { + [IPSET_ADD] = mtype_add, + [IPSET_DEL] = mtype_del, + [IPSET_TEST] = mtype_test, + }, + .destroy = mtype_destroy, + .flush = mtype_flush, + .head = mtype_head, + .list = mtype_list, + .resize = mtype_resize, + .same_set = mtype_same_set, +}; + +#ifdef IP_SET_EMIT_CREATE +static int +TOKEN(HTYPE, _create)(struct ip_set *set, struct nlattr *tb[], u32 flags) +{ + u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; + u8 hbits; +#ifdef IP_SET_HASH_WITH_NETMASK + u8 netmask; +#endif + size_t hsize; + struct HTYPE *h; + + if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) + return -IPSET_ERR_INVALID_FAMILY; +#ifdef IP_SET_HASH_WITH_NETMASK + netmask = set->family == NFPROTO_IPV4 ? 32 : 128; + pr_debug("Create set %s with family %s\n", + set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6"); +#endif + + if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) + return -IPSET_ERR_PROTOCOL; + + if (tb[IPSET_ATTR_HASHSIZE]) { + hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]); + if (hashsize < IPSET_MIMINAL_HASHSIZE) + hashsize = IPSET_MIMINAL_HASHSIZE; + } + + if (tb[IPSET_ATTR_MAXELEM]) + maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]); + +#ifdef IP_SET_HASH_WITH_NETMASK + if (tb[IPSET_ATTR_NETMASK]) { + netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]); + + if ((set->family == NFPROTO_IPV4 && netmask > 32) || + (set->family == NFPROTO_IPV6 && netmask > 128) || + netmask == 0) + return -IPSET_ERR_INVALID_NETMASK; + } +#endif + + hsize = sizeof(*h); +#ifdef IP_SET_HASH_WITH_NETS + hsize += sizeof(struct net_prefixes) * + (set->family == NFPROTO_IPV4 ? 
32 : 128); +#endif + h = kzalloc(hsize, GFP_KERNEL); + if (!h) + return -ENOMEM; + + h->maxelem = maxelem; +#ifdef IP_SET_HASH_WITH_NETMASK + h->netmask = netmask; +#endif + get_random_bytes(&h->initval, sizeof(h->initval)); + h->timeout = IPSET_NO_TIMEOUT; + + hbits = htable_bits(hashsize); + hsize = htable_size(hbits); + if (hsize == 0) { + kfree(h); + return -ENOMEM; + } + h->table = ip_set_alloc(hsize); + if (!h->table) { + kfree(h); + return -ENOMEM; + } + h->table->htable_bits = hbits; + + set->data = h; + if (set->family == NFPROTO_IPV4) + set->variant = &TOKEN(HTYPE, 4_variant); + else + set->variant = &TOKEN(HTYPE, 6_variant); + + if (tb[IPSET_ATTR_TIMEOUT]) { + h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); + set->extensions |= IPSET_EXT_TIMEOUT; + if (set->family == NFPROTO_IPV4) { + h->dsize = sizeof(struct TOKEN(HTYPE, 4t_elem)); + h->offset[IPSET_OFFSET_TIMEOUT] = + offsetof(struct TOKEN(HTYPE, 4t_elem), + timeout); + TOKEN(HTYPE, 4_gc_init)(set, TOKEN(HTYPE, 4_gc)); + } else { + h->dsize = sizeof(struct TOKEN(HTYPE, 6t_elem)); + h->offset[IPSET_OFFSET_TIMEOUT] = + offsetof(struct TOKEN(HTYPE, 6t_elem), + timeout); + TOKEN(HTYPE, 6_gc_init)(set, TOKEN(HTYPE, 6_gc)); + } + } else { + if (set->family == NFPROTO_IPV4) + h->dsize = sizeof(struct TOKEN(HTYPE, 4_elem)); + else + h->dsize = sizeof(struct TOKEN(HTYPE, 6_elem)); + } + + pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n", + set->name, jhash_size(h->table->htable_bits), + h->table->htable_bits, h->maxelem, set->data, h->table); + + return 0; +} +#endif /* IP_SET_EMIT_CREATE */ -- cgit v1.2.3-70-g09d2 From 34d666d489cf70c246ca99b2387741915c34b88c Mon Sep 17 00:00:00 2001 From: Jozsef Kadlecsik Date: Sat, 27 Apr 2013 14:38:56 +0200 Subject: netfilter: ipset: Introduce the counter extension in the core Signed-off-by: Jozsef Kadlecsik Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/ipset/ip_set.h | 75 +++++++++++++++++++++++++++-- include/uapi/linux/netfilter/ipset/ip_set.h | 5 ++ net/netfilter/ipset/ip_set_core.c | 10 ++++ 3 files changed, 86 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index bf0220cbf46a..0f978ebfaefb 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -52,18 +52,24 @@ enum ip_set_extension { IPSET_EXT_NONE = 0, IPSET_EXT_BIT_TIMEOUT = 1, IPSET_EXT_TIMEOUT = (1 << IPSET_EXT_BIT_TIMEOUT), + IPSET_EXT_BIT_COUNTER = 2, + IPSET_EXT_COUNTER = (1 << IPSET_EXT_BIT_COUNTER), }; /* Extension offsets */ enum ip_set_offset { IPSET_OFFSET_TIMEOUT = 0, + IPSET_OFFSET_COUNTER, IPSET_OFFSET_MAX, }; #define SET_WITH_TIMEOUT(s) ((s)->extensions & IPSET_EXT_TIMEOUT) +#define SET_WITH_COUNTER(s) ((s)->extensions & IPSET_EXT_COUNTER) struct ip_set_ext { unsigned long timeout; + u64 packets; + u64 bytes; }; struct ip_set; @@ -177,6 +183,65 @@ struct ip_set { void *data; }; +struct ip_set_counter { + atomic64_t bytes; + atomic64_t packets; +}; + +static inline void +ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter) +{ + atomic64_add((long long)bytes, &(counter)->bytes); +} + +static inline void +ip_set_add_packets(u64 packets, struct ip_set_counter *counter) +{ + atomic64_add((long long)packets, &(counter)->packets); +} + +static inline u64 +ip_set_get_bytes(const struct ip_set_counter *counter) +{ + return (u64)atomic64_read(&(counter)->bytes); +} + +static inline u64 +ip_set_get_packets(const struct ip_set_counter *counter) +{ + 
return (u64)atomic64_read(&(counter)->packets); +} + +static inline void +ip_set_update_counter(struct ip_set_counter *counter, + const struct ip_set_ext *ext, + struct ip_set_ext *mext, u32 flags) +{ + if (ext->packets != ULLONG_MAX) { + ip_set_add_bytes(ext->bytes, counter); + ip_set_add_packets(ext->packets, counter); + } +} + +static inline bool +ip_set_put_counter(struct sk_buff *skb, struct ip_set_counter *counter) +{ + return nla_put_net64(skb, IPSET_ATTR_BYTES, + cpu_to_be64(ip_set_get_bytes(counter))) || + nla_put_net64(skb, IPSET_ATTR_PACKETS, + cpu_to_be64(ip_set_get_packets(counter))); +} + +static inline void +ip_set_init_counter(struct ip_set_counter *counter, + const struct ip_set_ext *ext) +{ + if (ext->bytes != ULLONG_MAX) + atomic64_set(&(counter)->bytes, (long long)(ext->bytes)); + if (ext->packets != ULLONG_MAX) + atomic64_set(&(counter)->packets, (long long)(ext->packets)); +} + /* register and unregister set references */ extern ip_set_id_t ip_set_get_byname(const char *name, struct ip_set **set); extern void ip_set_put_byindex(ip_set_id_t index); @@ -318,10 +383,12 @@ bitmap_bytes(u32 a, u32 b) #include -#define IP_SET_INIT_KEXT(skb, opt, map) \ - { .timeout = ip_set_adt_opt_timeout(opt, map) } +#define IP_SET_INIT_KEXT(skb, opt, map) \ + { .bytes = (skb)->len, .packets = 1, \ + .timeout = ip_set_adt_opt_timeout(opt, map) } -#define IP_SET_INIT_UEXT(map) \ - { .timeout = (map)->timeout } +#define IP_SET_INIT_UEXT(map) \ + { .bytes = ULLONG_MAX, .packets = ULLONG_MAX, \ + .timeout = (map)->timeout } #endif /*_IP_SET_H */ diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h index fbee42807a11..ed452675d153 100644 --- a/include/uapi/linux/netfilter/ipset/ip_set.h +++ b/include/uapi/linux/netfilter/ipset/ip_set.h @@ -108,6 +108,8 @@ enum { IPSET_ATTR_CIDR2, IPSET_ATTR_IP2_TO, IPSET_ATTR_IFACE, + IPSET_ATTR_BYTES, + IPSET_ATTR_PACKETS, __IPSET_ATTR_ADT_MAX, }; #define IPSET_ATTR_ADT_MAX (__IPSET_ATTR_ADT_MAX - 1) @@ -137,6 +139,7 @@ enum ipset_errno { IPSET_ERR_REFERENCED, IPSET_ERR_IPADDR_IPV4, IPSET_ERR_IPADDR_IPV6, + IPSET_ERR_COUNTER, /* Type specific error codes */ IPSET_ERR_TYPE_SPECIFIC = 4352, @@ -161,6 +164,8 @@ enum ipset_cadt_flags { IPSET_FLAG_PHYSDEV = (1 << IPSET_FLAG_BIT_PHYSDEV), IPSET_FLAG_BIT_NOMATCH = 2, IPSET_FLAG_NOMATCH = (1 << IPSET_FLAG_BIT_NOMATCH), + IPSET_FLAG_BIT_WITH_COUNTERS = 3, + IPSET_FLAG_WITH_COUNTERS = (1 << IPSET_FLAG_BIT_WITH_COUNTERS), IPSET_FLAG_CADT_MAX = 15, /* Upper half */ }; diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 4486285d10da..f6d878a46c43 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -324,6 +324,16 @@ ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], return -IPSET_ERR_TIMEOUT; ext->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); } + if (tb[IPSET_ATTR_BYTES] || tb[IPSET_ATTR_PACKETS]) { + if (!(set->extensions & IPSET_EXT_COUNTER)) + return -IPSET_ERR_COUNTER; + if (tb[IPSET_ATTR_BYTES]) + ext->bytes = be64_to_cpu(nla_get_be64( + tb[IPSET_ATTR_BYTES])); + if (tb[IPSET_ATTR_PACKETS]) + ext->packets = be64_to_cpu(nla_get_be64( + tb[IPSET_ATTR_PACKETS])); + } return 0; } EXPORT_SYMBOL_GPL(ip_set_get_extensions); -- cgit v1.2.3-70-g09d2 From 6e01781d1c80e2e8263471252a631e86165b15c5 Mon Sep 17 00:00:00 2001 From: Jozsef Kadlecsik Date: Sat, 27 Apr 2013 14:40:50 +0200 Subject: netfilter: ipset: set match: add support to match the counters The new revision of the set match 
supports to match the counters and to suppress updating the counters at matching too. At the set:list types, the updating of the subcounters can be suppressed as well. Signed-off-by: Jozsef Kadlecsik Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/ipset/ip_set.h | 9 +++- include/uapi/linux/netfilter/ipset/ip_set.h | 31 +++++++++++-- include/uapi/linux/netfilter/xt_set.h | 9 ++++ net/netfilter/ipset/ip_set_core.c | 2 +- net/netfilter/ipset/ip_set_list_set.c | 8 +++- net/netfilter/xt_set.c | 70 +++++++++++++++++++++++++++++ 6 files changed, 120 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 0f978ebfaefb..d80e2753847c 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -76,7 +76,7 @@ struct ip_set; typedef int (*ipset_adtfn)(struct ip_set *set, void *value, const struct ip_set_ext *ext, - struct ip_set_ext *mext, u32 flags); + struct ip_set_ext *mext, u32 cmdflags); /* Kernel API function options */ struct ip_set_adt_opt { @@ -217,10 +217,15 @@ ip_set_update_counter(struct ip_set_counter *counter, const struct ip_set_ext *ext, struct ip_set_ext *mext, u32 flags) { - if (ext->packets != ULLONG_MAX) { + if (ext->packets != ULLONG_MAX && + !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) { ip_set_add_bytes(ext->bytes, counter); ip_set_add_packets(ext->packets, counter); } + if (flags & IPSET_FLAG_MATCH_COUNTERS) { + mext->packets = ip_set_get_packets(counter); + mext->bytes = ip_set_get_bytes(counter); + } } static inline bool diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h index ed452675d153..8024cdf13b70 100644 --- a/include/uapi/linux/netfilter/ipset/ip_set.h +++ b/include/uapi/linux/netfilter/ipset/ip_set.h @@ -145,7 +145,7 @@ enum ipset_errno { IPSET_ERR_TYPE_SPECIFIC = 4352, }; -/* Flags at command level */ +/* Flags at command level or match/target flags, lower half of cmdattrs*/ enum ipset_cmd_flags { IPSET_FLAG_BIT_EXIST = 0, IPSET_FLAG_EXIST = (1 << IPSET_FLAG_BIT_EXIST), @@ -153,10 +153,20 @@ enum ipset_cmd_flags { IPSET_FLAG_LIST_SETNAME = (1 << IPSET_FLAG_BIT_LIST_SETNAME), IPSET_FLAG_BIT_LIST_HEADER = 2, IPSET_FLAG_LIST_HEADER = (1 << IPSET_FLAG_BIT_LIST_HEADER), - IPSET_FLAG_CMD_MAX = 15, /* Lower half */ + IPSET_FLAG_BIT_SKIP_COUNTER_UPDATE = 3, + IPSET_FLAG_SKIP_COUNTER_UPDATE = + (1 << IPSET_FLAG_BIT_SKIP_COUNTER_UPDATE), + IPSET_FLAG_BIT_SKIP_SUBCOUNTER_UPDATE = 4, + IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE = + (1 << IPSET_FLAG_BIT_SKIP_SUBCOUNTER_UPDATE), + IPSET_FLAG_BIT_MATCH_COUNTERS = 5, + IPSET_FLAG_MATCH_COUNTERS = (1 << IPSET_FLAG_BIT_MATCH_COUNTERS), + IPSET_FLAG_BIT_RETURN_NOMATCH = 7, + IPSET_FLAG_RETURN_NOMATCH = (1 << IPSET_FLAG_BIT_RETURN_NOMATCH), + IPSET_FLAG_CMD_MAX = 15, }; -/* Flags at CADT attribute level */ +/* Flags at CADT attribute level, upper half of cmdattrs */ enum ipset_cadt_flags { IPSET_FLAG_BIT_BEFORE = 0, IPSET_FLAG_BEFORE = (1 << IPSET_FLAG_BIT_BEFORE), @@ -166,7 +176,7 @@ enum ipset_cadt_flags { IPSET_FLAG_NOMATCH = (1 << IPSET_FLAG_BIT_NOMATCH), IPSET_FLAG_BIT_WITH_COUNTERS = 3, IPSET_FLAG_WITH_COUNTERS = (1 << IPSET_FLAG_BIT_WITH_COUNTERS), - IPSET_FLAG_CADT_MAX = 15, /* Upper half */ + IPSET_FLAG_CADT_MAX = 15, }; /* Commands with settype-specific attributes */ @@ -195,6 +205,7 @@ enum ip_set_dim { * If changed, new revision of iptables match/target is required. 
*/ IPSET_DIM_MAX = 6, + /* Backward compatibility: set match revision 2 */ IPSET_BIT_RETURN_NOMATCH = 7, }; @@ -207,6 +218,18 @@ enum ip_set_kopt { IPSET_RETURN_NOMATCH = (1 << IPSET_BIT_RETURN_NOMATCH), }; +enum { + IPSET_COUNTER_NONE = 0, + IPSET_COUNTER_EQ, + IPSET_COUNTER_NE, + IPSET_COUNTER_LT, + IPSET_COUNTER_GT, +}; + +struct ip_set_counter_match { + __u8 op; + __u64 value; +}; /* Interface to iptables/ip6tables */ diff --git a/include/uapi/linux/netfilter/xt_set.h b/include/uapi/linux/netfilter/xt_set.h index e3a9978f259f..964d3d42f874 100644 --- a/include/uapi/linux/netfilter/xt_set.h +++ b/include/uapi/linux/netfilter/xt_set.h @@ -62,4 +62,13 @@ struct xt_set_info_target_v2 { __u32 timeout; }; +/* Revision 3 match */ + +struct xt_set_info_match_v3 { + struct xt_set_info match_set; + struct ip_set_counter_match packets; + struct ip_set_counter_match bytes; + __u32 flags; +}; + #endif /*_XT_SET_H*/ diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index f6d878a46c43..f77139007983 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -413,7 +413,7 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb, ret = 1; } else { /* --return-nomatch: invert matched element */ - if ((opt->flags & IPSET_RETURN_NOMATCH) && + if ((opt->cmdflags & IPSET_FLAG_RETURN_NOMATCH) && (set->type->features & IPSET_TYPE_NOMATCH) && (ret > 0 || ret == -ENOTEMPTY)) ret = -ret; diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c index c09022e37406..979b8c90e422 100644 --- a/net/netfilter/ipset/ip_set_list_set.c +++ b/net/netfilter/ipset/ip_set_list_set.c @@ -84,9 +84,13 @@ list_set_ktest(struct ip_set *set, const struct sk_buff *skb, { struct list_set *map = set->data; struct set_elem *e; - u32 i; + u32 i, cmdflags = opt->cmdflags; int ret; + /* Don't lookup sub-counters at all */ + opt->cmdflags &= ~IPSET_FLAG_MATCH_COUNTERS; + if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE) + opt->cmdflags &= ~IPSET_FLAG_SKIP_COUNTER_UPDATE; for (i = 0; i < map->size; i++) { e = list_set_elem(map, i); if (e->id == IPSET_INVALID_ID) @@ -99,7 +103,7 @@ list_set_ktest(struct ip_set *set, const struct sk_buff *skb, if (SET_WITH_COUNTER(set)) ip_set_update_counter(ext_counter(e, map), ext, &opt->ext, - opt->cmdflags); + cmdflags); return ret; } } diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c index 636c5199ff35..31790e789e22 100644 --- a/net/netfilter/xt_set.c +++ b/net/netfilter/xt_set.c @@ -189,6 +189,9 @@ set_match_v1(const struct sk_buff *skb, struct xt_action_param *par) ADT_OPT(opt, par->family, info->match_set.dim, info->match_set.flags, 0, UINT_MAX); + if (opt.flags & IPSET_RETURN_NOMATCH) + opt.cmdflags |= IPSET_FLAG_RETURN_NOMATCH; + return match_set(info->match_set.index, skb, par, &opt, info->match_set.flags & IPSET_INV_MATCH); } @@ -317,6 +320,52 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par) #define set_target_v2_checkentry set_target_v1_checkentry #define set_target_v2_destroy set_target_v1_destroy +/* Revision 3 match */ + +static bool +match_counter(u64 counter, const struct ip_set_counter_match *info) +{ + switch (info->op) { + case IPSET_COUNTER_NONE: + return true; + case IPSET_COUNTER_EQ: + return counter == info->value; + case IPSET_COUNTER_NE: + return counter != info->value; + case IPSET_COUNTER_LT: + return counter < info->value; + case IPSET_COUNTER_GT: + return counter > info->value; + } + return false; +} + +static bool +set_match_v3(const 
struct sk_buff *skb, struct xt_action_param *par) +{ + const struct xt_set_info_match_v3 *info = par->matchinfo; + ADT_OPT(opt, par->family, info->match_set.dim, + info->match_set.flags, info->flags, UINT_MAX); + int ret; + + if (info->packets.op != IPSET_COUNTER_NONE || + info->bytes.op != IPSET_COUNTER_NONE) + opt.cmdflags |= IPSET_FLAG_MATCH_COUNTERS; + + ret = match_set(info->match_set.index, skb, par, &opt, + info->match_set.flags & IPSET_INV_MATCH); + + if (!(ret && opt.cmdflags & IPSET_FLAG_MATCH_COUNTERS)) + return ret; + + if (!match_counter(opt.ext.packets, &info->packets)) + return 0; + return match_counter(opt.ext.bytes, &info->bytes); +} + +#define set_match_v3_checkentry set_match_v1_checkentry +#define set_match_v3_destroy set_match_v1_destroy + static struct xt_match set_matches[] __read_mostly = { { .name = "set", @@ -369,6 +418,27 @@ static struct xt_match set_matches[] __read_mostly = { .destroy = set_match_v1_destroy, .me = THIS_MODULE }, + /* counters support: update, match */ + { + .name = "set", + .family = NFPROTO_IPV4, + .revision = 3, + .match = set_match_v3, + .matchsize = sizeof(struct xt_set_info_match_v3), + .checkentry = set_match_v3_checkentry, + .destroy = set_match_v3_destroy, + .me = THIS_MODULE + }, + { + .name = "set", + .family = NFPROTO_IPV6, + .revision = 3, + .match = set_match_v3, + .matchsize = sizeof(struct xt_set_info_match_v3), + .checkentry = set_match_v3_checkentry, + .destroy = set_match_v3_destroy, + .me = THIS_MODULE + }, }; static struct xt_target set_targets[] __read_mostly = { -- cgit v1.2.3-70-g09d2 From bd7c4b604a6cd707803c7c6ba142bfa131f9a9f3 Mon Sep 17 00:00:00 2001 From: Neil Horman Date: Tue, 30 Apr 2013 05:35:05 +0000 Subject: netpoll: convert mutex into a semaphore Bart Van Assche recently reported a warning to me: [] warn_slowpath_common+0x7f/0xc0 [] warn_slowpath_null+0x1a/0x20 [] mutex_trylock+0x16d/0x180 [] netpoll_poll_dev+0x49/0xc30 [] ? __alloc_skb+0x82/0x2a0 [] netpoll_send_skb_on_dev+0x265/0x410 [] netpoll_send_udp+0x28a/0x3a0 [] ? write_msg+0x53/0x110 [netconsole] [] write_msg+0xcf/0x110 [netconsole] [] call_console_drivers.constprop.17+0xa1/0x1c0 [] console_unlock+0x2d6/0x450 [] vprintk_emit+0x1ee/0x510 [] printk+0x4d/0x4f [] scsi_print_command+0x7d/0xe0 [scsi_mod] This resulted from my commit ca99ca14c, which introduced a mutex_trylock operation in a path that could execute in interrupt context. When mutex debugging is enabled, the above warns the user when we are in fact executing in interrupt context. After some discussion, it seems that a semaphore is the proper mechanism to use here. While mutexes are defined to be unusable in interrupt context, no such condition exists for semaphores (save for the fact that the non-blocking API calls, like up and down_trylock, must be used when in irq context). Signed-off-by: Neil Horman Reported-by: Bart Van Assche CC: Bart Van Assche CC: David Miller CC: netdev@vger.kernel.org Signed-off-by: David S.
Miller --- include/linux/netpoll.h | 2 +- net/core/netpoll.c | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index 9d7d8c64f7c8..fa2cb76a7029 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -40,7 +40,7 @@ struct netpoll_info { unsigned long rx_flags; spinlock_t rx_lock; - struct mutex dev_lock; + struct semaphore dev_lock; struct list_head rx_np; /* netpolls that registered an rx_hook */ struct sk_buff_head neigh_tx; /* list of neigh requests to reply to */ diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 209d84253dd5..a5802a8b652f 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -206,17 +206,17 @@ static void netpoll_poll_dev(struct net_device *dev) * the dev_open/close paths use this to block netpoll activity * while changing device state */ - if (!mutex_trylock(&ni->dev_lock)) + if (!down_trylock(&ni->dev_lock)) return; if (!netif_running(dev)) { - mutex_unlock(&ni->dev_lock); + up(&ni->dev_lock); return; } ops = dev->netdev_ops; if (!ops->ndo_poll_controller) { - mutex_unlock(&ni->dev_lock); + up(&ni->dev_lock); return; } @@ -225,7 +225,7 @@ static void netpoll_poll_dev(struct net_device *dev) poll_napi(dev); - mutex_unlock(&ni->dev_lock); + up(&ni->dev_lock); if (dev->flags & IFF_SLAVE) { if (ni) { @@ -255,7 +255,7 @@ int netpoll_rx_disable(struct net_device *dev) idx = srcu_read_lock(&netpoll_srcu); ni = srcu_dereference(dev->npinfo, &netpoll_srcu); if (ni) - mutex_lock(&ni->dev_lock); + down(&ni->dev_lock); srcu_read_unlock(&netpoll_srcu, idx); return 0; } @@ -267,7 +267,7 @@ void netpoll_rx_enable(struct net_device *dev) rcu_read_lock(); ni = rcu_dereference(dev->npinfo); if (ni) - mutex_unlock(&ni->dev_lock); + up(&ni->dev_lock); rcu_read_unlock(); } EXPORT_SYMBOL(netpoll_rx_enable); @@ -1047,7 +1047,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp) INIT_LIST_HEAD(&npinfo->rx_np); spin_lock_init(&npinfo->rx_lock); - mutex_init(&npinfo->dev_lock); + sema_init(&npinfo->dev_lock, 1); skb_queue_head_init(&npinfo->neigh_tx); skb_queue_head_init(&npinfo->txq); INIT_DELAYED_WORK(&npinfo->tx_work, queue_process); -- cgit v1.2.3-70-g09d2 From 20074f357da4a637430aec2879c9d864c5d2c23c Mon Sep 17 00:00:00 2001 From: Xi Wang Date: Wed, 1 May 2013 16:24:08 -0400 Subject: filter: fix va_list build error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch fixes the following build error. In file included from include/linux/filter.h:52:0, from arch/arm/net/bpf_jit_32.c:14: include/linux/printk.h:54:2: error: unknown type name ‘va_list’ include/linux/printk.h:105:21: error: unknown type name ‘va_list’ include/linux/printk.h:108:30: error: unknown type name ‘va_list’ Signed-off-by: Xi Wang Signed-off-by: David S. Miller --- include/linux/filter.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/filter.h b/include/linux/filter.h index d1248f401a56..c050dcc322a4 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -48,6 +48,7 @@ extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen); extern int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned len); #ifdef CONFIG_BPF_JIT +#include #include #include -- cgit v1.2.3-70-g09d2
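
For the revision 3 set match above, a minimal userspace sketch of how an iptables extension might fill in struct xt_set_info_match_v3 to express "only match entries whose packet counter exceeds 10". This is an illustration, not code from the patch: the set index is a placeholder (a real one is looked up by set name), the dimension constants come from the existing ipset UAPI header, and the set itself must have been created with counters (IPSET_FLAG_WITH_COUNTERS) for the conditions to make sense.

#include <string.h>
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/xt_set.h>

static void fill_counter_match(struct xt_set_info_match_v3 *info)
{
	memset(info, 0, sizeof(*info));
	info->match_set.index = 0;		/* placeholder: real index is resolved by set name */
	info->match_set.dim = IPSET_DIM_ONE;	/* match on one dimension, e.g. the source address */
	info->match_set.flags = IPSET_DIM_ONE_SRC;
	info->packets.op = IPSET_COUNTER_GT;	/* packet counter must be greater than... */
	info->packets.value = 10;		/* ...10 */
	info->bytes.op = IPSET_COUNTER_NONE;	/* no condition on the byte counter */
	info->flags = 0;			/* could also carry IPSET_FLAG_SKIP_COUNTER_UPDATE */
}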
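
For the netpoll conversion above, it helps to remember that down_trylock() returns 0 when the semaphore was acquired and nonzero when it was not, which is the opposite convention from mutex_trylock(). A minimal sketch of the interrupt-safe try-acquire pattern, using a hypothetical example_lock rather than the netpoll code itself:

#include <linux/semaphore.h>

static DEFINE_SEMAPHORE(example_lock);		/* binary semaphore, initial count 1 */

static void example_poll_path(void)
{
	if (down_trylock(&example_lock))	/* nonzero return: lock not taken, bail out */
		return;
	/* ... work that must not run concurrently with device open/close ... */
	up(&example_lock);			/* up() may be called even from irq context */
}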
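
For the filter.h build fix above, va_list is provided by <stdarg.h>, so the guarded include block presumably ends up along these lines; this is a sketch, and the neighbouring header name is an assumption inferred from the error messages pointing at include/linux/printk.h.

#ifdef CONFIG_BPF_JIT
#include <stdarg.h>		/* defines va_list before printk.h needs it */
#include <linux/linkage.h>	/* assumed neighbouring include */
#include <linux/printk.h>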