Diffstat (limited to 'net')
-rw-r--r-- net/802/Makefile | 1
-rw-r--r-- net/802/fc.c | 2
-rw-r--r-- net/802/fddi.c | 2
-rw-r--r-- net/802/garp.c | 8
-rw-r--r-- net/802/hippi.c | 2
-rw-r--r-- net/802/p8022.c | 3
-rw-r--r-- net/802/stp.c | 2
-rw-r--r-- net/802/tr.c | 676
-rw-r--r-- net/8021q/vlan.c | 10
-rw-r--r-- net/8021q/vlan_core.c | 3
-rw-r--r-- net/8021q/vlan_dev.c | 12
-rw-r--r-- net/8021q/vlan_netlink.c | 16
-rw-r--r-- net/9p/client.c | 6
-rw-r--r-- net/9p/trans_fd.c | 2
-rw-r--r-- net/9p/trans_virtio.c | 3
-rw-r--r-- net/Kconfig | 11
-rw-r--r-- net/Makefile | 2
-rw-r--r-- net/appletalk/ddp.c | 6
-rw-r--r-- net/appletalk/sysctl_net_atalk.c | 10
-rw-r--r-- net/atm/br2684.c | 2
-rw-r--r-- net/atm/ioctl.c | 8
-rw-r--r-- net/atm/lec.c | 144
-rw-r--r-- net/atm/lec.h | 5
-rw-r--r-- net/atm/mpc.c | 3
-rw-r--r-- net/atm/mpoa_proc.c | 2
-rw-r--r-- net/atm/pppoatm.c | 95
-rw-r--r-- net/atm/signaling.c | 2
-rw-r--r-- net/ax25/af_ax25.c | 9
-rw-r--r-- net/ax25/ax25_dev.c | 10
-rw-r--r-- net/ax25/ax25_ip.c | 4
-rw-r--r-- net/ax25/sysctl_net_ax25.c | 82
-rw-r--r-- net/batman-adv/Kconfig | 27
-rw-r--r-- net/batman-adv/Makefile | 1
-rw-r--r-- net/batman-adv/bat_debugfs.c | 23
-rw-r--r-- net/batman-adv/bat_iv_ogm.c | 257
-rw-r--r-- net/batman-adv/bat_sysfs.c | 110
-rw-r--r-- net/batman-adv/bitarray.c | 118
-rw-r--r-- net/batman-adv/bitarray.h | 26
-rw-r--r-- net/batman-adv/bridge_loop_avoidance.c | 1580
-rw-r--r-- net/batman-adv/bridge_loop_avoidance.h | 98
-rw-r--r-- net/batman-adv/gateway_client.c | 6
-rw-r--r-- net/batman-adv/hard-interface.c | 170
-rw-r--r-- net/batman-adv/icmp_socket.c | 4
-rw-r--r-- net/batman-adv/main.c | 138
-rw-r--r-- net/batman-adv/main.h | 19
-rw-r--r-- net/batman-adv/originator.c | 55
-rw-r--r-- net/batman-adv/originator.h | 7
-rw-r--r-- net/batman-adv/packet.h | 50
-rw-r--r-- net/batman-adv/routing.c | 86
-rw-r--r-- net/batman-adv/routing.h | 5
-rw-r--r-- net/batman-adv/send.c | 20
-rw-r--r-- net/batman-adv/soft-interface.c | 500
-rw-r--r-- net/batman-adv/soft-interface.h | 2
-rw-r--r-- net/batman-adv/translation-table.c | 460
-rw-r--r-- net/batman-adv/translation-table.h | 12
-rw-r--r-- net/batman-adv/types.h | 108
-rw-r--r-- net/batman-adv/unicast.c | 8
-rw-r--r-- net/batman-adv/vis.c | 8
-rw-r--r-- net/bluetooth/af_bluetooth.c | 8
-rw-r--r-- net/bluetooth/bnep/core.c | 8
-rw-r--r-- net/bluetooth/hci_conn.c | 56
-rw-r--r-- net/bluetooth/hci_core.c | 309
-rw-r--r-- net/bluetooth/hci_event.c | 87
-rw-r--r-- net/bluetooth/hci_sysfs.c | 5
-rw-r--r-- net/bluetooth/hidp/core.c | 27
-rw-r--r-- net/bluetooth/l2cap_core.c | 768
-rw-r--r-- net/bluetooth/l2cap_sock.c | 87
-rw-r--r-- net/bluetooth/mgmt.c | 299
-rw-r--r-- net/bluetooth/rfcomm/sock.c | 14
-rw-r--r-- net/bluetooth/rfcomm/tty.c | 137
-rw-r--r-- net/bluetooth/sco.c | 75
-rw-r--r-- net/bluetooth/smp.c | 2
-rw-r--r-- net/bridge/br_device.c | 5
-rw-r--r-- net/bridge/br_fdb.c | 150
-rw-r--r-- net/bridge/br_forward.c | 3
-rw-r--r-- net/bridge/br_input.c | 2
-rw-r--r-- net/bridge/br_multicast.c | 158
-rw-r--r-- net/bridge/br_netfilter.c | 46
-rw-r--r-- net/bridge/br_netlink.c | 39
-rw-r--r-- net/bridge/br_private.h | 21
-rw-r--r-- net/bridge/br_private_stp.h | 7
-rw-r--r-- net/bridge/br_stp.c | 4
-rw-r--r-- net/bridge/br_stp_bpdu.c | 2
-rw-r--r-- net/bridge/br_stp_if.c | 11
-rw-r--r-- net/bridge/br_stp_timer.c | 6
-rw-r--r-- net/bridge/br_sysfs_br.c | 20
-rw-r--r-- net/bridge/netfilter/ebt_stp.c | 4
-rw-r--r-- net/caif/Kconfig | 2
-rw-r--r-- net/caif/caif_socket.c | 28
-rw-r--r-- net/caif/cfctrl.c | 4
-rw-r--r-- net/caif/cfpkt_skbuff.c | 7
-rw-r--r-- net/caif/cfsrvl.c | 3
-rw-r--r-- net/caif/chnl_net.c | 23
-rw-r--r-- net/can/gw.c | 2
-rw-r--r-- net/ceph/auth_none.c | 15
-rw-r--r-- net/ceph/auth_x.c | 15
-rw-r--r-- net/ceph/auth_x.h | 6
-rw-r--r-- net/ceph/ceph_common.c | 4
-rw-r--r-- net/ceph/ceph_hash.c | 6
-rw-r--r-- net/ceph/crush/crush.c | 39
-rw-r--r-- net/ceph/crush/mapper.c | 133
-rw-r--r-- net/ceph/debugfs.c | 6
-rw-r--r-- net/ceph/messenger.c | 198
-rw-r--r-- net/ceph/mon_client.c | 10
-rw-r--r-- net/ceph/osd_client.c | 65
-rw-r--r-- net/ceph/osdmap.c | 85
-rw-r--r-- net/compat.c | 18
-rw-r--r-- net/core/datagram.c | 6
-rw-r--r-- net/core/dev.c | 118
-rw-r--r-- net/core/dev_addr_lists.c | 97
-rw-r--r-- net/core/drop_monitor.c | 146
-rw-r--r-- net/core/ethtool.c | 130
-rw-r--r-- net/core/fib_rules.c | 32
-rw-r--r-- net/core/filter.c | 78
-rw-r--r-- net/core/gen_stats.c | 3
-rw-r--r-- net/core/kmap_skb.h | 19
-rw-r--r-- net/core/neighbour.c | 128
-rw-r--r-- net/core/net-sysfs.c | 13
-rw-r--r-- net/core/net_namespace.c | 39
-rw-r--r-- net/core/netprio_cgroup.c | 36
-rw-r--r-- net/core/pktgen.c | 78
-rw-r--r-- net/core/rtnetlink.c | 359
-rw-r--r-- net/core/skbuff.c | 450
-rw-r--r-- net/core/sock.c | 109
-rw-r--r-- net/core/sock_diag.c | 12
-rw-r--r-- net/core/sysctl_net_core.c | 15
-rw-r--r-- net/core/utils.c | 9
-rw-r--r-- net/dcb/dcbnl.c | 94
-rw-r--r-- net/dccp/ccids/ccid3.c | 12
-rw-r--r-- net/dccp/dccp.h | 8
-rw-r--r-- net/dccp/input.c | 10
-rw-r--r-- net/dccp/ipv4.c | 6
-rw-r--r-- net/dccp/ipv6.c | 3
-rw-r--r-- net/dccp/proto.c | 4
-rw-r--r-- net/dccp/sysctl.c | 11
-rw-r--r-- net/decnet/af_decnet.c | 6
-rw-r--r-- net/decnet/dn_dev.c | 35
-rw-r--r-- net/decnet/dn_fib.c | 10
-rw-r--r-- net/decnet/dn_neigh.c | 22
-rw-r--r-- net/decnet/dn_nsp_in.c | 13
-rw-r--r-- net/decnet/dn_nsp_out.c | 9
-rw-r--r-- net/decnet/dn_route.c | 33
-rw-r--r-- net/decnet/dn_rules.c | 14
-rw-r--r-- net/decnet/dn_table.c | 4
-rw-r--r-- net/decnet/netfilter/dn_rtmsg.c | 3
-rw-r--r-- net/decnet/sysctl_net_decnet.c | 10
-rw-r--r-- net/dns_resolver/dns_key.c | 7
-rw-r--r-- net/dns_resolver/internal.h | 2
-rw-r--r-- net/dsa/slave.c | 10
-rw-r--r-- net/econet/Kconfig | 36
-rw-r--r-- net/econet/Makefile | 7
-rw-r--r-- net/econet/af_econet.c | 1172
-rw-r--r-- net/ethernet/eth.c | 7
-rw-r--r-- net/ieee802154/6lowpan.c | 127
-rw-r--r-- net/ieee802154/6lowpan.h | 3
-rw-r--r-- net/ieee802154/dgram.c | 6
-rw-r--r-- net/ieee802154/nl-mac.c | 146
-rw-r--r-- net/ieee802154/nl-phy.c | 38
-rw-r--r-- net/ieee802154/raw.c | 2
-rw-r--r-- net/ipv4/Kconfig | 8
-rw-r--r-- net/ipv4/af_inet.c | 4
-rw-r--r-- net/ipv4/ah4.c | 6
-rw-r--r-- net/ipv4/arp.c | 26
-rw-r--r-- net/ipv4/devinet.c | 66
-rw-r--r-- net/ipv4/esp4.c | 24
-rw-r--r-- net/ipv4/fib_frontend.c | 10
-rw-r--r-- net/ipv4/fib_rules.c | 16
-rw-r--r-- net/ipv4/fib_semantics.c | 59
-rw-r--r-- net/ipv4/fib_trie.c | 2
-rw-r--r-- net/ipv4/icmp.c | 20
-rw-r--r-- net/ipv4/igmp.c | 18
-rw-r--r-- net/ipv4/inet_connection_sock.c | 29
-rw-r--r-- net/ipv4/inet_diag.c | 6
-rw-r--r-- net/ipv4/inet_hashtables.c | 2
-rw-r--r-- net/ipv4/inet_timewait_sock.c | 6
-rw-r--r-- net/ipv4/ip_forward.c | 4
-rw-r--r-- net/ipv4/ip_fragment.c | 45
-rw-r--r-- net/ipv4/ip_gre.c | 102
-rw-r--r-- net/ipv4/ip_input.c | 13
-rw-r--r-- net/ipv4/ip_options.c | 32
-rw-r--r-- net/ipv4/ip_output.c | 4
-rw-r--r-- net/ipv4/ip_sockglue.c | 19
-rw-r--r-- net/ipv4/ipconfig.c | 20
-rw-r--r-- net/ipv4/ipip.c | 57
-rw-r--r-- net/ipv4/ipmr.c | 12
-rw-r--r-- net/ipv4/netfilter.c | 12
-rw-r--r-- net/ipv4/netfilter/Makefile | 3
-rw-r--r-- net/ipv4/netfilter/arp_tables.c | 7
-rw-r--r-- net/ipv4/netfilter/ip_queue.c | 639
-rw-r--r-- net/ipv4/netfilter/ip_tables.c | 5
-rw-r--r-- net/ipv4/netfilter/ipt_CLUSTERIP.c | 3
-rw-r--r-- net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 19
-rw-r--r-- net/ipv4/netfilter/nf_conntrack_proto_icmp.c | 12
-rw-r--r-- net/ipv4/netfilter/nf_nat_h323.c | 26
-rw-r--r-- net/ipv4/netfilter/nf_nat_sip.c | 2
-rw-r--r-- net/ipv4/netfilter/nf_nat_snmp_basic.c | 8
-rw-r--r-- net/ipv4/ping.c | 21
-rw-r--r-- net/ipv4/raw.c | 2
-rw-r--r-- net/ipv4/route.c | 161
-rw-r--r-- net/ipv4/sysctl_net_ipv4.c | 26
-rw-r--r-- net/ipv4/tcp.c | 340
-rw-r--r-- net/ipv4/tcp_cong.c | 6
-rw-r--r-- net/ipv4/tcp_hybla.c | 10
-rw-r--r-- net/ipv4/tcp_input.c | 640
-rw-r--r-- net/ipv4/tcp_ipv4.c | 118
-rw-r--r-- net/ipv4/tcp_memcontrol.c | 111
-rw-r--r-- net/ipv4/tcp_minisocks.c | 25
-rw-r--r-- net/ipv4/tcp_output.c | 154
-rw-r--r-- net/ipv4/tcp_probe.c | 4
-rw-r--r-- net/ipv4/tcp_timer.c | 5
-rw-r--r-- net/ipv4/udp.c | 58
-rw-r--r-- net/ipv4/udp_diag.c | 9
-rw-r--r-- net/ipv4/udp_impl.h | 2
-rw-r--r-- net/ipv4/xfrm4_policy.c | 6
-rw-r--r-- net/ipv6/Kconfig | 4
-rw-r--r-- net/ipv6/addrconf.c | 211
-rw-r--r-- net/ipv6/addrconf_core.c | 4
-rw-r--r-- net/ipv6/addrlabel.c | 26
-rw-r--r-- net/ipv6/af_inet6.c | 70
-rw-r--r-- net/ipv6/ah6.c | 30
-rw-r--r-- net/ipv6/anycast.c | 12
-rw-r--r-- net/ipv6/datagram.c | 20
-rw-r--r-- net/ipv6/esp6.c | 32
-rw-r--r-- net/ipv6/exthdrs.c | 101
-rw-r--r-- net/ipv6/exthdrs_core.c | 5
-rw-r--r-- net/ipv6/fib6_rules.c | 18
-rw-r--r-- net/ipv6/icmp.c | 27
-rw-r--r-- net/ipv6/inet6_connection_sock.c | 2
-rw-r--r-- net/ipv6/ip6_fib.c | 34
-rw-r--r-- net/ipv6/ip6_flowlabel.c | 29
-rw-r--r-- net/ipv6/ip6_input.c | 9
-rw-r--r-- net/ipv6/ip6_output.c | 91
-rw-r--r-- net/ipv6/ip6_tunnel.c | 62
-rw-r--r-- net/ipv6/ip6mr.c | 14
-rw-r--r-- net/ipv6/ipcomp6.c | 15
-rw-r--r-- net/ipv6/ipv6_sockglue.c | 3
-rw-r--r-- net/ipv6/mcast.c | 71
-rw-r--r-- net/ipv6/mip6.c | 32
-rw-r--r-- net/ipv6/ndisc.c | 255
-rw-r--r-- net/ipv6/netfilter/Kconfig | 22
-rw-r--r-- net/ipv6/netfilter/Makefile | 1
-rw-r--r-- net/ipv6/netfilter/ip6_queue.c | 641
-rw-r--r-- net/ipv6/netfilter/ip6_tables.c | 55
-rw-r--r-- net/ipv6/netfilter/ip6t_REJECT.c | 6
-rw-r--r-- net/ipv6/netfilter/ip6t_ah.c | 4
-rw-r--r-- net/ipv6/netfilter/ip6t_frag.c | 4
-rw-r--r-- net/ipv6/netfilter/ip6t_hbh.c | 4
-rw-r--r-- net/ipv6/netfilter/ip6t_rt.c | 4
-rw-r--r-- net/ipv6/netfilter/ip6table_mangle.c | 3
-rw-r--r-- net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 12
-rw-r--r-- net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | 12
-rw-r--r-- net/ipv6/netfilter/nf_conntrack_reasm.c | 13
-rw-r--r-- net/ipv6/raw.c | 10
-rw-r--r-- net/ipv6/reassembly.c | 47
-rw-r--r-- net/ipv6/route.c | 141
-rw-r--r-- net/ipv6/sit.c | 70
-rw-r--r-- net/ipv6/sysctl_net_ipv6.c | 83
-rw-r--r-- net/ipv6/tcp_ipv6.c | 81
-rw-r--r-- net/ipv6/tunnel6.c | 10
-rw-r--r-- net/ipv6/udp.c | 180
-rw-r--r-- net/ipv6/xfrm6_policy.c | 4
-rw-r--r-- net/ipv6/xfrm6_tunnel.c | 6
-rw-r--r-- net/ipx/af_ipx.c | 14
-rw-r--r-- net/ipx/sysctl_net_ipx.c | 11
-rw-r--r-- net/irda/ircomm/ircomm_tty_ioctl.c | 2
-rw-r--r-- net/irda/irsysctl.c | 10
-rw-r--r-- net/key/af_key.c | 6
-rw-r--r-- net/l2tp/Makefile | 3
-rw-r--r-- net/l2tp/l2tp_core.c | 525
-rw-r--r-- net/l2tp/l2tp_core.h | 57
-rw-r--r-- net/l2tp/l2tp_debugfs.c | 14
-rw-r--r-- net/l2tp/l2tp_eth.c | 15
-rw-r--r-- net/l2tp/l2tp_ip.c | 133
-rw-r--r-- net/l2tp/l2tp_ip6.c | 803
-rw-r--r-- net/l2tp/l2tp_netlink.c | 197
-rw-r--r-- net/l2tp/l2tp_ppp.c | 206
-rw-r--r-- net/lapb/lapb_iface.c | 22
-rw-r--r-- net/lapb/lapb_in.c | 320
-rw-r--r-- net/lapb/lapb_out.c | 38
-rw-r--r-- net/lapb/lapb_subr.c | 28
-rw-r--r-- net/lapb/lapb_timer.c | 32
-rw-r--r-- net/llc/af_llc.c | 16
-rw-r--r-- net/llc/llc_conn.c | 2
-rw-r--r-- net/llc/llc_output.c | 3
-rw-r--r-- net/llc/llc_sap.c | 4
-rw-r--r-- net/llc/sysctl_net_llc.c | 52
-rw-r--r-- net/mac80211/Kconfig | 11
-rw-r--r-- net/mac80211/Makefile | 3
-rw-r--r-- net/mac80211/agg-rx.c | 31
-rw-r--r-- net/mac80211/agg-tx.c | 63
-rw-r--r-- net/mac80211/cfg.c | 267
-rw-r--r-- net/mac80211/chan.c | 26
-rw-r--r-- net/mac80211/debugfs_netdev.c | 89
-rw-r--r-- net/mac80211/debugfs_sta.c | 5
-rw-r--r-- net/mac80211/driver-ops.h | 78
-rw-r--r-- net/mac80211/driver-trace.h | 70
-rw-r--r-- net/mac80211/ht.c | 17
-rw-r--r-- net/mac80211/ibss.c | 53
-rw-r--r-- net/mac80211/ieee80211_i.h | 82
-rw-r--r-- net/mac80211/iface.c | 178
-rw-r--r-- net/mac80211/main.c | 19
-rw-r--r-- net/mac80211/mesh.c | 76
-rw-r--r-- net/mac80211/mesh.h | 37
-rw-r--r-- net/mac80211/mesh_hwmp.c | 52
-rw-r--r-- net/mac80211/mesh_pathtbl.c | 14
-rw-r--r-- net/mac80211/mesh_plink.c | 238
-rw-r--r-- net/mac80211/mesh_sync.c | 316
-rw-r--r-- net/mac80211/mlme.c | 387
-rw-r--r-- net/mac80211/pm.c | 4
-rw-r--r-- net/mac80211/rate.h | 7
-rw-r--r-- net/mac80211/rc80211_minstrel.c | 13
-rw-r--r-- net/mac80211/rc80211_minstrel_ht.c | 20
-rw-r--r-- net/mac80211/rx.c | 78
-rw-r--r-- net/mac80211/scan.c | 140
-rw-r--r-- net/mac80211/sta_info.c | 34
-rw-r--r-- net/mac80211/sta_info.h | 14
-rw-r--r-- net/mac80211/status.c | 10
-rw-r--r-- net/mac80211/tx.c | 118
-rw-r--r-- net/mac80211/util.c | 263
-rw-r--r-- net/mac80211/wep.c | 15
-rw-r--r-- net/mac80211/wme.c | 46
-rw-r--r-- net/mac80211/wme.h | 3
-rw-r--r-- net/mac80211/work.c | 15
-rw-r--r-- net/mac80211/wpa.c | 10
-rw-r--r-- net/mac802154/Kconfig | 16
-rw-r--r-- net/mac802154/Makefile | 2
-rw-r--r-- net/mac802154/ieee802154_dev.c | 294
-rw-r--r-- net/mac802154/mac802154.h | 109
-rw-r--r-- net/mac802154/mac_cmd.c | 45
-rw-r--r-- net/mac802154/mib.c | 93
-rw-r--r-- net/mac802154/monitor.c | 116
-rw-r--r-- net/mac802154/rx.c | 114
-rw-r--r-- net/mac802154/tx.c | 116
-rw-r--r-- net/netfilter/Kconfig | 15
-rw-r--r-- net/netfilter/Makefile | 1
-rw-r--r-- net/netfilter/core.c | 9
-rw-r--r-- net/netfilter/ipset/ip_set_bitmap_ip.c | 33
-rw-r--r-- net/netfilter/ipset/ip_set_bitmap_ipmac.c | 47
-rw-r--r-- net/netfilter/ipset/ip_set_bitmap_port.c | 29
-rw-r--r-- net/netfilter/ipset/ip_set_core.c | 49
-rw-r--r-- net/netfilter/ipset/ip_set_hash_ip.c | 30
-rw-r--r-- net/netfilter/ipset/ip_set_hash_ipport.c | 47
-rw-r--r-- net/netfilter/ipset/ip_set_hash_ipportip.c | 55
-rw-r--r-- net/netfilter/ipset/ip_set_hash_ipportnet.c | 79
-rw-r--r-- net/netfilter/ipset/ip_set_hash_net.c | 55
-rw-r--r-- net/netfilter/ipset/ip_set_hash_netiface.c | 62
-rw-r--r-- net/netfilter/ipset/ip_set_hash_netport.c | 71
-rw-r--r-- net/netfilter/ipset/ip_set_list_set.c | 23
-rw-r--r-- net/netfilter/ipvs/ip_vs_app.c | 2
-rw-r--r-- net/netfilter/ipvs/ip_vs_conn.c | 104
-rw-r--r-- net/netfilter/ipvs/ip_vs_core.c | 43
-rw-r--r-- net/netfilter/ipvs/ip_vs_ctl.c | 270
-rw-r--r-- net/netfilter/ipvs/ip_vs_dh.c | 4
-rw-r--r-- net/netfilter/ipvs/ip_vs_ftp.c | 6
-rw-r--r-- net/netfilter/ipvs/ip_vs_lblc.c | 14
-rw-r--r-- net/netfilter/ipvs/ip_vs_lblcr.c | 14
-rw-r--r-- net/netfilter/ipvs/ip_vs_proto.c | 56
-rw-r--r-- net/netfilter/ipvs/ip_vs_proto_sctp.c | 5
-rw-r--r-- net/netfilter/ipvs/ip_vs_proto_tcp.c | 5
-rw-r--r-- net/netfilter/ipvs/ip_vs_proto_udp.c | 5
-rw-r--r-- net/netfilter/ipvs/ip_vs_sh.c | 4
-rw-r--r-- net/netfilter/ipvs/ip_vs_sync.c | 668
-rw-r--r-- net/netfilter/ipvs/ip_vs_wrr.c | 2
-rw-r--r-- net/netfilter/nf_conntrack_acct.c | 4
-rw-r--r-- net/netfilter/nf_conntrack_amanda.c | 3
-rw-r--r-- net/netfilter/nf_conntrack_core.c | 27
-rw-r--r-- net/netfilter/nf_conntrack_ecache.c | 13
-rw-r--r-- net/netfilter/nf_conntrack_expect.c | 4
-rw-r--r-- net/netfilter/nf_conntrack_h323_main.c | 15
-rw-r--r-- net/netfilter/nf_conntrack_helper.c | 122
-rw-r--r-- net/netfilter/nf_conntrack_irc.c | 8
-rw-r--r-- net/netfilter/nf_conntrack_netlink.c | 93
-rw-r--r-- net/netfilter/nf_conntrack_proto.c | 10
-rw-r--r-- net/netfilter/nf_conntrack_proto_dccp.c | 22
-rw-r--r-- net/netfilter/nf_conntrack_proto_generic.c | 3
-rw-r--r-- net/netfilter/nf_conntrack_proto_gre.c | 9
-rw-r--r-- net/netfilter/nf_conntrack_proto_sctp.c | 22
-rw-r--r-- net/netfilter/nf_conntrack_proto_tcp.c | 75
-rw-r--r-- net/netfilter/nf_conntrack_proto_udp.c | 9
-rw-r--r-- net/netfilter/nf_conntrack_proto_udplite.c | 9
-rw-r--r-- net/netfilter/nf_conntrack_standalone.c | 14
-rw-r--r-- net/netfilter/nf_conntrack_timestamp.c | 4
-rw-r--r-- net/netfilter/nf_log.c | 9
-rw-r--r-- net/netfilter/nfnetlink.c | 2
-rw-r--r-- net/netfilter/nfnetlink_acct.c | 10
-rw-r--r-- net/netfilter/nfnetlink_cttimeout.c | 11
-rw-r--r-- net/netfilter/nfnetlink_log.c | 100
-rw-r--r-- net/netfilter/nfnetlink_queue.c | 68
-rw-r--r-- net/netfilter/xt_CT.c | 3
-rw-r--r-- net/netfilter/xt_HMARK.c | 362
-rw-r--r-- net/netfilter/xt_TCPMSS.c | 10
-rw-r--r-- net/netfilter/xt_TEE.c | 12
-rw-r--r-- net/netfilter/xt_TPROXY.c | 4
-rw-r--r-- net/netfilter/xt_hashlimit.c | 132
-rw-r--r-- net/netfilter/xt_limit.c | 5
-rw-r--r-- net/netfilter/xt_mac.c | 2
-rw-r--r-- net/netfilter/xt_recent.c | 2
-rw-r--r-- net/netfilter/xt_set.c | 15
-rw-r--r-- net/netfilter/xt_socket.c | 4
-rw-r--r-- net/netlink/af_netlink.c | 75
-rw-r--r-- net/netlink/genetlink.c | 37
-rw-r--r-- net/netrom/nr_dev.c | 2
-rw-r--r-- net/netrom/sysctl_net_netrom.c | 10
-rw-r--r-- net/nfc/Kconfig | 1
-rw-r--r-- net/nfc/Makefile | 1
-rw-r--r-- net/nfc/core.c | 218
-rw-r--r-- net/nfc/hci/Kconfig | 17
-rw-r--r-- net/nfc/hci/Makefile | 8
-rw-r--r-- net/nfc/hci/command.c | 354
-rw-r--r-- net/nfc/hci/core.c | 798
-rw-r--r-- net/nfc/hci/hci.h | 139
-rw-r--r-- net/nfc/hci/hcp.c | 156
-rw-r--r-- net/nfc/hci/shdlc.c | 957
-rw-r--r-- net/nfc/llcp/commands.c | 16
-rw-r--r-- net/nfc/llcp/llcp.c | 28
-rw-r--r-- net/nfc/llcp/sock.c | 57
-rw-r--r-- net/nfc/nci/core.c | 27
-rw-r--r-- net/nfc/nci/data.c | 8
-rw-r--r-- net/nfc/nci/lib.c | 1
-rw-r--r-- net/nfc/nci/ntf.c | 13
-rw-r--r-- net/nfc/netlink.c | 107
-rw-r--r-- net/nfc/nfc.h | 5
-rw-r--r-- net/nfc/rawsock.c | 6
-rw-r--r-- net/openvswitch/datapath.c | 63
-rw-r--r-- net/openvswitch/flow.c | 21
-rw-r--r-- net/openvswitch/vport-netdev.c | 10
-rw-r--r-- net/packet/af_packet.c | 50
-rw-r--r-- net/phonet/af_phonet.c | 2
-rw-r--r-- net/phonet/pep.c | 8
-rw-r--r-- net/phonet/pn_dev.c | 25
-rw-r--r-- net/phonet/pn_netlink.c | 8
-rw-r--r-- net/phonet/socket.c | 12
-rw-r--r-- net/phonet/sysctl.c | 17
-rw-r--r-- net/rds/ib.h | 3
-rw-r--r-- net/rds/ib_sysctl.c | 11
-rw-r--r-- net/rds/iw_sysctl.c | 11
-rw-r--r-- net/rds/sysctl.c | 11
-rw-r--r-- net/rds/tcp_listen.c | 2
-rw-r--r-- net/rose/rose_dev.c | 2
-rw-r--r-- net/rose/rose_subr.c | 2
-rw-r--r-- net/rose/sysctl_net_rose.c | 10
-rw-r--r-- net/rxrpc/af_rxrpc.c | 8
-rw-r--r-- net/rxrpc/ar-ack.c | 6
-rw-r--r-- net/rxrpc/ar-call.c | 4
-rw-r--r-- net/rxrpc/ar-input.c | 2
-rw-r--r-- net/rxrpc/ar-internal.h | 16
-rw-r--r-- net/rxrpc/ar-key.c | 22
-rw-r--r-- net/rxrpc/rxkad.c | 6
-rw-r--r-- net/sched/Kconfig | 22
-rw-r--r-- net/sched/Makefile | 2
-rw-r--r-- net/sched/act_api.c | 9
-rw-r--r-- net/sched/act_csum.c | 8
-rw-r--r-- net/sched/act_gact.c | 9
-rw-r--r-- net/sched/act_ipt.c | 21
-rw-r--r-- net/sched/act_mirred.c | 11
-rw-r--r-- net/sched/act_nat.c | 6
-rw-r--r-- net/sched/act_pedit.c | 6
-rw-r--r-- net/sched/act_police.c | 13
-rw-r--r-- net/sched/act_simple.c | 8
-rw-r--r-- net/sched/act_skbedit.c | 27
-rw-r--r-- net/sched/cls_api.c | 3
-rw-r--r-- net/sched/cls_basic.c | 5
-rw-r--r-- net/sched/cls_cgroup.c | 31
-rw-r--r-- net/sched/cls_flow.c | 35
-rw-r--r-- net/sched/cls_fw.c | 15
-rw-r--r-- net/sched/cls_route.c | 16
-rw-r--r-- net/sched/cls_rsvp.h | 16
-rw-r--r-- net/sched/cls_tcindex.c | 14
-rw-r--r-- net/sched/cls_u32.c | 43
-rw-r--r-- net/sched/em_meta.c | 19
-rw-r--r-- net/sched/ematch.c | 10
-rw-r--r-- net/sched/sch_api.c | 19
-rw-r--r-- net/sched/sch_atm.c | 25
-rw-r--r-- net/sched/sch_cbq.c | 18
-rw-r--r-- net/sched/sch_choke.c | 13
-rw-r--r-- net/sched/sch_codel.c | 276
-rw-r--r-- net/sched/sch_drr.c | 7
-rw-r--r-- net/sched/sch_dsmark.c | 21
-rw-r--r-- net/sched/sch_fifo.c | 3
-rw-r--r-- net/sched/sch_fq_codel.c | 626
-rw-r--r-- net/sched/sch_generic.c | 14
-rw-r--r-- net/sched/sch_gred.c | 25
-rw-r--r-- net/sched/sch_hfsc.c | 8
-rw-r--r-- net/sched/sch_htb.c | 12
-rw-r--r-- net/sched/sch_mqprio.c | 3
-rw-r--r-- net/sched/sch_multiq.c | 3
-rw-r--r-- net/sched/sch_netem.c | 45
-rw-r--r-- net/sched/sch_prio.c | 3
-rw-r--r-- net/sched/sch_qfq.c | 5
-rw-r--r-- net/sched/sch_red.c | 5
-rw-r--r-- net/sched/sch_sfb.c | 3
-rw-r--r-- net/sched/sch_sfq.c | 3
-rw-r--r-- net/sched/sch_tbf.c | 3
-rw-r--r-- net/sched/sch_teql.c | 4
-rw-r--r-- net/sctp/associola.c | 4
-rw-r--r-- net/sctp/input.c | 4
-rw-r--r-- net/sctp/output.c | 8
-rw-r--r-- net/sctp/outqueue.c | 2
-rw-r--r-- net/sctp/sm_sideeffect.c | 9
-rw-r--r-- net/sctp/sm_statefuns.c | 22
-rw-r--r-- net/sctp/socket.c | 6
-rw-r--r-- net/sctp/sysctl.c | 10
-rw-r--r-- net/sctp/transport.c | 17
-rw-r--r-- net/socket.c | 68
-rw-r--r-- net/sunrpc/auth_generic.c | 4
-rw-r--r-- net/sunrpc/auth_gss/gss_krb5_mech.c | 2
-rw-r--r-- net/sunrpc/auth_gss/gss_mech_switch.c | 7
-rw-r--r-- net/sunrpc/auth_gss/svcauth_gss.c | 7
-rw-r--r-- net/sunrpc/auth_unix.c | 15
-rw-r--r-- net/sunrpc/cache.c | 2
-rw-r--r-- net/sunrpc/clnt.c | 56
-rw-r--r-- net/sunrpc/rpc_pipe.c | 20
-rw-r--r-- net/sunrpc/rpcb_clnt.c | 2
-rw-r--r-- net/sunrpc/sunrpc_syms.c | 17
-rw-r--r-- net/sunrpc/svc.c | 18
-rw-r--r-- net/sunrpc/svc_xprt.c | 13
-rw-r--r-- net/sunrpc/svcauth_unix.c | 18
-rw-r--r-- net/sunrpc/svcsock.c | 30
-rw-r--r-- net/sunrpc/timer.c | 6
-rw-r--r-- net/sunrpc/xdr.c | 2
-rw-r--r-- net/sunrpc/xprt.c | 9
-rw-r--r-- net/sysctl_net.c | 49
-rw-r--r-- net/tipc/Makefile | 2
-rw-r--r-- net/tipc/addr.c | 3
-rw-r--r-- net/tipc/addr.h | 19
-rw-r--r-- net/tipc/bcast.c | 22
-rw-r--r-- net/tipc/bcast.h | 3
-rw-r--r-- net/tipc/bearer.c | 24
-rw-r--r-- net/tipc/bearer.h | 4
-rw-r--r-- net/tipc/config.c | 30
-rw-r--r-- net/tipc/config.h | 1
-rw-r--r-- net/tipc/core.c | 11
-rw-r--r-- net/tipc/core.h | 14
-rw-r--r-- net/tipc/discover.c | 14
-rw-r--r-- net/tipc/eth_media.c | 19
-rw-r--r-- net/tipc/handler.c | 1
-rw-r--r-- net/tipc/link.c | 122
-rw-r--r-- net/tipc/link.h | 6
-rw-r--r-- net/tipc/log.c | 14
-rw-r--r-- net/tipc/log.h | 1
-rw-r--r-- net/tipc/msg.c | 3
-rw-r--r-- net/tipc/msg.h | 21
-rw-r--r-- net/tipc/name_distr.c | 130
-rw-r--r-- net/tipc/name_table.c | 98
-rw-r--r-- net/tipc/name_table.h | 3
-rw-r--r-- net/tipc/net.c | 9
-rw-r--r-- net/tipc/node.c | 17
-rw-r--r-- net/tipc/node.h | 2
-rw-r--r-- net/tipc/node_subscr.c | 5
-rw-r--r-- net/tipc/node_subscr.h | 1
-rw-r--r-- net/tipc/port.c | 130
-rw-r--r-- net/tipc/port.h | 14
-rw-r--r-- net/tipc/ref.c | 13
-rw-r--r-- net/tipc/socket.c | 103
-rw-r--r-- net/tipc/subscr.c | 45
-rw-r--r-- net/tipc/subscr.h | 2
-rw-r--r-- net/unix/af_unix.c | 36
-rw-r--r-- net/unix/diag.c | 2
-rw-r--r-- net/unix/sysctl_net_unix.c | 10
-rw-r--r-- net/wanrouter/Kconfig | 2
-rw-r--r-- net/wimax/stack.c | 5
-rw-r--r-- net/wireless/chan.c | 2
-rw-r--r-- net/wireless/core.c | 13
-rw-r--r-- net/wireless/core.h | 2
-rw-r--r-- net/wireless/ethtool.c | 29
-rw-r--r-- net/wireless/ibss.c | 2
-rw-r--r-- net/wireless/lib80211_crypt_ccmp.c | 33
-rw-r--r-- net/wireless/lib80211_crypt_tkip.c | 50
-rw-r--r-- net/wireless/mesh.c | 4
-rw-r--r-- net/wireless/mlme.c | 58
-rw-r--r-- net/wireless/nl80211.c | 1384
-rw-r--r-- net/wireless/nl80211.h | 4
-rw-r--r-- net/wireless/reg.c | 10
-rw-r--r-- net/wireless/scan.c | 10
-rw-r--r-- net/wireless/util.c | 25
-rw-r--r-- net/wireless/wext-compat.c | 3
-rw-r--r-- net/wireless/wext-core.c | 15
-rw-r--r-- net/wireless/wext-sme.c | 2
-rw-r--r-- net/wireless/wext-spy.c | 2
-rw-r--r-- net/x25/sysctl_net_x25.c | 10
-rw-r--r-- net/x25/x25_dev.c | 2
-rw-r--r-- net/x25/x25_facilities.c | 4
-rw-r--r-- net/xfrm/Kconfig | 13
-rw-r--r-- net/xfrm/Makefile | 3
-rw-r--r-- net/xfrm/xfrm_algo.c | 5
-rw-r--r-- net/xfrm/xfrm_hash.h | 8
-rw-r--r-- net/xfrm/xfrm_policy.c | 31
-rw-r--r-- net/xfrm/xfrm_sysctl.c | 2
-rw-r--r-- net/xfrm/xfrm_user.c | 105
589 files changed, 21100 insertions, 13780 deletions
diff --git a/net/802/Makefile b/net/802/Makefile
index 7893d679910c..a30d6e385aed 100644
--- a/net/802/Makefile
+++ b/net/802/Makefile
@@ -4,7 +4,6 @@
# Check the p8022 selections against net/core/Makefile.
obj-$(CONFIG_LLC) += p8022.o psnap.o
-obj-$(CONFIG_TR) += p8022.o psnap.o tr.o
obj-$(CONFIG_NET_FC) += fc.o
obj-$(CONFIG_FDDI) += fddi.o
obj-$(CONFIG_HIPPI) += hippi.o
diff --git a/net/802/fc.c b/net/802/fc.c
index b324e31401a9..05eea6b98bb8 100644
--- a/net/802/fc.c
+++ b/net/802/fc.c
@@ -35,7 +35,7 @@
static int fc_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
- const void *daddr, const void *saddr, unsigned len)
+ const void *daddr, const void *saddr, unsigned int len)
{
struct fch_hdr *fch;
int hdr_len;
diff --git a/net/802/fddi.c b/net/802/fddi.c
index 5ab25cd4314b..9cda40661e0d 100644
--- a/net/802/fddi.c
+++ b/net/802/fddi.c
@@ -51,7 +51,7 @@
static int fddi_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
- const void *daddr, const void *saddr, unsigned len)
+ const void *daddr, const void *saddr, unsigned int len)
{
int hl = FDDI_K_SNAP_HLEN;
struct fddihdr *fddi;
diff --git a/net/802/garp.c b/net/802/garp.c
index a5c224830439..8456f5d98b85 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -157,9 +157,9 @@ static struct garp_attr *garp_attr_lookup(const struct garp_applicant *app,
while (parent) {
attr = rb_entry(parent, struct garp_attr, node);
d = garp_attr_cmp(attr, data, len, type);
- if (d < 0)
+ if (d > 0)
parent = parent->rb_left;
- else if (d > 0)
+ else if (d < 0)
parent = parent->rb_right;
else
return attr;
@@ -178,9 +178,9 @@ static struct garp_attr *garp_attr_create(struct garp_applicant *app,
parent = *p;
attr = rb_entry(parent, struct garp_attr, node);
d = garp_attr_cmp(attr, data, len, type);
- if (d < 0)
+ if (d > 0)
p = &parent->rb_left;
- else if (d > 0)
+ else if (d < 0)
p = &parent->rb_right;
else {
/* The attribute already exists; re-use it. */
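The two garp.c hunks above are a single ordering fix: garp_attr_cmp(attr, data, len, type) compares the stored attribute against the lookup key and returns a positive value when the node is the greater one, so the walk must descend left on d > 0. The old tests had the two directions swapped and built a mis-ordered rb-tree. A minimal userspace sketch of the corrected descend (illustrative only: a plain binary search tree with strcmp() standing in for garp_attr_cmp(), not the kernel rb-tree API):

    #include <stdio.h>
    #include <string.h>

    struct node {
        const char *key;
        struct node *left, *right;
    };

    /* Mirrors the fixed garp_attr_lookup(): a node that compares greater
     * than the key can only hide the key in its left subtree. */
    static struct node *bst_find(struct node *n, const char *key)
    {
        while (n) {
            int d = strcmp(n->key, key); /* node vs. key, like garp_attr_cmp() */
            if (d > 0)
                n = n->left;
            else if (d < 0)
                n = n->right;
            else
                return n;
        }
        return NULL;
    }

    int main(void)
    {
        struct node a = { "a", NULL, NULL };
        struct node c = { "c", NULL, NULL };
        struct node b = { "b", &a, &c };

        printf("%s\n", bst_find(&b, "a") ? "found" : "missing");
        return 0;
    }

garp_attr_create() must insert with the same comparison sense the lookup uses, which is why both hunks flip together.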
diff --git a/net/802/hippi.c b/net/802/hippi.c
index 056794e66375..51a1f530417d 100644
--- a/net/802/hippi.c
+++ b/net/802/hippi.c
@@ -45,7 +45,7 @@
static int hippi_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
- const void *daddr, const void *saddr, unsigned len)
+ const void *daddr, const void *saddr, unsigned int len)
{
struct hippi_hdr *hip = (struct hippi_hdr *)skb_push(skb, HIPPI_HLEN);
struct hippi_cb *hcb = (struct hippi_cb *) skb->cb;
diff --git a/net/802/p8022.c b/net/802/p8022.c
index 7f353c4f437a..0bda8de7df51 100644
--- a/net/802/p8022.c
+++ b/net/802/p8022.c
@@ -1,6 +1,5 @@
/*
- * NET3: Support for 802.2 demultiplexing off Ethernet (Token ring
- * is kept separate see p8022tr.c)
+ * NET3: Support for 802.2 demultiplexing off Ethernet
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
diff --git a/net/802/stp.c b/net/802/stp.c
index 15540b7323cd..2c40ba0ec116 100644
--- a/net/802/stp.c
+++ b/net/802/stp.c
@@ -46,7 +46,7 @@ static int stp_pdu_rcv(struct sk_buff *skb, struct net_device *dev,
proto = rcu_dereference(garp_protos[eh->h_dest[5] -
GARP_ADDR_MIN]);
if (proto &&
- compare_ether_addr(eh->h_dest, proto->group_address))
+ !ether_addr_equal(eh->h_dest, proto->group_address))
goto err;
} else
proto = rcu_dereference(stp_proto);
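This stp.c hunk is the first of many compare_ether_addr() to ether_addr_equal() conversions in this commit (the vlan, lec, and mpc hunks below follow the same pattern). compare_ether_addr() is memcmp()-like and returns zero on a match; ether_addr_equal() is a boolean that returns true on a match, so every converted test flips its logical sense, adding or dropping a negation. A userspace sketch of the polarity (stand-ins only, not the kernel implementations):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-in with the old helper's polarity: 0 means "addresses equal". */
    static int compare_ether_addr(const unsigned char *a, const unsigned char *b)
    {
        return memcmp(a, b, 6) != 0;
    }

    /* Stand-in with the new helper's polarity: true means "addresses equal". */
    static bool ether_addr_equal(const unsigned char *a, const unsigned char *b)
    {
        return memcmp(a, b, 6) == 0;
    }

    int main(void)
    {
        unsigned char x[6] = { 0, 1, 2, 3, 4, 5 };
        unsigned char y[6] = { 0, 1, 2, 3, 4, 5 };

        /* The two idioms are equivalent; the new one reads without negation. */
        printf("%d %d\n", !compare_ether_addr(x, y), ether_addr_equal(x, y));
        return 0;
    }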
diff --git a/net/802/tr.c b/net/802/tr.c
deleted file mode 100644
index b9a3a145e348..000000000000
--- a/net/802/tr.c
+++ /dev/null
@@ -1,676 +0,0 @@
-/*
- * NET3: Token ring device handling subroutines
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * Fixes: 3 Feb 97 Paul Norton <pnorton@cts.com> Minor routing fixes.
- * Added rif table to /proc/net/tr_rif and rif timeout to
- * /proc/sys/net/token-ring/rif_timeout.
- * 22 Jun 98 Paul Norton <p.norton@computer.org> Rearranged
- * tr_header and tr_type_trans to handle passing IPX SNAP and
- * 802.2 through the correct layers. Eliminated tr_reformat.
- *
- */
-
-#include <asm/uaccess.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/jiffies.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/in.h>
-#include <linux/inet.h>
-#include <linux/netdevice.h>
-#include <linux/trdevice.h>
-#include <linux/skbuff.h>
-#include <linux/errno.h>
-#include <linux/timer.h>
-#include <linux/net.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/init.h>
-#include <linux/sysctl.h>
-#include <linux/slab.h>
-#include <net/arp.h>
-#include <net/net_namespace.h>
-
-static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev);
-static void rif_check_expire(unsigned long dummy);
-
-#define TR_SR_DEBUG 0
-
-/*
- * Each RIF entry we learn is kept this way
- */
-
-struct rif_cache {
- unsigned char addr[TR_ALEN];
- int iface;
- __be16 rcf;
- __be16 rseg[8];
- struct rif_cache *next;
- unsigned long last_used;
- unsigned char local_ring;
-};
-
-#define RIF_TABLE_SIZE 32
-
-/*
- * We hash the RIF cache 32 ways. We do after all have to look it
- * up a lot.
- */
-
-static struct rif_cache *rif_table[RIF_TABLE_SIZE];
-
-static DEFINE_SPINLOCK(rif_lock);
-
-
-/*
- * Garbage disposal timer.
- */
-
-static struct timer_list rif_timer;
-
-static int sysctl_tr_rif_timeout = 60*10*HZ;
-
-static inline unsigned long rif_hash(const unsigned char *addr)
-{
- unsigned long x;
-
- x = addr[0];
- x = (x << 2) ^ addr[1];
- x = (x << 2) ^ addr[2];
- x = (x << 2) ^ addr[3];
- x = (x << 2) ^ addr[4];
- x = (x << 2) ^ addr[5];
-
- x ^= x >> 8;
-
- return x & (RIF_TABLE_SIZE - 1);
-}
-
-/*
- * Put the headers on a token ring packet. Token ring source routing
- * makes this a little more exciting than on ethernet.
- */
-
-static int tr_header(struct sk_buff *skb, struct net_device *dev,
- unsigned short type,
- const void *daddr, const void *saddr, unsigned len)
-{
- struct trh_hdr *trh;
- int hdr_len;
-
- /*
- * Add the 802.2 SNAP header if IP as the IPv4/IPv6 code calls
- * dev->hard_header directly.
- */
- if (type == ETH_P_IP || type == ETH_P_IPV6 || type == ETH_P_ARP)
- {
- struct trllc *trllc;
-
- hdr_len = sizeof(struct trh_hdr) + sizeof(struct trllc);
- trh = (struct trh_hdr *)skb_push(skb, hdr_len);
- trllc = (struct trllc *)(trh+1);
- trllc->dsap = trllc->ssap = EXTENDED_SAP;
- trllc->llc = UI_CMD;
- trllc->protid[0] = trllc->protid[1] = trllc->protid[2] = 0x00;
- trllc->ethertype = htons(type);
- }
- else
- {
- hdr_len = sizeof(struct trh_hdr);
- trh = (struct trh_hdr *)skb_push(skb, hdr_len);
- }
-
- trh->ac=AC;
- trh->fc=LLC_FRAME;
-
- if(saddr)
- memcpy(trh->saddr,saddr,dev->addr_len);
- else
- memcpy(trh->saddr,dev->dev_addr,dev->addr_len);
-
- /*
- * Build the destination and then source route the frame
- */
-
- if(daddr)
- {
- memcpy(trh->daddr,daddr,dev->addr_len);
- tr_source_route(skb, trh, dev);
- return hdr_len;
- }
-
- return -hdr_len;
-}
-
-/*
- * A neighbour discovery of some species (eg arp) has completed. We
- * can now send the packet.
- */
-
-static int tr_rebuild_header(struct sk_buff *skb)
-{
- struct trh_hdr *trh=(struct trh_hdr *)skb->data;
- struct trllc *trllc=(struct trllc *)(skb->data+sizeof(struct trh_hdr));
- struct net_device *dev = skb->dev;
-
- /*
- * FIXME: We don't yet support IPv6 over token rings
- */
-
- if(trllc->ethertype != htons(ETH_P_IP)) {
- printk("tr_rebuild_header: Don't know how to resolve type %04X addresses ?\n", ntohs(trllc->ethertype));
- return 0;
- }
-
-#ifdef CONFIG_INET
- if(arp_find(trh->daddr, skb)) {
- return 1;
- }
- else
-#endif
- {
- tr_source_route(skb,trh,dev);
- return 0;
- }
-}
-
-/*
- * Some of this is a bit hackish. We intercept RIF information
- * used for source routing. We also grab IP directly and don't feed
- * it via SNAP.
- */
-
-__be16 tr_type_trans(struct sk_buff *skb, struct net_device *dev)
-{
-
- struct trh_hdr *trh;
- struct trllc *trllc;
- unsigned riflen=0;
-
- skb->dev = dev;
- skb_reset_mac_header(skb);
- trh = tr_hdr(skb);
-
- if(trh->saddr[0] & TR_RII)
- riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8;
-
- trllc = (struct trllc *)(skb->data+sizeof(struct trh_hdr)-TR_MAXRIFLEN+riflen);
-
- skb_pull(skb,sizeof(struct trh_hdr)-TR_MAXRIFLEN+riflen);
-
- if(*trh->daddr & 0x80)
- {
- if(!memcmp(trh->daddr,dev->broadcast,TR_ALEN))
- skb->pkt_type=PACKET_BROADCAST;
- else
- skb->pkt_type=PACKET_MULTICAST;
- }
- else if ( (trh->daddr[0] & 0x01) && (trh->daddr[1] & 0x00) && (trh->daddr[2] & 0x5E))
- {
- skb->pkt_type=PACKET_MULTICAST;
- }
- else if(dev->flags & IFF_PROMISC)
- {
- if(memcmp(trh->daddr, dev->dev_addr, TR_ALEN))
- skb->pkt_type=PACKET_OTHERHOST;
- }
-
- if ((skb->pkt_type != PACKET_BROADCAST) &&
- (skb->pkt_type != PACKET_MULTICAST))
- tr_add_rif_info(trh,dev) ;
-
- /*
- * Strip the SNAP header from ARP packets since we don't
- * pass them through to the 802.2/SNAP layers.
- */
-
- if (trllc->dsap == EXTENDED_SAP &&
- (trllc->ethertype == htons(ETH_P_IP) ||
- trllc->ethertype == htons(ETH_P_IPV6) ||
- trllc->ethertype == htons(ETH_P_ARP)))
- {
- skb_pull(skb, sizeof(struct trllc));
- return trllc->ethertype;
- }
-
- return htons(ETH_P_TR_802_2);
-}
-
-/*
- * We try to do source routing...
- */
-
-void tr_source_route(struct sk_buff *skb,struct trh_hdr *trh,
- struct net_device *dev)
-{
- int slack;
- unsigned int hash;
- struct rif_cache *entry;
- unsigned char *olddata;
- unsigned long flags;
- static const unsigned char mcast_func_addr[]
- = {0xC0,0x00,0x00,0x04,0x00,0x00};
-
- spin_lock_irqsave(&rif_lock, flags);
-
- /*
- * Broadcasts are single route as stated in RFC 1042
- */
- if( (!memcmp(&(trh->daddr[0]),&(dev->broadcast[0]),TR_ALEN)) ||
- (!memcmp(&(trh->daddr[0]),&(mcast_func_addr[0]), TR_ALEN)) )
- {
- trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK)
- | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST);
- trh->saddr[0]|=TR_RII;
- }
- else
- {
- hash = rif_hash(trh->daddr);
- /*
- * Walk the hash table and look for an entry
- */
- for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->daddr[0]),TR_ALEN);entry=entry->next);
-
- /*
- * If we found an entry we can route the frame.
- */
- if(entry)
- {
-#if TR_SR_DEBUG
-printk("source routing for %pM\n", trh->daddr);
-#endif
- if(!entry->local_ring && (ntohs(entry->rcf) & TR_RCF_LEN_MASK) >> 8)
- {
- trh->rcf=entry->rcf;
- memcpy(&trh->rseg[0],&entry->rseg[0],8*sizeof(unsigned short));
- trh->rcf^=htons(TR_RCF_DIR_BIT);
- trh->rcf&=htons(0x1fff); /* Issam Chehab <ichehab@madge1.demon.co.uk> */
-
- trh->saddr[0]|=TR_RII;
-#if TR_SR_DEBUG
- printk("entry found with rcf %04x\n", entry->rcf);
- }
- else
- {
- printk("entry found but without rcf length, local=%02x\n", entry->local_ring);
-#endif
- }
- entry->last_used=jiffies;
- }
- else
- {
- /*
- * Without the information we simply have to shout
- * on the wire. The replies should rapidly clean this
- * situation up.
- */
- trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK)
- | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST);
- trh->saddr[0]|=TR_RII;
-#if TR_SR_DEBUG
- printk("no entry in rif table found - broadcasting frame\n");
-#endif
- }
- }
-
- /* Compress the RIF here so we don't have to do it in the driver(s) */
- if (!(trh->saddr[0] & 0x80))
- slack = 18;
- else
- slack = 18 - ((ntohs(trh->rcf) & TR_RCF_LEN_MASK)>>8);
- olddata = skb->data;
- spin_unlock_irqrestore(&rif_lock, flags);
-
- skb_pull(skb, slack);
- memmove(skb->data, olddata, sizeof(struct trh_hdr) - slack);
-}
-
-/*
- * We have learned some new RIF information for our source
- * routing.
- */
-
-static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev)
-{
- unsigned int hash, rii_p = 0;
- unsigned long flags;
- struct rif_cache *entry;
- unsigned char saddr0;
-
- spin_lock_irqsave(&rif_lock, flags);
- saddr0 = trh->saddr[0];
-
- /*
- * Firstly see if the entry exists
- */
-
- if(trh->saddr[0] & TR_RII)
- {
- trh->saddr[0]&=0x7f;
- if (((ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8) > 2)
- {
- rii_p = 1;
- }
- }
-
- hash = rif_hash(trh->saddr);
- for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN);entry=entry->next);
-
- if(entry==NULL)
- {
-#if TR_SR_DEBUG
- printk("adding rif_entry: addr:%pM rcf:%04X\n",
- trh->saddr, ntohs(trh->rcf));
-#endif
- /*
- * Allocate our new entry. A failure to allocate loses
- * use the information. This is harmless.
- *
- * FIXME: We ought to keep some kind of cache size
- * limiting and adjust the timers to suit.
- */
- entry=kmalloc(sizeof(struct rif_cache),GFP_ATOMIC);
-
- if(!entry)
- {
- printk(KERN_DEBUG "tr.c: Couldn't malloc rif cache entry !\n");
- spin_unlock_irqrestore(&rif_lock, flags);
- return;
- }
-
- memcpy(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN);
- entry->iface = dev->ifindex;
- entry->next=rif_table[hash];
- entry->last_used=jiffies;
- rif_table[hash]=entry;
-
- if (rii_p)
- {
- entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
- memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short));
- entry->local_ring = 0;
- }
- else
- {
- entry->local_ring = 1;
- }
- }
- else /* Y. Tahara added */
- {
- /*
- * Update existing entries
- */
- if (!entry->local_ring)
- if (entry->rcf != (trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK)) &&
- !(trh->rcf & htons(TR_RCF_BROADCAST_MASK)))
- {
-#if TR_SR_DEBUG
-printk("updating rif_entry: addr:%pM rcf:%04X\n",
- trh->saddr, ntohs(trh->rcf));
-#endif
- entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
- memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short));
- }
- entry->last_used=jiffies;
- }
- trh->saddr[0]=saddr0; /* put the routing indicator back for tcpdump */
- spin_unlock_irqrestore(&rif_lock, flags);
-}
-
-/*
- * Scan the cache with a timer and see what we need to throw out.
- */
-
-static void rif_check_expire(unsigned long dummy)
-{
- int i;
- unsigned long flags, next_interval = jiffies + sysctl_tr_rif_timeout/2;
-
- spin_lock_irqsave(&rif_lock, flags);
-
- for(i =0; i < RIF_TABLE_SIZE; i++) {
- struct rif_cache *entry, **pentry;
-
- pentry = rif_table+i;
- while((entry=*pentry) != NULL) {
- unsigned long expires
- = entry->last_used + sysctl_tr_rif_timeout;
-
- if (time_before_eq(expires, jiffies)) {
- *pentry = entry->next;
- kfree(entry);
- } else {
- pentry = &entry->next;
-
- if (time_before(expires, next_interval))
- next_interval = expires;
- }
- }
- }
-
- spin_unlock_irqrestore(&rif_lock, flags);
-
- mod_timer(&rif_timer, next_interval);
-
-}
-
-/*
- * Generate the /proc/net information for the token ring RIF
- * routing.
- */
-
-#ifdef CONFIG_PROC_FS
-
-static struct rif_cache *rif_get_idx(loff_t pos)
-{
- int i;
- struct rif_cache *entry;
- loff_t off = 0;
-
- for(i = 0; i < RIF_TABLE_SIZE; i++)
- for(entry = rif_table[i]; entry; entry = entry->next) {
- if (off == pos)
- return entry;
- ++off;
- }
-
- return NULL;
-}
-
-static void *rif_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(&rif_lock)
-{
- spin_lock_irq(&rif_lock);
-
- return *pos ? rif_get_idx(*pos - 1) : SEQ_START_TOKEN;
-}
-
-static void *rif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- int i;
- struct rif_cache *ent = v;
-
- ++*pos;
-
- if (v == SEQ_START_TOKEN) {
- i = -1;
- goto scan;
- }
-
- if (ent->next)
- return ent->next;
-
- i = rif_hash(ent->addr);
- scan:
- while (++i < RIF_TABLE_SIZE) {
- if ((ent = rif_table[i]) != NULL)
- return ent;
- }
- return NULL;
-}
-
-static void rif_seq_stop(struct seq_file *seq, void *v)
- __releases(&rif_lock)
-{
- spin_unlock_irq(&rif_lock);
-}
-
-static int rif_seq_show(struct seq_file *seq, void *v)
-{
- int j, rcf_len, segment, brdgnmb;
- struct rif_cache *entry = v;
-
- if (v == SEQ_START_TOKEN)
- seq_puts(seq,
- "if TR address TTL rcf routing segments\n");
- else {
- struct net_device *dev = dev_get_by_index(&init_net, entry->iface);
- long ttl = (long) (entry->last_used + sysctl_tr_rif_timeout)
- - (long) jiffies;
-
- seq_printf(seq, "%s %pM %7li ",
- dev?dev->name:"?",
- entry->addr,
- ttl/HZ);
-
- if (entry->local_ring)
- seq_puts(seq, "local\n");
- else {
-
- seq_printf(seq, "%04X", ntohs(entry->rcf));
- rcf_len = ((ntohs(entry->rcf) & TR_RCF_LEN_MASK)>>8)-2;
- if (rcf_len)
- rcf_len >>= 1;
- for(j = 1; j < rcf_len; j++) {
- if(j==1) {
- segment=ntohs(entry->rseg[j-1])>>4;
- seq_printf(seq," %03X",segment);
- }
-
- segment=ntohs(entry->rseg[j])>>4;
- brdgnmb=ntohs(entry->rseg[j-1])&0x00f;
- seq_printf(seq,"-%01X-%03X",brdgnmb,segment);
- }
- seq_putc(seq, '\n');
- }
-
- if (dev)
- dev_put(dev);
- }
- return 0;
-}
-
-
-static const struct seq_operations rif_seq_ops = {
- .start = rif_seq_start,
- .next = rif_seq_next,
- .stop = rif_seq_stop,
- .show = rif_seq_show,
-};
-
-static int rif_seq_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &rif_seq_ops);
-}
-
-static const struct file_operations rif_seq_fops = {
- .owner = THIS_MODULE,
- .open = rif_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-#endif
-
-static const struct header_ops tr_header_ops = {
- .create = tr_header,
- .rebuild= tr_rebuild_header,
-};
-
-static void tr_setup(struct net_device *dev)
-{
- /*
- * Configure and register
- */
-
- dev->header_ops = &tr_header_ops;
-
- dev->type = ARPHRD_IEEE802_TR;
- dev->hard_header_len = TR_HLEN;
- dev->mtu = 2000;
- dev->addr_len = TR_ALEN;
- dev->tx_queue_len = 100; /* Long queues on tr */
-
- memset(dev->broadcast,0xFF, TR_ALEN);
-
- /* New-style flags. */
- dev->flags = IFF_BROADCAST | IFF_MULTICAST ;
-}
-
-/**
- * alloc_trdev - Register token ring device
- * @sizeof_priv: Size of additional driver-private structure to be allocated
- * for this token ring device
- *
- * Fill in the fields of the device structure with token ring-generic values.
- *
- * Constructs a new net device, complete with a private data area of
- * size @sizeof_priv. A 32-byte (not bit) alignment is enforced for
- * this private data area.
- */
-struct net_device *alloc_trdev(int sizeof_priv)
-{
- return alloc_netdev(sizeof_priv, "tr%d", tr_setup);
-}
-
-#ifdef CONFIG_SYSCTL
-static struct ctl_table tr_table[] = {
- {
- .procname = "rif_timeout",
- .data = &sysctl_tr_rif_timeout,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec
- },
- { },
-};
-
-static __initdata struct ctl_path tr_path[] = {
- { .procname = "net", },
- { .procname = "token-ring", },
- { }
-};
-#endif
-
-/*
- * Called during bootup. We don't actually have to initialise
- * too much for this.
- */
-
-static int __init rif_init(void)
-{
- rif_timer.expires = jiffies + sysctl_tr_rif_timeout;
- setup_timer(&rif_timer, rif_check_expire, 0);
- add_timer(&rif_timer);
-#ifdef CONFIG_SYSCTL
- register_sysctl_paths(tr_path, tr_table);
-#endif
- proc_net_fops_create(&init_net, "tr_rif", S_IRUGO, &rif_seq_fops);
- return 0;
-}
-
-module_init(rif_init);
-
-EXPORT_SYMBOL(tr_type_trans);
-EXPORT_SYMBOL(alloc_trdev);
-
-MODULE_LICENSE("GPL");
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index efea35b02e7f..6089f0cf23b4 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -266,19 +266,19 @@ static void vlan_sync_address(struct net_device *dev,
struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
/* May be called without an actual change */
- if (!compare_ether_addr(vlan->real_dev_addr, dev->dev_addr))
+ if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
return;
/* vlan address was different from the old address and is equal to
* the new address */
- if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
- !compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
+ if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
+ ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
dev_uc_del(dev, vlandev->dev_addr);
/* vlan address was equal to the old address and is different from
* the new address */
- if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
- compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
+ if (ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
+ !ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
dev_uc_add(dev, vlandev->dev_addr);
memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN);
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 4d39d802be2c..8ca533c95de0 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -31,8 +31,7 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
/* Our lower layer thinks this is not local, let's make sure.
* This allows the VLAN to have a different MAC than the
* underlying device, and still route correctly. */
- if (!compare_ether_addr(eth_hdr(skb)->h_dest,
- vlan_dev->dev_addr))
+ if (ether_addr_equal(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
skb->pkt_type = PACKET_HOST;
}
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 9988d4abb372..da1bc9c3cf38 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -157,7 +157,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
}
- skb_set_dev(skb, vlan_dev_priv(dev)->real_dev);
+ skb->dev = vlan_dev_priv(dev)->real_dev;
len = skb->len;
if (netpoll_tx_running(dev))
return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev);
@@ -277,7 +277,7 @@ static int vlan_dev_open(struct net_device *dev)
!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
return -ENETDOWN;
- if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) {
+ if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) {
err = dev_uc_add(real_dev, dev->dev_addr);
if (err < 0)
goto out;
@@ -307,7 +307,7 @@ clear_allmulti:
if (dev->flags & IFF_ALLMULTI)
dev_set_allmulti(real_dev, -1);
del_unicast:
- if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
+ if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
dev_uc_del(real_dev, dev->dev_addr);
out:
netif_carrier_off(dev);
@@ -326,7 +326,7 @@ static int vlan_dev_stop(struct net_device *dev)
if (dev->flags & IFF_PROMISC)
dev_set_promiscuity(real_dev, -1);
- if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
+ if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
dev_uc_del(real_dev, dev->dev_addr);
netif_carrier_off(dev);
@@ -345,13 +345,13 @@ static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
if (!(dev->flags & IFF_UP))
goto out;
- if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) {
+ if (!ether_addr_equal(addr->sa_data, real_dev->dev_addr)) {
err = dev_uc_add(real_dev, addr->sa_data);
if (err < 0)
return err;
}
- if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
+ if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
dev_uc_del(real_dev, dev->dev_addr);
out:
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index 50711368ad6a..708c80ea1874 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -166,11 +166,13 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
struct nlattr *nest;
unsigned int i;
- NLA_PUT_U16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id);
+ if (nla_put_u16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id))
+ goto nla_put_failure;
if (vlan->flags) {
f.flags = vlan->flags;
f.mask = ~0;
- NLA_PUT(skb, IFLA_VLAN_FLAGS, sizeof(f), &f);
+ if (nla_put(skb, IFLA_VLAN_FLAGS, sizeof(f), &f))
+ goto nla_put_failure;
}
if (vlan->nr_ingress_mappings) {
nest = nla_nest_start(skb, IFLA_VLAN_INGRESS_QOS);
@@ -183,8 +185,9 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
m.from = i;
m.to = vlan->ingress_priority_map[i];
- NLA_PUT(skb, IFLA_VLAN_QOS_MAPPING,
- sizeof(m), &m);
+ if (nla_put(skb, IFLA_VLAN_QOS_MAPPING,
+ sizeof(m), &m))
+ goto nla_put_failure;
}
nla_nest_end(skb, nest);
}
@@ -202,8 +205,9 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
m.from = pm->priority;
m.to = (pm->vlan_qos >> 13) & 0x7;
- NLA_PUT(skb, IFLA_VLAN_QOS_MAPPING,
- sizeof(m), &m);
+ if (nla_put(skb, IFLA_VLAN_QOS_MAPPING,
+ sizeof(m), &m))
+ goto nla_put_failure;
}
}
nla_nest_end(skb, nest);
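The vlan_netlink.c hunks are part of a tree-wide netlink cleanup in this merge: the NLA_PUT() family of macros hid a `goto nla_put_failure;` behind a function-like name, and they were removed in favor of spelling that error path out at each call site. A stubbed-out sketch of the two styles (nla_put() here is a placeholder with the real function's return convention, not the skb-based kernel implementation):

    #include <stdio.h>

    /* Placeholder: the real nla_put() returns nonzero (-EMSGSIZE) when the
     * attribute does not fit into the message buffer. */
    static int nla_put(void *skb, int type, int len, const void *data)
    {
        (void)skb; (void)type; (void)len; (void)data;
        return 0;                     /* pretend the attribute always fits */
    }

    /* The removed macro concealed a jump to a label in the caller: */
    #define NLA_PUT(skb, type, len, data)          \
        do {                                       \
            if (nla_put(skb, type, len, data))     \
                goto nla_put_failure;              \
        } while (0)

    static int fill_info(void *skb)
    {
        unsigned short id = 42;

        /* The explicit style this commit converts to: */
        if (nla_put(skb, 1 /* e.g. IFLA_VLAN_ID */, sizeof(id), &id))
            goto nla_put_failure;
        return 0;

    nla_put_failure:
        return -1;                    /* kernel callers return -EMSGSIZE */
    }

    int main(void)
    {
        printf("fill_info: %d\n", fill_info(NULL));
        return 0;
    }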
diff --git a/net/9p/client.c b/net/9p/client.c
index b23a17c431c8..a170893d70e0 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1530,7 +1530,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
- fid->fid, (long long unsigned) offset, count);
+ fid->fid, (unsigned long long) offset, count);
err = 0;
clnt = fid->clnt;
@@ -1605,7 +1605,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
struct p9_req_t *req;
p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d\n",
- fid->fid, (long long unsigned) offset, count);
+ fid->fid, (unsigned long long) offset, count);
err = 0;
clnt = fid->clnt;
@@ -2040,7 +2040,7 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
char *dataptr;
p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n",
- fid->fid, (long long unsigned) offset, count);
+ fid->fid, (unsigned long long) offset, count);
err = 0;
clnt = fid->clnt;
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index fccae26fa674..6449bae15702 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -513,7 +513,7 @@ error:
clear_bit(Wworksched, &m->wsched);
}
-static int p9_pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
+static int p9_pollwake(wait_queue_t *wait, unsigned int mode, int sync, void *key)
{
struct p9_poll_wait *pwait =
container_of(wait, struct p9_poll_wait, wait);
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 3d432068f627..5af18d11b518 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -615,7 +615,8 @@ static void p9_virtio_remove(struct virtio_device *vdev)
{
struct virtio_chan *chan = vdev->priv;
- BUG_ON(chan->inuse);
+ if (chan->inuse)
+ p9_virtio_close(chan->client);
vdev->config->del_vqs(vdev);
mutex_lock(&virtio_9p_lock);
diff --git a/net/Kconfig b/net/Kconfig
index e07272d0bb2d..245831bec09a 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -207,10 +207,10 @@ source "net/ipx/Kconfig"
source "drivers/net/appletalk/Kconfig"
source "net/x25/Kconfig"
source "net/lapb/Kconfig"
-source "net/econet/Kconfig"
source "net/wanrouter/Kconfig"
source "net/phonet/Kconfig"
source "net/ieee802154/Kconfig"
+source "net/mac802154/Kconfig"
source "net/sched/Kconfig"
source "net/dcb/Kconfig"
source "net/dns_resolver/Kconfig"
@@ -246,9 +246,6 @@ config BQL
select DQL
default y
-config HAVE_BPF_JIT
- bool
-
config BPF_JIT
bool "enable BPF Just In Time compiler"
depends on HAVE_BPF_JIT
@@ -295,7 +292,7 @@ config NET_TCPPROBE
module will be called tcp_probe.
config NET_DROP_MONITOR
- boolean "Network packet drop alerting service"
+ tristate "Network packet drop alerting service"
depends on INET && EXPERIMENTAL && TRACEPOINTS
---help---
This feature provides an alerting service to userspace in the
@@ -340,3 +337,7 @@ source "net/nfc/Kconfig"
endif # if NET
+
+# Used by archs to tell that they support BPF_JIT
+config HAVE_BPF_JIT
+ bool
diff --git a/net/Makefile b/net/Makefile
index ad432fa4d934..4f4ee083064c 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -40,7 +40,6 @@ obj-$(CONFIG_AF_RXRPC) += rxrpc/
obj-$(CONFIG_ATM) += atm/
obj-$(CONFIG_L2TP) += l2tp/
obj-$(CONFIG_DECNET) += decnet/
-obj-$(CONFIG_ECONET) += econet/
obj-$(CONFIG_PHONET) += phonet/
ifneq ($(CONFIG_VLAN_8021Q),)
obj-y += 8021q/
@@ -60,6 +59,7 @@ ifneq ($(CONFIG_DCB),)
obj-y += dcb/
endif
obj-$(CONFIG_IEEE802154) += ieee802154/
+obj-$(CONFIG_MAC802154) += mac802154/
ifeq ($(CONFIG_NET),y)
obj-$(CONFIG_SYSCTL) += sysctl_net.o
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index bfa9ab93eda5..0301b328cf0f 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -63,7 +63,7 @@
#include <net/tcp_states.h>
#include <net/route.h>
#include <linux/atalk.h>
-#include "../core/kmap_skb.h"
+#include <linux/highmem.h>
struct datalink_proto *ddp_dl, *aarp_dl;
static const struct proto_ops atalk_dgram_ops;
@@ -960,10 +960,10 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
if (copy > len)
copy = len;
- vaddr = kmap_skb_frag(frag);
+ vaddr = kmap_atomic(skb_frag_page(frag));
sum = atalk_sum_partial(vaddr + frag->page_offset +
offset - start, copy, sum);
- kunmap_skb_frag(vaddr);
+ kunmap_atomic(vaddr);
if (!(len -= copy))
return sum;
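The ddp.c change rides on kmap_atomic() losing its KM_* slot argument in 3.4: with the slot gone, the skb-specific kmap_skb_frag()/kunmap_skb_frag() wrappers add nothing, so net/core/kmap_skb.h (deleted per the diffstat above) gives way to mapping the fragment page directly. A userspace sketch of the map/use/unmap pattern with stand-in types (illustrative only; a real highmem mapping is not an identity function):

    #include <stdio.h>

    struct page { char data[16]; };
    struct frag { struct page *page; };

    static struct page *skb_frag_page(struct frag *f) { return f->page; }
    static void *kmap_atomic(struct page *p) { return p->data; } /* identity "mapping" */
    static void kunmap_atomic(void *vaddr) { (void)vaddr; }

    int main(void)
    {
        struct page pg = { "payload" };
        struct frag f = { &pg };

        char *vaddr = kmap_atomic(skb_frag_page(&f));
        printf("%s\n", vaddr);        /* use the mapping, then drop it */
        kunmap_atomic(vaddr);
        return 0;
    }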
diff --git a/net/appletalk/sysctl_net_atalk.c b/net/appletalk/sysctl_net_atalk.c
index 04e9c0da7aa9..ebb864361f7a 100644
--- a/net/appletalk/sysctl_net_atalk.c
+++ b/net/appletalk/sysctl_net_atalk.c
@@ -42,20 +42,14 @@ static struct ctl_table atalk_table[] = {
{ },
};
-static struct ctl_path atalk_path[] = {
- { .procname = "net", },
- { .procname = "appletalk", },
- { }
-};
-
static struct ctl_table_header *atalk_table_header;
void atalk_register_sysctl(void)
{
- atalk_table_header = register_sysctl_paths(atalk_path, atalk_table);
+ atalk_table_header = register_net_sysctl(&init_net, "net/appletalk", atalk_table);
}
void atalk_unregister_sysctl(void)
{
- unregister_sysctl_table(atalk_table_header);
+ unregister_net_sysctl_table(atalk_table_header);
}
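The appletalk hunk is one instance of the sysctl registration consolidation visible throughout the diffstat (ax25, decnet, ipx, llc, netrom, rose, and the other sysctl_net_* files): the per-component struct ctl_path array collapses into a plain "net/appletalk" path string passed to register_net_sysctl(). A stubbed sketch of the call shape (the stand-in ignores the struct net argument that the real function uses to scope the table to a network namespace):

    #include <stdio.h>

    struct ctl_table { const char *procname; };
    struct ctl_table_header { const char *path; };

    /* Stand-in with the consolidated API's shape: one path string instead
     * of a { "net" }, { "appletalk" } ctl_path array. */
    static struct ctl_table_header *
    register_net_sysctl(void *net, const char *path, struct ctl_table *table)
    {
        static struct ctl_table_header hdr;
        (void)net; (void)table;
        hdr.path = path;
        return &hdr;
    }

    int main(void)
    {
        struct ctl_table t[] = { { "aarp-expiry-time" }, { NULL } };
        struct ctl_table_header *h = register_net_sysctl(NULL, "net/appletalk", t);

        printf("registered under %s\n", h->path);
        return 0;
    }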
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 353fccf1cde3..4819d31533e0 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -73,7 +73,7 @@ struct br2684_vcc {
#ifdef CONFIG_ATM_BR2684_IPFILTER
struct br2684_filter filter;
#endif /* CONFIG_ATM_BR2684_IPFILTER */
- unsigned copies_needed, copies_failed;
+ unsigned int copies_needed, copies_failed;
};
struct br2684_dev {
diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c
index 62dc8bfe6fe7..bbd3b639992e 100644
--- a/net/atm/ioctl.c
+++ b/net/atm/ioctl.c
@@ -97,9 +97,8 @@ static int do_vcc_ioctl(struct socket *sock, unsigned int cmd,
error = sock_get_timestampns(sk, argp);
goto done;
case ATM_SETSC:
- if (net_ratelimit())
- pr_warning("ATM_SETSC is obsolete; used by %s:%d\n",
- current->comm, task_pid_nr(current));
+ net_warn_ratelimited("ATM_SETSC is obsolete; used by %s:%d\n",
+ current->comm, task_pid_nr(current));
error = 0;
goto done;
case ATMSIGD_CTRL:
@@ -123,8 +122,7 @@ static int do_vcc_ioctl(struct socket *sock, unsigned int cmd,
work for 32-bit userspace. TBH I don't really want
to think about it at all. dwmw2. */
if (compat) {
- if (net_ratelimit())
- pr_warning("32-bit task cannot be atmsigd\n");
+ net_warn_ratelimited("32-bit task cannot be atmsigd\n");
error = -EINVAL;
goto done;
}
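Both ioctl.c hunks replace the open-coded `if (net_ratelimit()) pr_warning(...)` pair with the new net_warn_ratelimited() helper. Roughly what the helper folds together, as a runnable userspace sketch (the kernel version is built on net_ratelimited_function() and shares one ratelimit state across these wrappers):

    #include <stdio.h>

    static int net_ratelimit(void) { return 1; }      /* stub: always allow */
    #define pr_warning(...) fprintf(stderr, __VA_ARGS__)

    #define net_warn_ratelimited(fmt, ...)            \
        do {                                          \
            if (net_ratelimit())                      \
                pr_warning(fmt, ##__VA_ARGS__);       \
        } while (0)

    int main(void)
    {
        net_warn_ratelimited("ATM_SETSC is obsolete; used by %s:%d\n", "demo", 1);
        return 0;
    }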
diff --git a/net/atm/lec.c b/net/atm/lec.c
index f1964caa0f83..a7d172105c99 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -26,11 +26,6 @@
#include <linux/spinlock.h>
#include <linux/seq_file.h>
-/* TokenRing if needed */
-#ifdef CONFIG_TR
-#include <linux/trdevice.h>
-#endif
-
/* And atm device */
#include <linux/atmdev.h>
#include <linux/atmlec.h>
@@ -163,50 +158,6 @@ static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
/*
- * Modelled after tr_type_trans
- * All multicast and ARE or STE frames go to BUS.
- * Non source routed frames go by destination address.
- * Last hop source routed frames go by destination address.
- * Not last hop source routed frames go by _next_ route descriptor.
- * Returns pointer to destination MAC address or fills in rdesc
- * and returns NULL.
- */
-#ifdef CONFIG_TR
-static unsigned char *get_tr_dst(unsigned char *packet, unsigned char *rdesc)
-{
- struct trh_hdr *trh;
- unsigned int riflen, num_rdsc;
-
- trh = (struct trh_hdr *)packet;
- if (trh->daddr[0] & (uint8_t) 0x80)
- return bus_mac; /* multicast */
-
- if (trh->saddr[0] & TR_RII) {
- riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8;
- if ((ntohs(trh->rcf) >> 13) != 0)
- return bus_mac; /* ARE or STE */
- } else
- return trh->daddr; /* not source routed */
-
- if (riflen < 6)
- return trh->daddr; /* last hop, source routed */
-
- /* riflen is 6 or more, packet has more than one route descriptor */
- num_rdsc = (riflen / 2) - 1;
- memset(rdesc, 0, ETH_ALEN);
- /* offset 4 comes from LAN destination field in LE control frames */
- if (trh->rcf & htons((uint16_t) TR_RCF_DIR_BIT))
- memcpy(&rdesc[4], &trh->rseg[num_rdsc - 2], sizeof(__be16));
- else {
- memcpy(&rdesc[4], &trh->rseg[1], sizeof(__be16));
- rdesc[5] = ((ntohs(trh->rseg[0]) & 0x000f) | (rdesc[5] & 0xf0));
- }
-
- return NULL;
-}
-#endif /* CONFIG_TR */
-
-/*
* Open/initialize the netdevice. This is called (in the current kernel)
* sometime after booting when the 'ifconfig' program is run.
*
@@ -257,9 +208,6 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
struct lec_arp_table *entry;
unsigned char *dst;
int min_frame_size;
-#ifdef CONFIG_TR
- unsigned char rdesc[ETH_ALEN]; /* Token Ring route descriptor */
-#endif
int is_rdesc;
pr_debug("called\n");
@@ -290,24 +238,10 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
}
skb_push(skb, 2);
- /* Put le header to place, works for TokenRing too */
+ /* Put le header to place */
lec_h = (struct lecdatahdr_8023 *)skb->data;
lec_h->le_header = htons(priv->lecid);
-#ifdef CONFIG_TR
- /*
- * Ugly. Use this to realign Token Ring packets for
- * e.g. PCA-200E driver.
- */
- if (priv->is_trdev) {
- skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN);
- kfree_skb(skb);
- if (skb2 == NULL)
- return NETDEV_TX_OK;
- skb = skb2;
- }
-#endif
-
#if DUMP_PACKETS >= 2
#define MAX_DUMP_SKB 99
#elif DUMP_PACKETS >= 1
@@ -321,12 +255,7 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
#endif /* DUMP_PACKETS >= 1 */
/* Minimum ethernet-frame size */
-#ifdef CONFIG_TR
- if (priv->is_trdev)
- min_frame_size = LEC_MINIMUM_8025_SIZE;
- else
-#endif
- min_frame_size = LEC_MINIMUM_8023_SIZE;
+ min_frame_size = LEC_MINIMUM_8023_SIZE;
if (skb->len < min_frame_size) {
if ((skb->len + skb_tailroom(skb)) < min_frame_size) {
skb2 = skb_copy_expand(skb, 0,
@@ -345,15 +274,6 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
/* Send to right vcc */
is_rdesc = 0;
dst = lec_h->h_dest;
-#ifdef CONFIG_TR
- if (priv->is_trdev) {
- dst = get_tr_dst(skb->data + 2, rdesc);
- if (dst == NULL) {
- dst = rdesc;
- is_rdesc = 1;
- }
- }
-#endif
entry = NULL;
vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry);
pr_debug("%s:vcc:%p vcc_flags:%lx, entry:%p\n",
@@ -710,12 +630,7 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
dev_kfree_skb(skb);
return;
}
-#ifdef CONFIG_TR
- if (priv->is_trdev)
- dst = ((struct lecdatahdr_8025 *)skb->data)->h_dest;
- else
-#endif
- dst = ((struct lecdatahdr_8023 *)skb->data)->h_dest;
+ dst = ((struct lecdatahdr_8023 *)skb->data)->h_dest;
/*
* If this is a Data Direct VCC, and the VCC does not match
@@ -723,16 +638,7 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
*/
spin_lock_irqsave(&priv->lec_arp_lock, flags);
if (lec_is_data_direct(vcc)) {
-#ifdef CONFIG_TR
- if (priv->is_trdev)
- src =
- ((struct lecdatahdr_8025 *)skb->data)->
- h_source;
- else
-#endif
- src =
- ((struct lecdatahdr_8023 *)skb->data)->
- h_source;
+ src = ((struct lecdatahdr_8023 *)skb->data)->h_source;
entry = lec_arp_find(priv, src);
if (entry && entry->vcc != vcc) {
lec_arp_remove(priv, entry);
@@ -750,12 +656,7 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
if (!hlist_empty(&priv->lec_arp_empty_ones))
lec_arp_check_empties(priv, vcc, skb);
skb_pull(skb, 2); /* skip lec_id */
-#ifdef CONFIG_TR
- if (priv->is_trdev)
- skb->protocol = tr_type_trans(skb, dev);
- else
-#endif
- skb->protocol = eth_type_trans(skb, dev);
+ skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data));
@@ -827,27 +728,13 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
i = 0;
else
i = arg;
-#ifdef CONFIG_TR
if (arg >= MAX_LEC_ITF)
return -EINVAL;
-#else /* Reserve the top NUM_TR_DEVS for TR */
- if (arg >= (MAX_LEC_ITF - NUM_TR_DEVS))
- return -EINVAL;
-#endif
if (!dev_lec[i]) {
- int is_trdev, size;
-
- is_trdev = 0;
- if (i >= (MAX_LEC_ITF - NUM_TR_DEVS))
- is_trdev = 1;
+ int size;
size = sizeof(struct lec_priv);
-#ifdef CONFIG_TR
- if (is_trdev)
- dev_lec[i] = alloc_trdev(size);
- else
-#endif
- dev_lec[i] = alloc_etherdev(size);
+ dev_lec[i] = alloc_etherdev(size);
if (!dev_lec[i])
return -ENOMEM;
dev_lec[i]->netdev_ops = &lec_netdev_ops;
@@ -858,7 +745,6 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
}
priv = netdev_priv(dev_lec[i]);
- priv->is_trdev = is_trdev;
} else {
priv = netdev_priv(dev_lec[i]);
if (priv->lecd)
@@ -1255,7 +1141,7 @@ static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst,
struct sk_buff *skb;
struct lec_priv *priv = netdev_priv(dev);
- if (compare_ether_addr(lan_dst, dev->dev_addr))
+ if (!ether_addr_equal(lan_dst, dev->dev_addr))
return 0; /* not our mac address */
kfree(priv->tlvs); /* NULL if there was no previous association */
@@ -1662,7 +1548,7 @@ static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])];
hlist_for_each_entry(entry, node, head, next) {
- if (!compare_ether_addr(mac_addr, entry->mac_addr))
+ if (ether_addr_equal(mac_addr, entry->mac_addr))
return entry;
}
return NULL;
@@ -1849,7 +1735,7 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
case 1:
return priv->mcast_vcc;
case 2: /* LANE2 wants arp for multicast addresses */
- if (!compare_ether_addr(mac_to_find, bus_mac))
+ if (ether_addr_equal(mac_to_find, bus_mac))
return priv->mcast_vcc;
break;
default:
@@ -2372,15 +2258,7 @@ lec_arp_check_empties(struct lec_priv *priv,
struct hlist_node *node, *next;
struct lec_arp_table *entry, *tmp;
struct lecdatahdr_8023 *hdr = (struct lecdatahdr_8023 *)skb->data;
- unsigned char *src;
-#ifdef CONFIG_TR
- struct lecdatahdr_8025 *tr_hdr = (struct lecdatahdr_8025 *)skb->data;
-
- if (priv->is_trdev)
- src = tr_hdr->h_source;
- else
-#endif
- src = hdr->h_source;
+ unsigned char *src = hdr->h_source;
spin_lock_irqsave(&priv->lec_arp_lock, flags);
hlist_for_each_entry_safe(entry, node, next,
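
A note on the helper conversion in the lec.c hunks above: compare_ether_addr() behaves like memcmp(), returning nonzero when two addresses differ, while ether_addr_equal() returns true when they match, so every "!compare_ether_addr(a, b)" becomes "ether_addr_equal(a, b)" and an unnegated call becomes a negated one. A minimal userspace model of the two helpers, assuming plain memcmp() semantics (the kernel versions compare 16-bit words for speed, but the results are equivalent):

  #include <stdbool.h>
  #include <string.h>

  #define ETH_ALEN 6

  /* old helper: memcmp-like, 0 means "equal" */
  static int compare_ether_addr(const unsigned char *a, const unsigned char *b)
  {
          return memcmp(a, b, ETH_ALEN) != 0;
  }

  /* new helper: boolean, true means "equal" */
  static bool ether_addr_equal(const unsigned char *a, const unsigned char *b)
  {
          return memcmp(a, b, ETH_ALEN) == 0;
  }
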
diff --git a/net/atm/lec.h b/net/atm/lec.h
index dfc071966463..a86aff9a3c04 100644
--- a/net/atm/lec.h
+++ b/net/atm/lec.h
@@ -55,11 +55,11 @@ struct lane2_ops {
* frames.
*
* 1. Dix Ethernet EtherType frames encoded by placing EtherType
- * field in h_type field. Data follows immediatelly after header.
+ * field in h_type field. Data follows immediately after header.
* 2. LLC Data frames whose total length, including LLC field and data,
* but not padding required to meet the minimum data frame length,
* is less than 1536(0x0600) MUST be encoded by placing that length
- * in the h_type field. The LLC field follows header immediatelly.
+ * in the h_type field. The LLC field follows header immediately.
* 3. LLC data frames longer than this maximum MUST be encoded by placing
* the value 0 in the h_type field.
*
@@ -142,7 +142,6 @@ struct lec_priv {
int itfnum; /* e.g. 2 for lec2, 5 for lec5 */
struct lane2_ops *lane2_ops; /* can be NULL for LANE v1 */
int is_proxy; /* bridge between ATM and Ethernet */
- int is_trdev; /* Device type, 0 = Ethernet, 1 = TokenRing */
};
struct lec_vcc_priv {
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index aa972409f093..d4cc1be5c364 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -592,8 +592,7 @@ static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
goto non_ip;
while (i < mpc->number_of_mps_macs) {
- if (!compare_ether_addr(eth->h_dest,
- (mpc->mps_macs + i*ETH_ALEN)))
+ if (ether_addr_equal(eth->h_dest, mpc->mps_macs + i * ETH_ALEN))
if (send_via_shortcut(skb, mpc) == 0) /* try shortcut */
return NETDEV_TX_OK;
i++;
diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c
index 53e500292271..5bdd300db0f7 100644
--- a/net/atm/mpoa_proc.c
+++ b/net/atm/mpoa_proc.c
@@ -207,7 +207,7 @@ static ssize_t proc_mpc_write(struct file *file, const char __user *buff,
size_t nbytes, loff_t *ppos)
{
char *page, *p;
- unsigned len;
+ unsigned int len;
if (nbytes == 0)
return 0;
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index 614d3fc47ede..ce1e59fdae7b 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -62,12 +62,25 @@ struct pppoatm_vcc {
void (*old_pop)(struct atm_vcc *, struct sk_buff *);
/* keep old push/pop for detaching */
enum pppoatm_encaps encaps;
+ atomic_t inflight;
+ unsigned long blocked;
int flags; /* SC_COMP_PROT - compress protocol */
struct ppp_channel chan; /* interface to generic ppp layer */
struct tasklet_struct wakeup_tasklet;
};
/*
+ * We want to allow two packets in the queue. The one that's currently in
+ * flight, and *one* queued up ready for the ATM device to send immediately
+ * from its TX done IRQ. We want to be able to use atomic_inc_not_zero(), so
+ * inflight == -2 represents an empty queue, -1 one packet, and zero means
+ * there are two packets in the queue.
+ */
+#define NONE_INFLIGHT -2
+
+#define BLOCKED 0
+
+/*
* Header used for LLC Encapsulated PPP (4 bytes) followed by the LCP protocol
* ID (0xC021) used in autodetection
*/
@@ -102,16 +115,30 @@ static void pppoatm_wakeup_sender(unsigned long arg)
static void pppoatm_pop(struct atm_vcc *atmvcc, struct sk_buff *skb)
{
struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
+
pvcc->old_pop(atmvcc, skb);
+ atomic_dec(&pvcc->inflight);
+
/*
- * We don't really always want to do this since it's
- * really inefficient - it would be much better if we could
- * test if we had actually throttled the generic layer.
- * Unfortunately then there would be a nasty SMP race where
- * we could clear that flag just as we refuse another packet.
- * For now we do the safe thing.
+ * We always used to run the wakeup tasklet unconditionally here, for
+ * fear of race conditions where we clear the BLOCKED flag just as we
+ * refuse another packet in pppoatm_send(). This was quite inefficient.
+ *
+ * In fact it's OK. The PPP core will only ever call pppoatm_send()
+ * while holding the channel->downl lock. And ppp_output_wakeup() as
+ * called by the tasklet will *also* grab that lock. So even if another
+ * CPU is in pppoatm_send() right now, the tasklet isn't going to race
+ * with it. The wakeup *will* happen after the other CPU is safely out
+ * of pppoatm_send() again.
+ *
+ * So if the CPU in pppoatm_send() has already set the BLOCKED bit and
+	 * is about to return, that's fine. We trigger a wakeup which will
+ * happen later. And if the CPU in pppoatm_send() *hasn't* set the
+ * BLOCKED bit yet, that's fine too because of the double check in
+ * pppoatm_may_send() which is commented there.
*/
- tasklet_schedule(&pvcc->wakeup_tasklet);
+ if (test_and_clear_bit(BLOCKED, &pvcc->blocked))
+ tasklet_schedule(&pvcc->wakeup_tasklet);
}
/*
@@ -184,6 +211,51 @@ error:
ppp_input_error(&pvcc->chan, 0);
}
+static inline int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size)
+{
+ /*
+ * It's not clear that we need to bother with using atm_may_send()
+ * to check we don't exceed sk->sk_sndbuf. If userspace sets a
+ * value of sk_sndbuf which is lower than the MTU, we're going to
+ * block for ever. But the code always did that before we introduced
+ * the packet count limit, so...
+ */
+ if (atm_may_send(pvcc->atmvcc, size) &&
+ atomic_inc_not_zero_hint(&pvcc->inflight, NONE_INFLIGHT))
+ return 1;
+
+ /*
+ * We use test_and_set_bit() rather than set_bit() here because
+ * we need to ensure there's a memory barrier after it. The bit
+ * *must* be set before we do the atomic_inc() on pvcc->inflight.
+ * There's no smp_mb__after_set_bit(), so it's this or abuse
+ * smp_mb__after_clear_bit().
+ */
+ test_and_set_bit(BLOCKED, &pvcc->blocked);
+
+ /*
+ * We may have raced with pppoatm_pop(). If it ran for the
+ * last packet in the queue, *just* before we set the BLOCKED
+ * bit, then it might never run again and the channel could
+ * remain permanently blocked. Cope with that race by checking
+ * *again*. If it did run in that window, we'll have space on
+ * the queue now and can return success. It's harmless to leave
+ * the BLOCKED flag set, since it's only used as a trigger to
+ * run the wakeup tasklet. Another wakeup will never hurt.
+ * If pppoatm_pop() is running but hasn't got as far as making
+ * space on the queue yet, then it hasn't checked the BLOCKED
+ * flag yet either, so we're safe in that case too. It'll issue
+ * an "immediate" wakeup... where "immediate" actually involves
+ * taking the PPP channel's ->downl lock, which is held by the
+ * code path that calls pppoatm_send(), and is thus going to
+ * wait for us to finish.
+ */
+ if (atm_may_send(pvcc->atmvcc, size) &&
+ atomic_inc_not_zero(&pvcc->inflight))
+ return 1;
+
+ return 0;
+}
/*
* Called by the ppp_generic.c to send a packet - returns true if packet
* was accepted. If we return false, then it's our job to call
@@ -207,7 +279,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
struct sk_buff *n;
n = skb_realloc_headroom(skb, LLC_LEN);
if (n != NULL &&
- !atm_may_send(pvcc->atmvcc, n->truesize)) {
+ !pppoatm_may_send(pvcc, n->truesize)) {
kfree_skb(n);
goto nospace;
}
@@ -215,12 +287,12 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
skb = n;
if (skb == NULL)
return DROP_PACKET;
- } else if (!atm_may_send(pvcc->atmvcc, skb->truesize))
+ } else if (!pppoatm_may_send(pvcc, skb->truesize))
goto nospace;
memcpy(skb_push(skb, LLC_LEN), pppllc, LLC_LEN);
break;
case e_vc:
- if (!atm_may_send(pvcc->atmvcc, skb->truesize))
+ if (!pppoatm_may_send(pvcc, skb->truesize))
goto nospace;
break;
case e_autodetect:
@@ -285,6 +357,9 @@ static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg)
if (pvcc == NULL)
return -ENOMEM;
pvcc->atmvcc = atmvcc;
+
+ /* Maximum is zero, so that we can use atomic_inc_not_zero() */
+ atomic_set(&pvcc->inflight, NONE_INFLIGHT);
pvcc->old_push = atmvcc->push;
pvcc->old_pop = atmvcc->pop;
pvcc->encaps = (enum pppoatm_encaps) be.encaps;
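
The inflight counter added to pppoatm above encodes its two-packet limit so that atomic_inc_not_zero() can enforce it locklessly: starting from NONE_INFLIGHT (-2), two successful increments reach zero, and the next attempt fails because the counter is now zero. A self-contained C11 sketch of that scheme (a model of the idea only; the real pppoatm_may_send() additionally re-checks after setting BLOCKED, as its comment explains):

  #include <stdatomic.h>
  #include <stdbool.h>

  #define NONE_INFLIGHT -2        /* empty queue: room for two packets */

  static atomic_int inflight = NONE_INFLIGHT;

  /* models atomic_inc_not_zero(): increment unless the value is 0 */
  static bool queue_slot_get(void)
  {
          int v = atomic_load(&inflight);

          while (v != 0)          /* 0 == both slots taken, refuse */
                  if (atomic_compare_exchange_weak(&inflight, &v, v + 1))
                          return true;
          return false;
  }

  /* models pppoatm_pop(): the device finished sending one packet */
  static void queue_slot_put(void)
  {
          atomic_fetch_sub(&inflight, 1);
  }
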
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 509c8ac02b63..86767ca908a3 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -166,7 +166,7 @@ void sigd_enq2(struct atm_vcc *vcc, enum atmsvc_msg_type type,
{
struct sk_buff *skb;
struct atmsvc_msg *msg;
- static unsigned session = 0;
+ static unsigned int session = 0;
pr_debug("%d (0x%p)\n", (int)type, vcc);
while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL)))
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 0906c194a413..051f7abae66d 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1990,7 +1990,6 @@ static int __init ax25_init(void)
sock_register(&ax25_family_ops);
dev_add_pack(&ax25_packet_type);
register_netdevice_notifier(&ax25_dev_notifier);
- ax25_register_sysctl();
proc_net_fops_create(&init_net, "ax25_route", S_IRUGO, &ax25_route_fops);
proc_net_fops_create(&init_net, "ax25", S_IRUGO, &ax25_info_fops);
@@ -2011,16 +2010,16 @@ static void __exit ax25_exit(void)
proc_net_remove(&init_net, "ax25_route");
proc_net_remove(&init_net, "ax25");
proc_net_remove(&init_net, "ax25_calls");
- ax25_rt_free();
- ax25_uid_free();
- ax25_dev_free();
- ax25_unregister_sysctl();
unregister_netdevice_notifier(&ax25_dev_notifier);
dev_remove_pack(&ax25_packet_type);
sock_unregister(PF_AX25);
proto_unregister(&ax25_proto);
+
+ ax25_rt_free();
+ ax25_uid_free();
+ ax25_dev_free();
}
module_exit(ax25_exit);
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index d0de30e89591..3d106767b272 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -59,8 +59,6 @@ void ax25_dev_device_up(struct net_device *dev)
return;
}
- ax25_unregister_sysctl();
-
dev->ax25_ptr = ax25_dev;
ax25_dev->dev = dev;
dev_hold(dev);
@@ -90,7 +88,7 @@ void ax25_dev_device_up(struct net_device *dev)
ax25_dev_list = ax25_dev;
spin_unlock_bh(&ax25_dev_lock);
- ax25_register_sysctl();
+ ax25_register_dev_sysctl(ax25_dev);
}
void ax25_dev_device_down(struct net_device *dev)
@@ -100,7 +98,7 @@ void ax25_dev_device_down(struct net_device *dev)
if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
return;
- ax25_unregister_sysctl();
+ ax25_unregister_dev_sysctl(ax25_dev);
spin_lock_bh(&ax25_dev_lock);
@@ -120,7 +118,6 @@ void ax25_dev_device_down(struct net_device *dev)
spin_unlock_bh(&ax25_dev_lock);
dev_put(dev);
kfree(ax25_dev);
- ax25_register_sysctl();
return;
}
@@ -130,7 +127,6 @@ void ax25_dev_device_down(struct net_device *dev)
spin_unlock_bh(&ax25_dev_lock);
dev_put(dev);
kfree(ax25_dev);
- ax25_register_sysctl();
return;
}
@@ -138,8 +134,6 @@ void ax25_dev_device_down(struct net_device *dev)
}
spin_unlock_bh(&ax25_dev_lock);
dev->ax25_ptr = NULL;
-
- ax25_register_sysctl();
}
int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd)
diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
index 846ae4e2b115..67de6b33f2c3 100644
--- a/net/ax25/ax25_ip.c
+++ b/net/ax25/ax25_ip.c
@@ -48,7 +48,7 @@
int ax25_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
- const void *saddr, unsigned len)
+ const void *saddr, unsigned int len)
{
unsigned char *buff;
@@ -219,7 +219,7 @@ put:
int ax25_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
- const void *saddr, unsigned len)
+ const void *saddr, unsigned int len)
{
return -AX25_HEADER_LEN;
}
diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
index ebe0ef3f1d83..d5744b752511 100644
--- a/net/ax25/sysctl_net_ax25.c
+++ b/net/ax25/sysctl_net_ax25.c
@@ -29,17 +29,6 @@ static int min_proto[1], max_proto[] = { AX25_PROTO_MAX };
static int min_ds_timeout[1], max_ds_timeout[] = {65535000};
#endif
-static struct ctl_table_header *ax25_table_header;
-
-static ctl_table *ax25_table;
-static int ax25_table_size;
-
-static struct ctl_path ax25_path[] = {
- { .procname = "net", },
- { .procname = "ax25", },
- { }
-};
-
static const ctl_table ax25_param_table[] = {
{
.procname = "ip_default_mode",
@@ -159,52 +148,37 @@ static const ctl_table ax25_param_table[] = {
{ } /* that's all, folks! */
};
-void ax25_register_sysctl(void)
+int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
{
- ax25_dev *ax25_dev;
- int n, k;
-
- spin_lock_bh(&ax25_dev_lock);
- for (ax25_table_size = sizeof(ctl_table), ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next)
- ax25_table_size += sizeof(ctl_table);
-
- if ((ax25_table = kzalloc(ax25_table_size, GFP_ATOMIC)) == NULL) {
- spin_unlock_bh(&ax25_dev_lock);
- return;
- }
-
- for (n = 0, ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) {
- struct ctl_table *child = kmemdup(ax25_param_table,
- sizeof(ax25_param_table),
- GFP_ATOMIC);
- if (!child) {
- while (n--)
- kfree(ax25_table[n].child);
- kfree(ax25_table);
- spin_unlock_bh(&ax25_dev_lock);
- return;
- }
- ax25_table[n].child = ax25_dev->systable = child;
- ax25_table[n].procname = ax25_dev->dev->name;
- ax25_table[n].mode = 0555;
-
-
- for (k = 0; k < AX25_MAX_VALUES; k++)
- child[k].data = &ax25_dev->values[k];
-
- n++;
+ char path[sizeof("net/ax25/") + IFNAMSIZ];
+ int k;
+ struct ctl_table *table;
+
+ table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ for (k = 0; k < AX25_MAX_VALUES; k++)
+ table[k].data = &ax25_dev->values[k];
+
+ snprintf(path, sizeof(path), "net/ax25/%s", ax25_dev->dev->name);
+ ax25_dev->sysheader = register_net_sysctl(&init_net, path, table);
+ if (!ax25_dev->sysheader) {
+ kfree(table);
+ return -ENOMEM;
}
- spin_unlock_bh(&ax25_dev_lock);
-
- ax25_table_header = register_sysctl_paths(ax25_path, ax25_table);
+ return 0;
}
-void ax25_unregister_sysctl(void)
+void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev)
{
- ctl_table *p;
- unregister_sysctl_table(ax25_table_header);
-
- for (p = ax25_table; p->procname; p++)
- kfree(p->child);
- kfree(ax25_table);
+ struct ctl_table_header *header = ax25_dev->sysheader;
+ struct ctl_table *table;
+
+ if (header) {
+ ax25_dev->sysheader = NULL;
+ table = header->ctl_table_arg;
+ unregister_net_sysctl_table(header);
+ kfree(table);
+ }
}
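
The ax25 sysctl rework above trades the old rebuild-everything approach (one big table reallocated under ax25_dev_lock with GFP_ATOMIC on every device change) for a register/unregister pair per device; the kmemdup'ed parameter table is owned by the registration header and recovered through ctl_table_arg for freeing. A toy userspace model of that ownership handoff (all names here are illustrative, not kernel API):

  #include <stdlib.h>
  #include <string.h>

  struct toy_header {
          void *table_arg;        /* plays the role of ctl_table_arg */
  };

  /* "register": duplicate the template and let the header own the copy */
  static struct toy_header *toy_register(const void *tmpl, size_t len)
  {
          struct toy_header *h = malloc(sizeof(*h));

          if (!h)
                  return NULL;
          h->table_arg = malloc(len);
          if (!h->table_arg) {
                  free(h);
                  return NULL;
          }
          memcpy(h->table_arg, tmpl, len);
          return h;
  }

  /* "unregister": free exactly what register duplicated */
  static void toy_unregister(struct toy_header *h)
  {
          free(h->table_arg);
          free(h);
  }
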
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index 2b68d068eaf3..53f5244e28f8 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -7,19 +7,28 @@ config BATMAN_ADV
depends on NET
select CRC16
default n
- ---help---
+ help
+ B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is
+ a routing protocol for multi-hop ad-hoc mesh networks. The
+ networks may be wired or wireless. See
+ http://www.open-mesh.org/ for more information and user space
+ tools.
- B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is
- a routing protocol for multi-hop ad-hoc mesh networks. The
- networks may be wired or wireless. See
- http://www.open-mesh.org/ for more information and user space
- tools.
+config BATMAN_ADV_BLA
+ bool "Bridge Loop Avoidance"
+ depends on BATMAN_ADV && INET
+ default y
+ help
+ This option enables BLA (Bridge Loop Avoidance), a mechanism
+ to avoid Ethernet frames looping when mesh nodes are connected
+ to both the same LAN and the same mesh. If you will never use
+ more than one mesh node in the same LAN, you can safely remove
+ this feature and save some space.
config BATMAN_ADV_DEBUG
bool "B.A.T.M.A.N. debugging"
- depends on BATMAN_ADV != n
- ---help---
-
+ depends on BATMAN_ADV
+ help
This is an option for use by developers; most people should
say N here. This enables compilation of support for
outputting debugging information to the kernel log. The
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index 4e392ebedb64..6d5c1940667d 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -23,6 +23,7 @@ batman-adv-y += bat_debugfs.o
batman-adv-y += bat_iv_ogm.o
batman-adv-y += bat_sysfs.o
batman-adv-y += bitarray.o
+batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o
batman-adv-y += gateway_client.o
batman-adv-y += gateway_common.o
batman-adv-y += hard-interface.o
diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c
index c3b0548b175d..3b588f86d770 100644
--- a/net/batman-adv/bat_debugfs.c
+++ b/net/batman-adv/bat_debugfs.c
@@ -32,6 +32,7 @@
#include "soft-interface.h"
#include "vis.h"
#include "icmp_socket.h"
+#include "bridge_loop_avoidance.h"
static struct dentry *bat_debugfs;
@@ -82,8 +83,8 @@ int debug_log(struct bat_priv *bat_priv, const char *fmt, ...)
va_start(args, fmt);
vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args);
- fdebug_log(bat_priv->debug_log, "[%10lu] %s",
- (jiffies / HZ), tmp_log_buf);
+ fdebug_log(bat_priv->debug_log, "[%10u] %s",
+ jiffies_to_msecs(jiffies), tmp_log_buf);
va_end(args);
return 0;
@@ -238,17 +239,19 @@ static int gateways_open(struct inode *inode, struct file *file)
return single_open(file, gw_client_seq_print_text, net_dev);
}
-static int softif_neigh_open(struct inode *inode, struct file *file)
+static int transtable_global_open(struct inode *inode, struct file *file)
{
struct net_device *net_dev = (struct net_device *)inode->i_private;
- return single_open(file, softif_neigh_seq_print_text, net_dev);
+ return single_open(file, tt_global_seq_print_text, net_dev);
}
-static int transtable_global_open(struct inode *inode, struct file *file)
+#ifdef CONFIG_BATMAN_ADV_BLA
+static int bla_claim_table_open(struct inode *inode, struct file *file)
{
struct net_device *net_dev = (struct net_device *)inode->i_private;
- return single_open(file, tt_global_seq_print_text, net_dev);
+ return single_open(file, bla_claim_table_seq_print_text, net_dev);
}
+#endif
static int transtable_local_open(struct inode *inode, struct file *file)
{
@@ -282,16 +285,20 @@ struct bat_debuginfo bat_debuginfo_##_name = { \
static BAT_DEBUGINFO(routing_algos, S_IRUGO, bat_algorithms_open);
static BAT_DEBUGINFO(originators, S_IRUGO, originators_open);
static BAT_DEBUGINFO(gateways, S_IRUGO, gateways_open);
-static BAT_DEBUGINFO(softif_neigh, S_IRUGO, softif_neigh_open);
static BAT_DEBUGINFO(transtable_global, S_IRUGO, transtable_global_open);
+#ifdef CONFIG_BATMAN_ADV_BLA
+static BAT_DEBUGINFO(bla_claim_table, S_IRUGO, bla_claim_table_open);
+#endif
static BAT_DEBUGINFO(transtable_local, S_IRUGO, transtable_local_open);
static BAT_DEBUGINFO(vis_data, S_IRUGO, vis_data_open);
static struct bat_debuginfo *mesh_debuginfos[] = {
&bat_debuginfo_originators,
&bat_debuginfo_gateways,
- &bat_debuginfo_softif_neigh,
&bat_debuginfo_transtable_global,
+#ifdef CONFIG_BATMAN_ADV_BLA
+ &bat_debuginfo_bla_claim_table,
+#endif
&bat_debuginfo_transtable_local,
&bat_debuginfo_vis_data,
NULL,
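
The bat_debugfs.c timestamp change above replaces (jiffies / HZ), which truncates to whole seconds, with jiffies_to_msecs(jiffies), which returns an unsigned int (hence the switch to %u) with millisecond resolution. For configurations where HZ divides 1000 evenly, the conversion reduces to a simple multiply; a rough userspace model under that assumption:

  /* approximate model of jiffies_to_msecs(), assuming 1000 % hz == 0
   * (true for the common HZ values 100, 250 and 1000) */
  static unsigned int to_msecs(unsigned long jiffies, unsigned int hz)
  {
          return (unsigned int)(jiffies * (1000 / hz));
  }
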
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index a6d5d63fb6ad..dc53798ebb47 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -30,33 +30,69 @@
#include "send.h"
#include "bat_algo.h"
-static void bat_iv_ogm_init(struct hard_iface *hard_iface)
+static struct neigh_node *bat_iv_ogm_neigh_new(struct hard_iface *hard_iface,
+ const uint8_t *neigh_addr,
+ struct orig_node *orig_node,
+ struct orig_node *orig_neigh,
+ uint32_t seqno)
+{
+ struct neigh_node *neigh_node;
+
+ neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, seqno);
+ if (!neigh_node)
+ goto out;
+
+ INIT_LIST_HEAD(&neigh_node->bonding_list);
+
+ neigh_node->orig_node = orig_neigh;
+ neigh_node->if_incoming = hard_iface;
+
+ spin_lock_bh(&orig_node->neigh_list_lock);
+ hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
+ spin_unlock_bh(&orig_node->neigh_list_lock);
+
+out:
+ return neigh_node;
+}
+
+static int bat_iv_ogm_iface_enable(struct hard_iface *hard_iface)
{
struct batman_ogm_packet *batman_ogm_packet;
+ uint32_t random_seqno;
+ int res = -1;
- hard_iface->packet_len = BATMAN_OGM_LEN;
+ /* randomize initial seqno to avoid collision */
+ get_random_bytes(&random_seqno, sizeof(random_seqno));
+ atomic_set(&hard_iface->seqno, random_seqno);
+
+ hard_iface->packet_len = BATMAN_OGM_HLEN;
hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
+ if (!hard_iface->packet_buff)
+ goto out;
+
batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
- batman_ogm_packet->header.packet_type = BAT_OGM;
+ batman_ogm_packet->header.packet_type = BAT_IV_OGM;
batman_ogm_packet->header.version = COMPAT_VERSION;
batman_ogm_packet->header.ttl = 2;
batman_ogm_packet->flags = NO_FLAGS;
batman_ogm_packet->tq = TQ_MAX_VALUE;
batman_ogm_packet->tt_num_changes = 0;
batman_ogm_packet->ttvn = 0;
+
+ res = 0;
+
+out:
+ return res;
}
-static void bat_iv_ogm_init_primary(struct hard_iface *hard_iface)
+static void bat_iv_ogm_iface_disable(struct hard_iface *hard_iface)
{
- struct batman_ogm_packet *batman_ogm_packet;
-
- batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
- batman_ogm_packet->flags = PRIMARIES_FIRST_HOP;
- batman_ogm_packet->header.ttl = TTL;
+ kfree(hard_iface->packet_buff);
+ hard_iface->packet_buff = NULL;
}
-static void bat_iv_ogm_update_mac(struct hard_iface *hard_iface)
+static void bat_iv_ogm_iface_update_mac(struct hard_iface *hard_iface)
{
struct batman_ogm_packet *batman_ogm_packet;
@@ -67,6 +103,15 @@ static void bat_iv_ogm_update_mac(struct hard_iface *hard_iface)
hard_iface->net_dev->dev_addr, ETH_ALEN);
}
+static void bat_iv_ogm_primary_iface_set(struct hard_iface *hard_iface)
+{
+ struct batman_ogm_packet *batman_ogm_packet;
+
+ batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
+ batman_ogm_packet->flags = PRIMARIES_FIRST_HOP;
+ batman_ogm_packet->header.ttl = TTL;
+}
+
/* when do we schedule our own ogm to be sent */
static unsigned long bat_iv_ogm_emit_send_time(const struct bat_priv *bat_priv)
{
@@ -92,7 +137,7 @@ static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
static int bat_iv_ogm_aggr_packet(int buff_pos, int packet_len,
int tt_num_changes)
{
- int next_buff_pos = buff_pos + BATMAN_OGM_LEN + tt_len(tt_num_changes);
+ int next_buff_pos = buff_pos + BATMAN_OGM_HLEN + tt_len(tt_num_changes);
return (next_buff_pos <= packet_len) &&
(next_buff_pos <= MAX_AGGREGATION_BYTES);
@@ -132,7 +177,7 @@ static void bat_iv_ogm_send_to_if(struct forw_packet *forw_packet,
"Sending own" :
"Forwarding"));
bat_dbg(DBG_BATMAN, bat_priv,
- "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
+ "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
fwd_str, (packet_num > 0 ? "aggregated " : ""),
batman_ogm_packet->orig,
ntohl(batman_ogm_packet->seqno),
@@ -142,7 +187,7 @@ static void bat_iv_ogm_send_to_if(struct forw_packet *forw_packet,
batman_ogm_packet->ttvn, hard_iface->net_dev->name,
hard_iface->net_dev->dev_addr);
- buff_pos += BATMAN_OGM_LEN +
+ buff_pos += BATMAN_OGM_HLEN +
tt_len(batman_ogm_packet->tt_num_changes);
packet_num++;
batman_ogm_packet = (struct batman_ogm_packet *)
@@ -191,7 +236,7 @@ static void bat_iv_ogm_emit(struct forw_packet *forw_packet)
/* FIXME: what about aggregated packets ? */
bat_dbg(DBG_BATMAN, bat_priv,
- "%s packet (originator %pM, seqno %d, TTL %d) on interface %s [%pM]\n",
+ "%s packet (originator %pM, seqno %u, TTL %d) on interface %s [%pM]\n",
(forw_packet->own ? "Sending own" : "Forwarding"),
batman_ogm_packet->orig,
ntohl(batman_ogm_packet->seqno),
@@ -335,10 +380,9 @@ static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff,
if ((atomic_read(&bat_priv->aggregated_ogms)) &&
(packet_len < MAX_AGGREGATION_BYTES))
forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES +
- sizeof(struct ethhdr));
+ ETH_HLEN);
else
- forw_packet_aggr->skb = dev_alloc_skb(packet_len +
- sizeof(struct ethhdr));
+ forw_packet_aggr->skb = dev_alloc_skb(packet_len + ETH_HLEN);
if (!forw_packet_aggr->skb) {
if (!own_packet)
@@ -346,7 +390,7 @@ static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff,
kfree(forw_packet_aggr);
goto out;
}
- skb_reserve(forw_packet_aggr->skb, sizeof(struct ethhdr));
+ skb_reserve(forw_packet_aggr->skb, ETH_HLEN);
INIT_HLIST_NODE(&forw_packet_aggr->list);
@@ -461,11 +505,11 @@ static void bat_iv_ogm_queue_add(struct bat_priv *bat_priv,
static void bat_iv_ogm_forward(struct orig_node *orig_node,
const struct ethhdr *ethhdr,
struct batman_ogm_packet *batman_ogm_packet,
- int directlink, struct hard_iface *if_incoming)
+ bool is_single_hop_neigh,
+ bool is_from_best_next_hop,
+ struct hard_iface *if_incoming)
{
struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- struct neigh_node *router;
- uint8_t in_tq, in_ttl, tq_avg = 0;
uint8_t tt_num_changes;
if (batman_ogm_packet->header.ttl <= 1) {
@@ -473,54 +517,43 @@ static void bat_iv_ogm_forward(struct orig_node *orig_node,
return;
}
- router = orig_node_get_router(orig_node);
+ if (!is_from_best_next_hop) {
+ /* Mark the forwarded packet when it is not coming from our
+ * best next hop. We still need to forward the packet for our
+ * neighbor link quality detection to work in case the packet
+ * originated from a single hop neighbor. Otherwise we can
+ * simply drop the ogm.
+ */
+ if (is_single_hop_neigh)
+ batman_ogm_packet->flags |= NOT_BEST_NEXT_HOP;
+ else
+ return;
+ }
- in_tq = batman_ogm_packet->tq;
- in_ttl = batman_ogm_packet->header.ttl;
tt_num_changes = batman_ogm_packet->tt_num_changes;
batman_ogm_packet->header.ttl--;
memcpy(batman_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
- /* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
- * of our best tq value */
- if (router && router->tq_avg != 0) {
-
- /* rebroadcast ogm of best ranking neighbor as is */
- if (!compare_eth(router->addr, ethhdr->h_source)) {
- batman_ogm_packet->tq = router->tq_avg;
-
- if (router->last_ttl)
- batman_ogm_packet->header.ttl =
- router->last_ttl - 1;
- }
-
- tq_avg = router->tq_avg;
- }
-
- if (router)
- neigh_node_free_ref(router);
-
/* apply hop penalty */
batman_ogm_packet->tq = hop_penalty(batman_ogm_packet->tq, bat_priv);
bat_dbg(DBG_BATMAN, bat_priv,
- "Forwarding packet: tq_orig: %i, tq_avg: %i, tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
- in_tq, tq_avg, batman_ogm_packet->tq, in_ttl - 1,
- batman_ogm_packet->header.ttl);
+ "Forwarding packet: tq: %i, ttl: %i\n",
+ batman_ogm_packet->tq, batman_ogm_packet->header.ttl);
batman_ogm_packet->seqno = htonl(batman_ogm_packet->seqno);
batman_ogm_packet->tt_crc = htons(batman_ogm_packet->tt_crc);
/* switch of primaries first hop flag when forwarding */
batman_ogm_packet->flags &= ~PRIMARIES_FIRST_HOP;
- if (directlink)
+ if (is_single_hop_neigh)
batman_ogm_packet->flags |= DIRECTLINK;
else
batman_ogm_packet->flags &= ~DIRECTLINK;
bat_iv_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet,
- BATMAN_OGM_LEN + tt_len(tt_num_changes),
+ BATMAN_OGM_HLEN + tt_len(tt_num_changes),
if_incoming, 0, bat_iv_ogm_fwd_send_time());
}
@@ -603,12 +636,12 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv,
if (is_duplicate)
continue;
- spin_lock_bh(&tmp_neigh_node->tq_lock);
+ spin_lock_bh(&tmp_neigh_node->lq_update_lock);
ring_buffer_set(tmp_neigh_node->tq_recv,
&tmp_neigh_node->tq_index, 0);
tmp_neigh_node->tq_avg =
ring_buffer_avg(tmp_neigh_node->tq_recv);
- spin_unlock_bh(&tmp_neigh_node->tq_lock);
+ spin_unlock_bh(&tmp_neigh_node->lq_update_lock);
}
if (!neigh_node) {
@@ -618,8 +651,9 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv,
if (!orig_tmp)
goto unlock;
- neigh_node = create_neighbor(orig_node, orig_tmp,
- ethhdr->h_source, if_incoming);
+ neigh_node = bat_iv_ogm_neigh_new(if_incoming, ethhdr->h_source,
+ orig_node, orig_tmp,
+ batman_ogm_packet->seqno);
orig_node_free_ref(orig_tmp);
if (!neigh_node)
@@ -631,14 +665,14 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv,
rcu_read_unlock();
orig_node->flags = batman_ogm_packet->flags;
- neigh_node->last_valid = jiffies;
+ neigh_node->last_seen = jiffies;
- spin_lock_bh(&neigh_node->tq_lock);
+ spin_lock_bh(&neigh_node->lq_update_lock);
ring_buffer_set(neigh_node->tq_recv,
&neigh_node->tq_index,
batman_ogm_packet->tq);
neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
- spin_unlock_bh(&neigh_node->tq_lock);
+ spin_unlock_bh(&neigh_node->lq_update_lock);
if (!is_duplicate) {
orig_node->last_ttl = batman_ogm_packet->header.ttl;
@@ -744,19 +778,20 @@ static int bat_iv_ogm_calc_tq(struct orig_node *orig_node,
rcu_read_unlock();
if (!neigh_node)
- neigh_node = create_neighbor(orig_neigh_node,
- orig_neigh_node,
- orig_neigh_node->orig,
- if_incoming);
+ neigh_node = bat_iv_ogm_neigh_new(if_incoming,
+ orig_neigh_node->orig,
+ orig_neigh_node,
+ orig_neigh_node,
+ batman_ogm_packet->seqno);
if (!neigh_node)
goto out;
- /* if orig_node is direct neighbor update neigh_node last_valid */
+ /* if orig_node is direct neighbor update neigh_node last_seen */
if (orig_node == orig_neigh_node)
- neigh_node->last_valid = jiffies;
+ neigh_node->last_seen = jiffies;
- orig_node->last_valid = jiffies;
+ orig_node->last_seen = jiffies;
/* find packet count of corresponding one hop neighbor */
spin_lock_bh(&orig_node->ogm_cnt_lock);
@@ -842,7 +877,8 @@ static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
seq_diff = batman_ogm_packet->seqno - orig_node->last_real_seqno;
/* signalize caller that the packet is to be dropped. */
- if (window_protected(bat_priv, seq_diff,
+ if (!hlist_empty(&orig_node->neigh_list) &&
+ window_protected(bat_priv, seq_diff,
&orig_node->batman_seqno_reset))
goto out;
@@ -850,9 +886,9 @@ static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
hlist_for_each_entry_rcu(tmp_neigh_node, node,
&orig_node->neigh_list, list) {
- is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
- orig_node->last_real_seqno,
- batman_ogm_packet->seqno);
+ is_duplicate |= bat_test_bit(tmp_neigh_node->real_bits,
+ orig_node->last_real_seqno,
+ batman_ogm_packet->seqno);
if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
(tmp_neigh_node->if_incoming == if_incoming))
@@ -866,13 +902,14 @@ static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
seq_diff, set_mark);
tmp_neigh_node->real_packet_count =
- bit_packet_count(tmp_neigh_node->real_bits);
+ bitmap_weight(tmp_neigh_node->real_bits,
+ TQ_LOCAL_WINDOW_SIZE);
}
rcu_read_unlock();
if (need_update) {
bat_dbg(DBG_BATMAN, bat_priv,
- "updating last_seqno: old %d, new %d\n",
+ "updating last_seqno: old %u, new %u\n",
orig_node->last_real_seqno, batman_ogm_packet->seqno);
orig_node->last_real_seqno = batman_ogm_packet->seqno;
}
@@ -897,7 +934,9 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
struct neigh_node *orig_neigh_router = NULL;
int has_directlink_flag;
int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
- int is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
+ int is_broadcast = 0, is_bidirectional;
+ bool is_single_hop_neigh = false;
+ bool is_from_best_next_hop = false;
int is_duplicate;
uint32_t if_incoming_seqno;
@@ -913,7 +952,7 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
* packet in an aggregation. Here we expect that the padding
* is always zero (or not 0x01)
*/
- if (batman_ogm_packet->header.packet_type != BAT_OGM)
+ if (batman_ogm_packet->header.packet_type != BAT_IV_OGM)
return;
/* could be changed by schedule_own_packet() */
@@ -921,11 +960,11 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
- is_single_hop_neigh = (compare_eth(ethhdr->h_source,
- batman_ogm_packet->orig) ? 1 : 0);
+ if (compare_eth(ethhdr->h_source, batman_ogm_packet->orig))
+ is_single_hop_neigh = true;
bat_dbg(DBG_BATMAN, bat_priv,
- "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
+ "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
ethhdr->h_source, if_incoming->net_dev->name,
if_incoming->net_dev->dev_addr, batman_ogm_packet->orig,
batman_ogm_packet->prev_sender, batman_ogm_packet->seqno,
@@ -998,11 +1037,11 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
word = &(orig_neigh_node->bcast_own[offset]);
- bit_mark(word,
- if_incoming_seqno -
+ bat_set_bit(word,
+ if_incoming_seqno -
batman_ogm_packet->seqno - 2);
orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
- bit_packet_count(word);
+ bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE);
spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
}
@@ -1019,6 +1058,13 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
return;
}
+ if (batman_ogm_packet->flags & NOT_BEST_NEXT_HOP) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Drop packet: ignoring all packets not forwarded from the best next hop (sender: %pM)\n",
+ ethhdr->h_source);
+ return;
+ }
+
orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig);
if (!orig_node)
return;
@@ -1043,6 +1089,10 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
if (router)
router_router = orig_node_get_router(router->orig_node);
+ if ((router && router->tq_avg != 0) &&
+ (compare_eth(router->addr, ethhdr->h_source)))
+ is_from_best_next_hop = true;
+
/* avoid temporary routing loops */
if (router && router_router &&
(compare_eth(router->addr, batman_ogm_packet->prev_sender)) &&
@@ -1093,7 +1143,8 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
/* mark direct link on incoming interface */
bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
- 1, if_incoming);
+ is_single_hop_neigh, is_from_best_next_hop,
+ if_incoming);
bat_dbg(DBG_BATMAN, bat_priv,
"Forwarding packet: rebroadcast neighbor packet with direct link flag\n");
@@ -1116,7 +1167,8 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
bat_dbg(DBG_BATMAN, bat_priv,
"Forwarding packet: rebroadcast originator packet\n");
bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
- 0, if_incoming);
+ is_single_hop_neigh, is_from_best_next_hop,
+ if_incoming);
out_neigh:
if ((orig_neigh_node) && (!is_single_hop_neigh))
@@ -1132,13 +1184,25 @@ out:
orig_node_free_ref(orig_node);
}
-static void bat_iv_ogm_receive(struct hard_iface *if_incoming,
- struct sk_buff *skb)
+static int bat_iv_ogm_receive(struct sk_buff *skb,
+ struct hard_iface *if_incoming)
{
+ struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct batman_ogm_packet *batman_ogm_packet;
struct ethhdr *ethhdr;
int buff_pos = 0, packet_len;
unsigned char *tt_buff, *packet_buff;
+ bool ret;
+
+ ret = check_management_packet(skb, if_incoming, BATMAN_OGM_HLEN);
+ if (!ret)
+ return NET_RX_DROP;
+
+ /* did we receive a B.A.T.M.A.N. IV OGM packet on an interface
+	 * that does not have B.A.T.M.A.N. IV enabled?
+ */
+ if (bat_priv->bat_algo_ops->bat_ogm_emit != bat_iv_ogm_emit)
+ return NET_RX_DROP;
packet_len = skb_headlen(skb);
ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@ -1152,31 +1216,50 @@ static void bat_iv_ogm_receive(struct hard_iface *if_incoming,
batman_ogm_packet->seqno = ntohl(batman_ogm_packet->seqno);
batman_ogm_packet->tt_crc = ntohs(batman_ogm_packet->tt_crc);
- tt_buff = packet_buff + buff_pos + BATMAN_OGM_LEN;
+ tt_buff = packet_buff + buff_pos + BATMAN_OGM_HLEN;
bat_iv_ogm_process(ethhdr, batman_ogm_packet,
tt_buff, if_incoming);
- buff_pos += BATMAN_OGM_LEN +
+ buff_pos += BATMAN_OGM_HLEN +
tt_len(batman_ogm_packet->tt_num_changes);
batman_ogm_packet = (struct batman_ogm_packet *)
(packet_buff + buff_pos);
} while (bat_iv_ogm_aggr_packet(buff_pos, packet_len,
batman_ogm_packet->tt_num_changes));
+
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
}
static struct bat_algo_ops batman_iv __read_mostly = {
.name = "BATMAN IV",
- .bat_ogm_init = bat_iv_ogm_init,
- .bat_ogm_init_primary = bat_iv_ogm_init_primary,
- .bat_ogm_update_mac = bat_iv_ogm_update_mac,
+ .bat_iface_enable = bat_iv_ogm_iface_enable,
+ .bat_iface_disable = bat_iv_ogm_iface_disable,
+ .bat_iface_update_mac = bat_iv_ogm_iface_update_mac,
+ .bat_primary_iface_set = bat_iv_ogm_primary_iface_set,
.bat_ogm_schedule = bat_iv_ogm_schedule,
.bat_ogm_emit = bat_iv_ogm_emit,
- .bat_ogm_receive = bat_iv_ogm_receive,
};
int __init bat_iv_init(void)
{
- return bat_algo_register(&batman_iv);
+ int ret;
+
+ /* batman originator packet */
+ ret = recv_handler_register(BAT_IV_OGM, bat_iv_ogm_receive);
+ if (ret < 0)
+ goto out;
+
+ ret = bat_algo_register(&batman_iv);
+ if (ret < 0)
+ goto handler_unregister;
+
+ goto out;
+
+handler_unregister:
+ recv_handler_unregister(BAT_IV_OGM);
+out:
+ return ret;
}
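
bat_iv_init() above uses the usual register-then-unwind error handling: the BAT_IV_OGM receive handler is registered first, and if bat_algo_register() then fails, the handler registration is rolled back so no half-initialized state is left behind. The same control flow as a self-contained sketch (the two register functions are illustrative stubs, one of them hard-wired to fail):

  #include <stdio.h>

  static int register_handler(void) { return 0; }           /* stub */
  static int register_algo(void)    { return -1; }          /* stub: fails */
  static void unregister_handler(void) { puts("unwound"); }

  static int init_model(void)
  {
          int ret;

          ret = register_handler();
          if (ret < 0)
                  goto out;

          ret = register_algo();
          if (ret < 0)
                  goto handler_unregister;

          goto out;

  handler_unregister:
          unregister_handler();   /* undo the step that did succeed */
  out:
          return ret;
  }

  int main(void)
  {
          return init_model() < 0;
  }
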
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index 68ff759fc304..5bc7b66d32dc 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -63,7 +63,7 @@ struct bat_attribute bat_attr_##_name = { \
.store = _store, \
};
-#define BAT_ATTR_STORE_BOOL(_name, _post_func) \
+#define BAT_ATTR_SIF_STORE_BOOL(_name, _post_func) \
ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \
char *buff, size_t count) \
{ \
@@ -73,9 +73,9 @@ ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \
&bat_priv->_name, net_dev); \
}
-#define BAT_ATTR_SHOW_BOOL(_name) \
-ssize_t show_##_name(struct kobject *kobj, struct attribute *attr, \
- char *buff) \
+#define BAT_ATTR_SIF_SHOW_BOOL(_name) \
+ssize_t show_##_name(struct kobject *kobj, \
+ struct attribute *attr, char *buff) \
{ \
struct bat_priv *bat_priv = kobj_to_batpriv(kobj); \
return sprintf(buff, "%s\n", \
@@ -83,16 +83,17 @@ ssize_t show_##_name(struct kobject *kobj, struct attribute *attr, \
"disabled" : "enabled"); \
} \
-/* Use this, if you are going to turn a [name] in bat_priv on or off */
-#define BAT_ATTR_BOOL(_name, _mode, _post_func) \
- static BAT_ATTR_STORE_BOOL(_name, _post_func) \
- static BAT_ATTR_SHOW_BOOL(_name) \
+/* Use this, if you are going to turn a [name] in the soft-interface
+ * (bat_priv) on or off */
+#define BAT_ATTR_SIF_BOOL(_name, _mode, _post_func) \
+ static BAT_ATTR_SIF_STORE_BOOL(_name, _post_func) \
+ static BAT_ATTR_SIF_SHOW_BOOL(_name) \
static BAT_ATTR(_name, _mode, show_##_name, store_##_name)
-#define BAT_ATTR_STORE_UINT(_name, _min, _max, _post_func) \
+#define BAT_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func) \
ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \
- char *buff, size_t count) \
+ char *buff, size_t count) \
{ \
struct net_device *net_dev = kobj_to_netdev(kobj); \
struct bat_priv *bat_priv = netdev_priv(net_dev); \
@@ -100,19 +101,62 @@ ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \
attr, &bat_priv->_name, net_dev); \
}
-#define BAT_ATTR_SHOW_UINT(_name) \
-ssize_t show_##_name(struct kobject *kobj, struct attribute *attr, \
- char *buff) \
+#define BAT_ATTR_SIF_SHOW_UINT(_name) \
+ssize_t show_##_name(struct kobject *kobj, \
+ struct attribute *attr, char *buff) \
{ \
struct bat_priv *bat_priv = kobj_to_batpriv(kobj); \
return sprintf(buff, "%i\n", atomic_read(&bat_priv->_name)); \
} \
-/* Use this, if you are going to set [name] in bat_priv to unsigned integer
- * values only */
-#define BAT_ATTR_UINT(_name, _mode, _min, _max, _post_func) \
- static BAT_ATTR_STORE_UINT(_name, _min, _max, _post_func) \
- static BAT_ATTR_SHOW_UINT(_name) \
+/* Use this, if you are going to set [name] in the soft-interface
+ * (bat_priv) to an unsigned integer value */
+#define BAT_ATTR_SIF_UINT(_name, _mode, _min, _max, _post_func) \
+ static BAT_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func) \
+ static BAT_ATTR_SIF_SHOW_UINT(_name) \
+ static BAT_ATTR(_name, _mode, show_##_name, store_##_name)
+
+
+#define BAT_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func) \
+ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \
+ char *buff, size_t count) \
+{ \
+ struct net_device *net_dev = kobj_to_netdev(kobj); \
+ struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); \
+ ssize_t length; \
+ \
+ if (!hard_iface) \
+ return 0; \
+ \
+ length = __store_uint_attr(buff, count, _min, _max, _post_func, \
+ attr, &hard_iface->_name, net_dev); \
+ \
+ hardif_free_ref(hard_iface); \
+ return length; \
+}
+
+#define BAT_ATTR_HIF_SHOW_UINT(_name) \
+ssize_t show_##_name(struct kobject *kobj, \
+ struct attribute *attr, char *buff) \
+{ \
+ struct net_device *net_dev = kobj_to_netdev(kobj); \
+ struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); \
+ ssize_t length; \
+ \
+ if (!hard_iface) \
+ return 0; \
+ \
+ length = sprintf(buff, "%i\n", atomic_read(&hard_iface->_name));\
+ \
+ hardif_free_ref(hard_iface); \
+ return length; \
+}
+
+/* Use this, if you are going to set [name] in hard_iface to an
+ * unsigned integer value */
+#define BAT_ATTR_HIF_UINT(_name, _mode, _min, _max, _post_func) \
+ static BAT_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func) \
+ static BAT_ATTR_HIF_SHOW_UINT(_name) \
static BAT_ATTR(_name, _mode, show_##_name, store_##_name)
@@ -149,7 +193,7 @@ static int store_bool_attr(char *buff, size_t count,
atomic_read(attr) == 1 ? "enabled" : "disabled",
enabled == 1 ? "enabled" : "disabled");
- atomic_set(attr, (unsigned)enabled);
+ atomic_set(attr, (unsigned int)enabled);
return count;
}
@@ -268,7 +312,7 @@ static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr,
"client" : "server", vis_mode_tmp == VIS_TYPE_CLIENT_UPDATE ?
"client" : "server");
- atomic_set(&bat_priv->vis_mode, (unsigned)vis_mode_tmp);
+ atomic_set(&bat_priv->vis_mode, (unsigned int)vis_mode_tmp);
return count;
}
@@ -354,7 +398,7 @@ static ssize_t store_gw_mode(struct kobject *kobj, struct attribute *attr,
curr_gw_mode_str, buff);
gw_deselect(bat_priv);
- atomic_set(&bat_priv->gw_mode, (unsigned)gw_mode_tmp);
+ atomic_set(&bat_priv->gw_mode, (unsigned int)gw_mode_tmp);
return count;
}
@@ -384,26 +428,32 @@ static ssize_t store_gw_bwidth(struct kobject *kobj, struct attribute *attr,
return gw_bandwidth_set(net_dev, buff, count);
}
-BAT_ATTR_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL);
-BAT_ATTR_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
-BAT_ATTR_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu);
-BAT_ATTR_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
+BAT_ATTR_SIF_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL);
+BAT_ATTR_SIF_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
+#ifdef CONFIG_BATMAN_ADV_BLA
+BAT_ATTR_SIF_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL);
+#endif
+BAT_ATTR_SIF_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu);
+BAT_ATTR_SIF_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode);
static BAT_ATTR(routing_algo, S_IRUGO, show_bat_algo, NULL);
static BAT_ATTR(gw_mode, S_IRUGO | S_IWUSR, show_gw_mode, store_gw_mode);
-BAT_ATTR_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL);
-BAT_ATTR_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, TQ_MAX_VALUE, NULL);
-BAT_ATTR_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE,
- post_gw_deselect);
+BAT_ATTR_SIF_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL);
+BAT_ATTR_SIF_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, TQ_MAX_VALUE, NULL);
+BAT_ATTR_SIF_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE,
+ post_gw_deselect);
static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth,
store_gw_bwidth);
#ifdef CONFIG_BATMAN_ADV_DEBUG
-BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 7, NULL);
+BAT_ATTR_SIF_UINT(log_level, S_IRUGO | S_IWUSR, 0, 15, NULL);
#endif
static struct bat_attribute *mesh_attrs[] = {
&bat_attr_aggregated_ogms,
&bat_attr_bonding,
+#ifdef CONFIG_BATMAN_ADV_BLA
+ &bat_attr_bridge_loop_avoidance,
+#endif
&bat_attr_fragmentation,
&bat_attr_ap_isolation,
&bat_attr_vis_mode,
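
The renamed BAT_ATTR_SIF_* macros and the new BAT_ATTR_HIF_* variants above both generate matched show_<name>/store_<name> handlers by token pasting; they differ only in whether the attribute lives in the soft interface (bat_priv) or in a hard_iface looked up from the net_device. A toy illustration of the token-pasting technique itself, with nothing batman-adv specific in it:

  #include <stdio.h>

  /* one macro emits a flag plus its matched show/store pair */
  #define TOY_ATTR_BOOL(_name)                                          \
  static int _name;                                                     \
  static void show_##_name(void) { printf(#_name ": %d\n", _name); }    \
  static void store_##_name(int v) { _name = !!v; }

  TOY_ATTR_BOOL(bonding)  /* expands to show_bonding() and store_bonding() */

  int main(void)
  {
          store_bonding(1);
          show_bonding();         /* prints "bonding: 1" */
          return 0;
  }
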
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index 6d0aa216b232..07ae6e1b8aca 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -24,100 +24,13 @@
#include <linux/bitops.h>
-/* returns true if the corresponding bit in the given seq_bits indicates true
- * and curr_seqno is within range of last_seqno */
-int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno,
- uint32_t curr_seqno)
-{
- int32_t diff, word_offset, word_num;
-
- diff = last_seqno - curr_seqno;
- if (diff < 0 || diff >= TQ_LOCAL_WINDOW_SIZE) {
- return 0;
- } else {
- /* which word */
- word_num = (last_seqno - curr_seqno) / WORD_BIT_SIZE;
- /* which position in the selected word */
- word_offset = (last_seqno - curr_seqno) % WORD_BIT_SIZE;
-
- if (test_bit(word_offset, &seq_bits[word_num]))
- return 1;
- else
- return 0;
- }
-}
-
-/* turn corresponding bit on, so we can remember that we got the packet */
-void bit_mark(unsigned long *seq_bits, int32_t n)
-{
- int32_t word_offset, word_num;
-
- /* if too old, just drop it */
- if (n < 0 || n >= TQ_LOCAL_WINDOW_SIZE)
- return;
-
- /* which word */
- word_num = n / WORD_BIT_SIZE;
- /* which position in the selected word */
- word_offset = n % WORD_BIT_SIZE;
-
- set_bit(word_offset, &seq_bits[word_num]); /* turn the position on */
-}
-
/* shift the packet array by n places. */
-static void bit_shift(unsigned long *seq_bits, int32_t n)
+static void bat_bitmap_shift_left(unsigned long *seq_bits, int32_t n)
{
- int32_t word_offset, word_num;
- int32_t i;
-
if (n <= 0 || n >= TQ_LOCAL_WINDOW_SIZE)
return;
- word_offset = n % WORD_BIT_SIZE;/* shift how much inside each word */
- word_num = n / WORD_BIT_SIZE; /* shift over how much (full) words */
-
- for (i = NUM_WORDS - 1; i > word_num; i--) {
- /* going from old to new, so we don't overwrite the data we copy
- * from.
- *
- * left is high, right is low: FEDC BA98 7654 3210
- * ^^ ^^
- * vvvv
- * ^^^^ = from, vvvvv =to, we'd have word_num==1 and
- * word_offset==WORD_BIT_SIZE/2 ????? in this example.
- * (=24 bits)
- *
- * our desired output would be: 9876 5432 1000 0000
- * */
-
- seq_bits[i] =
- (seq_bits[i - word_num] << word_offset) +
- /* take the lower port from the left half, shift it left
- * to its final position */
- (seq_bits[i - word_num - 1] >>
- (WORD_BIT_SIZE-word_offset));
- /* and the upper part of the right half and shift it left to
- * its position */
- /* for our example that would be: word[0] = 9800 + 0076 =
- * 9876 */
- }
- /* now for our last word, i==word_num, we only have its "left" half.
- * that's the 1000 word in our example.*/
-
- seq_bits[i] = (seq_bits[i - word_num] << word_offset);
-
- /* pad the rest with 0, if there is anything */
- i--;
-
- for (; i >= 0; i--)
- seq_bits[i] = 0;
-}
-
-static void bit_reset_window(unsigned long *seq_bits)
-{
- int i;
- for (i = 0; i < NUM_WORDS; i++)
- seq_bits[i] = 0;
+ bitmap_shift_left(seq_bits, seq_bits, n, TQ_LOCAL_WINDOW_SIZE);
}
@@ -137,7 +50,7 @@ int bit_get_packet(void *priv, unsigned long *seq_bits,
if ((seq_num_diff <= 0) && (seq_num_diff > -TQ_LOCAL_WINDOW_SIZE)) {
if (set_mark)
- bit_mark(seq_bits, -seq_num_diff);
+ bat_set_bit(seq_bits, -seq_num_diff);
return 0;
}
@@ -145,10 +58,10 @@ int bit_get_packet(void *priv, unsigned long *seq_bits,
* set the mark if required */
if ((seq_num_diff > 0) && (seq_num_diff < TQ_LOCAL_WINDOW_SIZE)) {
- bit_shift(seq_bits, seq_num_diff);
+ bat_bitmap_shift_left(seq_bits, seq_num_diff);
if (set_mark)
- bit_mark(seq_bits, 0);
+ bat_set_bit(seq_bits, 0);
return 1;
}
@@ -159,9 +72,9 @@ int bit_get_packet(void *priv, unsigned long *seq_bits,
bat_dbg(DBG_BATMAN, bat_priv,
"We missed a lot of packets (%i) !\n",
seq_num_diff - 1);
- bit_reset_window(seq_bits);
+ bitmap_zero(seq_bits, TQ_LOCAL_WINDOW_SIZE);
if (set_mark)
- bit_mark(seq_bits, 0);
+ bat_set_bit(seq_bits, 0);
return 1;
}
@@ -176,9 +89,9 @@ int bit_get_packet(void *priv, unsigned long *seq_bits,
bat_dbg(DBG_BATMAN, bat_priv,
"Other host probably restarted!\n");
- bit_reset_window(seq_bits);
+ bitmap_zero(seq_bits, TQ_LOCAL_WINDOW_SIZE);
if (set_mark)
- bit_mark(seq_bits, 0);
+ bat_set_bit(seq_bits, 0);
return 1;
}
@@ -186,16 +99,3 @@ int bit_get_packet(void *priv, unsigned long *seq_bits,
/* never reached */
return 0;
}
-
-/* count the hamming weight, how many good packets did we receive? just count
- * the 1's.
- */
-int bit_packet_count(const unsigned long *seq_bits)
-{
- int i, hamming = 0;
-
- for (i = 0; i < NUM_WORDS; i++)
- hamming += hweight_long(seq_bits[i]);
-
- return hamming;
-}
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index c6135728a680..1835c15cda41 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -22,23 +22,33 @@
#ifndef _NET_BATMAN_ADV_BITARRAY_H_
#define _NET_BATMAN_ADV_BITARRAY_H_
-#define WORD_BIT_SIZE (sizeof(unsigned long) * 8)
-
/* returns true if the corresponding bit in the given seq_bits indicates true
* and curr_seqno is within range of last_seqno */
-int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno,
- uint32_t curr_seqno);
+static inline int bat_test_bit(const unsigned long *seq_bits,
+ uint32_t last_seqno, uint32_t curr_seqno)
+{
+ int32_t diff;
+
+ diff = last_seqno - curr_seqno;
+ if (diff < 0 || diff >= TQ_LOCAL_WINDOW_SIZE)
+ return 0;
+ else
+ return test_bit(diff, seq_bits);
+}
/* turn corresponding bit on, so we can remember that we got the packet */
-void bit_mark(unsigned long *seq_bits, int32_t n);
+static inline void bat_set_bit(unsigned long *seq_bits, int32_t n)
+{
+ /* if too old, just drop it */
+ if (n < 0 || n >= TQ_LOCAL_WINDOW_SIZE)
+ return;
+ set_bit(n, seq_bits); /* turn the position on */
+}
/* receive and process one packet, returns 1 if received seq_num is considered
* new, 0 if old */
int bit_get_packet(void *priv, unsigned long *seq_bits,
int32_t seq_num_diff, int set_mark);
-/* count the hamming weight, how many good packets did we receive? */
-int bit_packet_count(const unsigned long *seq_bits);
-
#endif /* _NET_BATMAN_ADV_BITARRAY_H_ */
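
The bitarray rewrite above retires the hand-rolled word arithmetic in favor of the generic bitmap helpers: bit n of the window means "seqno (last_seqno - n) was received", bat_bitmap_shift_left() ages the window when a newer sequence number arrives, and bitmap_weight() replaces bit_packet_count(). A userspace model of the sliding window, assuming it fits in one 64-bit word (__builtin_popcountll is a GCC/Clang builtin standing in for bitmap_weight()):

  #include <stdint.h>
  #include <stdio.h>

  #define WINDOW_SIZE 64          /* assumption: mirrors TQ_LOCAL_WINDOW_SIZE */

  static uint64_t window;         /* bit n: seqno (last - n) was received */

  static void window_set(int n)                   /* models bat_set_bit() */
  {
          if (n >= 0 && n < WINDOW_SIZE)
                  window |= (uint64_t)1 << n;
  }

  static int window_test(int n)                   /* models bat_test_bit() */
  {
          return n >= 0 && n < WINDOW_SIZE && (window >> n & 1);
  }

  static void window_advance(int n)       /* models the shift on a newer seqno */
  {
          window = (n >= WINDOW_SIZE) ? 0 : window << n;
  }

  int main(void)
  {
          window_set(0);          /* mark the current seqno as received */
          window_advance(3);      /* three newer seqnos arrive */
          printf("%d %d\n", window_test(3),
                 __builtin_popcountll(window));  /* prints "1 1" */
          return 0;
  }
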
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
new file mode 100644
index 000000000000..8bf97515a77d
--- /dev/null
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -0,0 +1,1580 @@
+/*
+ * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
+ *
+ * Simon Wunderlich
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ */
+
+#include "main.h"
+#include "hash.h"
+#include "hard-interface.h"
+#include "originator.h"
+#include "bridge_loop_avoidance.h"
+#include "translation-table.h"
+#include "send.h"
+
+#include <linux/etherdevice.h>
+#include <linux/crc16.h>
+#include <linux/if_arp.h>
+#include <net/arp.h>
+#include <linux/if_vlan.h>
+
+static const uint8_t announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
+
+static void bla_periodic_work(struct work_struct *work);
+static void bla_send_announce(struct bat_priv *bat_priv,
+ struct backbone_gw *backbone_gw);
+
+/* return the index of the claim */
+static inline uint32_t choose_claim(const void *data, uint32_t size)
+{
+ const unsigned char *key = data;
+ uint32_t hash = 0;
+ size_t i;
+
+ for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
+ hash += key[i];
+ hash += (hash << 10);
+ hash ^= (hash >> 6);
+ }
+
+ hash += (hash << 3);
+ hash ^= (hash >> 11);
+ hash += (hash << 15);
+
+ return hash % size;
+}
+
+/* return the index of the backbone gateway */
+static inline uint32_t choose_backbone_gw(const void *data, uint32_t size)
+{
+ const unsigned char *key = data;
+ uint32_t hash = 0;
+ size_t i;
+
+ for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
+ hash += key[i];
+ hash += (hash << 10);
+ hash ^= (hash >> 6);
+ }
+
+ hash += (hash << 3);
+ hash ^= (hash >> 11);
+ hash += (hash << 15);
+
+ return hash % size;
+}
+
+
+/* compares address and vid of two backbone gws */
+static int compare_backbone_gw(const struct hlist_node *node, const void *data2)
+{
+ const void *data1 = container_of(node, struct backbone_gw,
+ hash_entry);
+
+ return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
+}
+
+/* compares address and vid of two claims */
+static int compare_claim(const struct hlist_node *node, const void *data2)
+{
+ const void *data1 = container_of(node, struct claim,
+ hash_entry);
+
+ return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
+}
+
+/* free a backbone gw */
+static void backbone_gw_free_ref(struct backbone_gw *backbone_gw)
+{
+ if (atomic_dec_and_test(&backbone_gw->refcount))
+ kfree_rcu(backbone_gw, rcu);
+}
+
+/* finally deinitialize the claim */
+static void claim_free_rcu(struct rcu_head *rcu)
+{
+ struct claim *claim;
+
+ claim = container_of(rcu, struct claim, rcu);
+
+ backbone_gw_free_ref(claim->backbone_gw);
+ kfree(claim);
+}
+
+/* free a claim, call claim_free_rcu if it's the last reference */
+static void claim_free_ref(struct claim *claim)
+{
+ if (atomic_dec_and_test(&claim->refcount))
+ call_rcu(&claim->rcu, claim_free_rcu);
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @data: search data (may be local/static data)
+ *
+ * looks for a claim in the hash, and returns it if found
+ * or NULL otherwise.
+ */
+static struct claim *claim_hash_find(struct bat_priv *bat_priv,
+ struct claim *data)
+{
+ struct hashtable_t *hash = bat_priv->claim_hash;
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct claim *claim;
+ struct claim *claim_tmp = NULL;
+ int index;
+
+ if (!hash)
+ return NULL;
+
+ index = choose_claim(data, hash->size);
+ head = &hash->table[index];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+ if (!compare_claim(&claim->hash_entry, data))
+ continue;
+
+ if (!atomic_inc_not_zero(&claim->refcount))
+ continue;
+
+ claim_tmp = claim;
+ break;
+ }
+ rcu_read_unlock();
+
+ return claim_tmp;
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the address of the originator
+ * @vid: the VLAN ID
+ *
+ * looks for a backbone gateway in the hash, and returns it if found
+ * or NULL otherwise.
+ */
+static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv,
+ uint8_t *addr, short vid)
+{
+ struct hashtable_t *hash = bat_priv->backbone_hash;
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct backbone_gw search_entry, *backbone_gw;
+ struct backbone_gw *backbone_gw_tmp = NULL;
+ int index;
+
+ if (!hash)
+ return NULL;
+
+ memcpy(search_entry.orig, addr, ETH_ALEN);
+ search_entry.vid = vid;
+
+ index = choose_backbone_gw(&search_entry, hash->size);
+ head = &hash->table[index];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+ if (!compare_backbone_gw(&backbone_gw->hash_entry,
+ &search_entry))
+ continue;
+
+ if (!atomic_inc_not_zero(&backbone_gw->refcount))
+ continue;
+
+ backbone_gw_tmp = backbone_gw;
+ break;
+ }
+ rcu_read_unlock();
+
+ return backbone_gw_tmp;
+}
+
+/* delete all claims for a backbone */
+static void bla_del_backbone_claims(struct backbone_gw *backbone_gw)
+{
+ struct hashtable_t *hash;
+ struct hlist_node *node, *node_tmp;
+ struct hlist_head *head;
+ struct claim *claim;
+ int i;
+ spinlock_t *list_lock; /* protects write access to the hash lists */
+
+ hash = backbone_gw->bat_priv->claim_hash;
+ if (!hash)
+ return;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+ list_lock = &hash->list_locks[i];
+
+ spin_lock_bh(list_lock);
+ hlist_for_each_entry_safe(claim, node, node_tmp,
+ head, hash_entry) {
+ if (claim->backbone_gw != backbone_gw)
+ continue;
+
+ claim_free_ref(claim);
+ hlist_del_rcu(node);
+ }
+ spin_unlock_bh(list_lock);
+ }
+
+ /* all claims gone, initialize CRC */
+ backbone_gw->crc = BLA_CRC_INIT;
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @mac: the mac address to be announced within the claim
+ * @vid: the VLAN ID
+ * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
+ *
+ * sends a claim frame according to the provided info.
+ */
+static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac,
+ short vid, int claimtype)
+{
+ struct sk_buff *skb;
+ struct ethhdr *ethhdr;
+ struct hard_iface *primary_if;
+ struct net_device *soft_iface;
+ uint8_t *hw_src;
+ struct bla_claim_dst local_claim_dest;
+ uint32_t zeroip = 0;
+
+ primary_if = primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ return;
+
+ memcpy(&local_claim_dest, &bat_priv->claim_dest,
+ sizeof(local_claim_dest));
+ local_claim_dest.type = claimtype;
+
+ soft_iface = primary_if->soft_iface;
+
+ skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
+ /* IP DST: 0.0.0.0 */
+ zeroip,
+ primary_if->soft_iface,
+ /* IP SRC: 0.0.0.0 */
+ zeroip,
+ /* Ethernet DST: Broadcast */
+ NULL,
+ /* Ethernet SRC/HW SRC: originator mac */
+ primary_if->net_dev->dev_addr,
+ /* HW DST: FF:43:05:XX:YY:YY
+ * with XX = claim type
+ * and YY:YY = group id
+ */
+ (uint8_t *)&local_claim_dest);
+
+ if (!skb)
+ goto out;
+
+ ethhdr = (struct ethhdr *)skb->data;
+ hw_src = (uint8_t *)ethhdr + ETH_HLEN + sizeof(struct arphdr);
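+ /* hw_src points at the ARP sender hardware address field */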
+
+ /* now we pretend that the client sent this frame ... */
+ switch (claimtype) {
+ case CLAIM_TYPE_ADD:
+ /* normal claim frame
+ * set Ethernet SRC to the clients mac
+ */
+ memcpy(ethhdr->h_source, mac, ETH_ALEN);
+ bat_dbg(DBG_BLA, bat_priv,
+ "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
+ break;
+ case CLAIM_TYPE_DEL:
+ /* unclaim frame
+ * set HW SRC to the clients mac
+ */
+ memcpy(hw_src, mac, ETH_ALEN);
+ bat_dbg(DBG_BLA, bat_priv,
+ "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac, vid);
+ break;
+ case CLAIM_TYPE_ANNOUNCE:
+ /* announcement frame
+ * set HW SRC to the special mac containing the crc
+ */
+ memcpy(hw_src, mac, ETH_ALEN);
+ bat_dbg(DBG_BLA, bat_priv,
+ "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
+ ethhdr->h_source, vid);
+ break;
+ case CLAIM_TYPE_REQUEST:
+ /* request frame
+ * set HW SRC and header destination to the receiving backbone
+ * gw's mac
+ */
+ memcpy(hw_src, mac, ETH_ALEN);
+ memcpy(ethhdr->h_dest, mac, ETH_ALEN);
+ bat_dbg(DBG_BLA, bat_priv,
+ "bla_send_claim(): REQUEST of %pM to %pMon vid %d\n",
+ ethhdr->h_source, ethhdr->h_dest, vid);
+ break;
+ }
+
+ if (vid != -1) {
+ skb = vlan_insert_tag(skb, vid);
+ /* vlan_insert_tag() frees the skb on error */
+ if (!skb)
+ goto out;
+ }
+
+ skb_reset_mac_header(skb);
+ skb->protocol = eth_type_trans(skb, soft_iface);
+ bat_priv->stats.rx_packets++;
+ bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
+ soft_iface->last_rx = jiffies;
+
+ netif_rx(skb);
+out:
+ if (primary_if)
+ hardif_free_ref(primary_if);
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the mac address of the originator
+ * @vid: the VLAN ID
+ *
+ * searches for the backbone gw or creates a new one if it could not
+ * be found.
+ */
+static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
+ uint8_t *orig, short vid)
+{
+ struct backbone_gw *entry;
+ struct orig_node *orig_node;
+ int hash_added;
+
+ entry = backbone_hash_find(bat_priv, orig, vid);
+
+ if (entry)
+ return entry;
+
+ bat_dbg(DBG_BLA, bat_priv,
+ "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
+ orig, vid);
+
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ return NULL;
+
+ entry->vid = vid;
+ entry->lasttime = jiffies;
+ entry->crc = BLA_CRC_INIT;
+ entry->bat_priv = bat_priv;
+ atomic_set(&entry->request_sent, 0);
+ memcpy(entry->orig, orig, ETH_ALEN);
+
+ /* one for the hash, one for returning */
+ atomic_set(&entry->refcount, 2);
+
+ hash_added = hash_add(bat_priv->backbone_hash, compare_backbone_gw,
+ choose_backbone_gw, entry, &entry->hash_entry);
+
+ if (unlikely(hash_added != 0)) {
+ /* hash failed, free the structure */
+ kfree(entry);
+ return NULL;
+ }
+
+ /* this is a gateway now, remove any tt entries */
+ orig_node = orig_hash_find(bat_priv, orig);
+ if (orig_node) {
+ tt_global_del_orig(bat_priv, orig_node,
+ "became a backbone gateway");
+ orig_node_free_ref(orig_node);
+ }
+ return entry;
+}
+
+/* update or add our own backbone gw to make sure we announce
+ * ourselves where we receive other backbone gws
+ */
+static void bla_update_own_backbone_gw(struct bat_priv *bat_priv,
+ struct hard_iface *primary_if,
+ short vid)
+{
+ struct backbone_gw *backbone_gw;
+
+ backbone_gw = bla_get_backbone_gw(bat_priv,
+ primary_if->net_dev->dev_addr, vid);
+ if (unlikely(!backbone_gw))
+ return;
+
+ backbone_gw->lasttime = jiffies;
+ backbone_gw_free_ref(backbone_gw);
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the selected primary interface
+ * @vid: the vid where the request came in on
+ *
+ * Repeat all of our own claims, and finally send an ANNOUNCE frame
+ * to allow the requester another check if the CRC is correct now.
+ */
+static void bla_answer_request(struct bat_priv *bat_priv,
+ struct hard_iface *primary_if, short vid)
+{
+ struct hlist_node *node;
+ struct hlist_head *head;
+ struct hashtable_t *hash;
+ struct claim *claim;
+ struct backbone_gw *backbone_gw;
+ int i;
+
+ bat_dbg(DBG_BLA, bat_priv,
+ "bla_answer_request(): received a claim request, send all of our own claims again\n");
+
+ backbone_gw = backbone_hash_find(bat_priv,
+ primary_if->net_dev->dev_addr, vid);
+ if (!backbone_gw)
+ return;
+
+ hash = bat_priv->claim_hash;
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+ /* only own claims are interesting */
+ if (claim->backbone_gw != backbone_gw)
+ continue;
+
+ bla_send_claim(bat_priv, claim->addr, claim->vid,
+ CLAIM_TYPE_ADD);
+ }
+ rcu_read_unlock();
+ }
+
+ /* finally, send an announcement frame */
+ bla_send_announce(bat_priv, backbone_gw);
+ backbone_gw_free_ref(backbone_gw);
+}
+
+/**
+ * @backbone_gw: the backbone gateway with which we are out of sync
+ *
+ * When the crc is wrong, ask the backbone gateway for a full table update.
+ * After the request, it will repeat all of its own claims and finally
+ * send an announcement claim with which we can check again.
+ */
+static void bla_send_request(struct backbone_gw *backbone_gw)
+{
+ /* first, remove all old entries */
+ bla_del_backbone_claims(backbone_gw);
+
+ bat_dbg(DBG_BLA, backbone_gw->bat_priv,
+ "Sending REQUEST to %pM\n",
+ backbone_gw->orig);
+
+ /* send request */
+ bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
+ backbone_gw->vid, CLAIM_TYPE_REQUEST);
+
+ /* no local broadcasts should be sent or received, for now. */
+ if (!atomic_read(&backbone_gw->request_sent)) {
+ atomic_inc(&backbone_gw->bat_priv->bla_num_requests);
+ atomic_set(&backbone_gw->request_sent, 1);
+ }
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @backbone_gw: our backbone gateway which should be announced
+ *
+ * This function sends an announcement. It is called from multiple
+ * places.
+ */
+static void bla_send_announce(struct bat_priv *bat_priv,
+ struct backbone_gw *backbone_gw)
+{
+ uint8_t mac[ETH_ALEN];
+ uint16_t crc;
+
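+ /* compose the special announce mac: 4 fixed bytes plus 2 crc bytes */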
+ memcpy(mac, announce_mac, 4);
+ crc = htons(backbone_gw->crc);
+ memcpy(&mac[4], (uint8_t *)&crc, 2);
+
+ bla_send_claim(bat_priv, mac, backbone_gw->vid, CLAIM_TYPE_ANNOUNCE);
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @mac: the mac address of the claim
+ * @vid: the VLAN ID of the frame
+ * @backbone_gw: the backbone gateway which claims it
+ *
+ * Adds a claim in the claim hash.
+ */
+static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac,
+ const short vid, struct backbone_gw *backbone_gw)
+{
+ struct claim *claim;
+ struct claim search_claim;
+ int hash_added;
+
+ memcpy(search_claim.addr, mac, ETH_ALEN);
+ search_claim.vid = vid;
+ claim = claim_hash_find(bat_priv, &search_claim);
+
+ /* create a new claim entry if it does not exist yet. */
+ if (!claim) {
+ claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
+ if (!claim)
+ return;
+
+ memcpy(claim->addr, mac, ETH_ALEN);
+ claim->vid = vid;
+ claim->lasttime = jiffies;
+ claim->backbone_gw = backbone_gw;
+
+ atomic_set(&claim->refcount, 2);
+ bat_dbg(DBG_BLA, bat_priv,
+ "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
+ mac, vid);
+ hash_added = hash_add(bat_priv->claim_hash, compare_claim,
+ choose_claim, claim, &claim->hash_entry);
+
+ if (unlikely(hash_added != 0)) {
+ /* hash failed, free the structure */
+ kfree(claim);
+ return;
+ }
+ } else {
+ claim->lasttime = jiffies;
+ if (claim->backbone_gw == backbone_gw)
+ /* no need to register a new backbone */
+ goto claim_free_ref;
+
+ bat_dbg(DBG_BLA, bat_priv,
+ "bla_add_claim(): changing ownership for %pM, vid %d\n",
+ mac, vid);
+
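+ /* remove the claim's crc contribution from the old backbone gw */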
+ claim->backbone_gw->crc ^=
+ crc16(0, claim->addr, ETH_ALEN);
+ backbone_gw_free_ref(claim->backbone_gw);
+
+ }
+ /* set (new) backbone gw */
+ atomic_inc(&backbone_gw->refcount);
+ claim->backbone_gw = backbone_gw;
+
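+ /* add the claim's crc contribution to the new backbone gw */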
+ backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
+ backbone_gw->lasttime = jiffies;
+
+claim_free_ref:
+ claim_free_ref(claim);
+}
+
+/* Delete a claim from the claim hash which has the
+ * given mac address and vid.
+ */
+static void bla_del_claim(struct bat_priv *bat_priv, const uint8_t *mac,
+ const short vid)
+{
+ struct claim search_claim, *claim;
+
+ memcpy(search_claim.addr, mac, ETH_ALEN);
+ search_claim.vid = vid;
+ claim = claim_hash_find(bat_priv, &search_claim);
+ if (!claim)
+ return;
+
+ bat_dbg(DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", mac, vid);
+
+ hash_remove(bat_priv->claim_hash, compare_claim, choose_claim, claim);
+ claim_free_ref(claim); /* reference from the hash is gone */
+
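+ /* the reference from claim_hash_find() still keeps the claim valid */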
+ claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
+
+ /* don't need the reference from hash_find() anymore */
+ claim_free_ref(claim);
+}
+
+/* check for ANNOUNCE frame, return 1 if handled */
+static int handle_announce(struct bat_priv *bat_priv,
+ uint8_t *an_addr, uint8_t *backbone_addr, short vid)
+{
+ struct backbone_gw *backbone_gw;
+ uint16_t crc;
+
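+ /* only the first four bytes of an announce mac are fixed */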
+ if (memcmp(an_addr, announce_mac, 4) != 0)
+ return 0;
+
+ backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);
+
+ if (unlikely(!backbone_gw))
+ return 1;
+
+ /* handle as ANNOUNCE frame */
+ backbone_gw->lasttime = jiffies;
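+ /* the last two bytes of the announce mac carry the crc (network order) */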
+ crc = ntohs(*((uint16_t *)(&an_addr[4])));
+
+ bat_dbg(DBG_BLA, bat_priv,
+ "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n",
+ vid, backbone_gw->orig, crc);
+
+ if (backbone_gw->crc != crc) {
+ bat_dbg(DBG_BLA, backbone_gw->bat_priv,
+ "handle_announce(): CRC FAILED for %pM/%d (my = %04x, sent = %04x)\n",
+ backbone_gw->orig, backbone_gw->vid, backbone_gw->crc,
+ crc);
+
+ bla_send_request(backbone_gw);
+ } else {
+ /* if we have sent a request and the crc was OK,
+ * we can allow traffic again.
+ */
+ if (atomic_read(&backbone_gw->request_sent)) {
+ atomic_dec(&backbone_gw->bat_priv->bla_num_requests);
+ atomic_set(&backbone_gw->request_sent, 0);
+ }
+ }
+
+ backbone_gw_free_ref(backbone_gw);
+ return 1;
+}
+
+/* check for REQUEST frame, return 1 if handled */
+static int handle_request(struct bat_priv *bat_priv,
+ struct hard_iface *primary_if,
+ uint8_t *backbone_addr,
+ struct ethhdr *ethhdr, short vid)
+{
+ /* check for REQUEST frame */
+ if (!compare_eth(backbone_addr, ethhdr->h_dest))
+ return 0;
+
+ /* sanity check, this should not happen on a normal switch,
+ * we ignore it in this case.
+ */
+ if (!compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
+ return 1;
+
+ bat_dbg(DBG_BLA, bat_priv,
+ "handle_request(): REQUEST vid %d (sent by %pM)...\n",
+ vid, ethhdr->h_source);
+
+ bla_answer_request(bat_priv, primary_if, vid);
+ return 1;
+}
+
+/* check for UNCLAIM frame, return 1 if handled */
+static int handle_unclaim(struct bat_priv *bat_priv,
+ struct hard_iface *primary_if,
+ uint8_t *backbone_addr,
+ uint8_t *claim_addr, short vid)
+{
+ struct backbone_gw *backbone_gw;
+
+ /* unclaim in any case if it is our own */
+ if (primary_if && compare_eth(backbone_addr,
+ primary_if->net_dev->dev_addr))
+ bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_DEL);
+
+ backbone_gw = backbone_hash_find(bat_priv, backbone_addr, vid);
+
+ if (!backbone_gw)
+ return 1;
+
+ /* this must be an UNCLAIM frame */
+ bat_dbg(DBG_BLA, bat_priv,
+ "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
+ claim_addr, vid, backbone_gw->orig);
+
+ bla_del_claim(bat_priv, claim_addr, vid);
+ backbone_gw_free_ref(backbone_gw);
+ return 1;
+}
+
+/* check for CLAIM frame, return 1 if handled */
+static int handle_claim(struct bat_priv *bat_priv,
+ struct hard_iface *primary_if, uint8_t *backbone_addr,
+ uint8_t *claim_addr, short vid)
+{
+ struct backbone_gw *backbone_gw;
+
+ /* register the gateway if not yet available, and add the claim. */
+
+ backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);
+
+ if (unlikely(!backbone_gw))
+ return 1;
+
+ /* this must be a CLAIM frame */
+ bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
+ if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
+ bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_ADD);
+
+ /* TODO: we could call something like tt_local_del() here. */
+
+ backbone_gw_free_ref(backbone_gw);
+ return 1;
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the selected primary interface
+ * @hw_src: the Hardware source in the ARP Header
+ * @hw_dst: the Hardware destination in the ARP Header
+ * @ethhdr: pointer to the Ethernet header of the claim frame
+ *
+ * checks if it is a claim packet and if it is in the same group.
+ * This function also adopts the group ID of the sender
+ * if it is in the same mesh.
+ *
+ * returns:
+ * 2 - if it is a claim packet and on the same group
+ * 1 - if it is a claim packet from another group
+ * 0 - if it is not a claim packet
+ */
+static int check_claim_group(struct bat_priv *bat_priv,
+ struct hard_iface *primary_if,
+ uint8_t *hw_src, uint8_t *hw_dst,
+ struct ethhdr *ethhdr)
+{
+ uint8_t *backbone_addr;
+ struct orig_node *orig_node;
+ struct bla_claim_dst *bla_dst, *bla_dst_own;
+
+ bla_dst = (struct bla_claim_dst *)hw_dst;
+ bla_dst_own = &bat_priv->claim_dest;
+
+ /* check if it is a claim packet in general */
+ if (memcmp(bla_dst->magic, bla_dst_own->magic,
+ sizeof(bla_dst->magic)) != 0)
+ return 0;
+
+ /* if announcement packet, use the source,
+ * otherwise assume it is in the hw_src
+ */
+ switch (bla_dst->type) {
+ case CLAIM_TYPE_ADD:
+ backbone_addr = hw_src;
+ break;
+ case CLAIM_TYPE_REQUEST:
+ case CLAIM_TYPE_ANNOUNCE:
+ case CLAIM_TYPE_DEL:
+ backbone_addr = ethhdr->h_source;
+ break;
+ default:
+ return 0;
+ }
+
+ /* don't accept claim frames from ourselves */
+ if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
+ return 0;
+
+ /* if it's already the same group, it is fine. */
+ if (bla_dst->group == bla_dst_own->group)
+ return 2;
+
+ /* let's see if this originator is in our mesh */
+ orig_node = orig_hash_find(bat_priv, backbone_addr);
+
+ /* don't accept claims from gateways which are not in
+ * the same mesh or group.
+ */
+ if (!orig_node)
+ return 1;
+
+ /* if our mesh friend's mac is bigger, use it for ourselves. */
+ if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
+ bat_dbg(DBG_BLA, bat_priv,
+ "taking other backbones claim group: %04x\n",
+ ntohs(bla_dst->group));
+ bla_dst_own->group = bla_dst->group;
+ }
+
+ orig_node_free_ref(orig_node);
+
+ return 2;
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the selected primary interface
+ * @skb: the frame to be checked
+ *
+ * Check if this is a claim frame, and process it accordingly.
+ *
+ * returns 1 if it was a claim frame, otherwise return 0 to
+ * tell the caller that it can use the frame on its own.
+ */
+static int bla_process_claim(struct bat_priv *bat_priv,
+ struct hard_iface *primary_if,
+ struct sk_buff *skb)
+{
+ struct ethhdr *ethhdr;
+ struct vlan_ethhdr *vhdr;
+ struct arphdr *arphdr;
+ uint8_t *hw_src, *hw_dst;
+ struct bla_claim_dst *bla_dst;
+ uint16_t proto;
+ int headlen;
+ short vid = -1;
+ int ret;
+
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+ if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
+ vhdr = (struct vlan_ethhdr *)ethhdr;
+ vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
+ proto = ntohs(vhdr->h_vlan_encapsulated_proto);
+ headlen = sizeof(*vhdr);
+ } else {
+ proto = ntohs(ethhdr->h_proto);
+ headlen = ETH_HLEN;
+ }
+
+ if (proto != ETH_P_ARP)
+ return 0; /* not a claim frame */
+
+ /* this must be an ARP frame. check if it is a claim. */
+
+ if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
+ return 0;
+
+ /* pskb_may_pull() may have modified the pointers, get ethhdr again */
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+ arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen);
+
+ /* Check whether the ARP frame carries valid
+ * IP information
+ */
+
+ if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
+ return 0;
+ if (arphdr->ar_pro != htons(ETH_P_IP))
+ return 0;
+ if (arphdr->ar_hln != ETH_ALEN)
+ return 0;
+ if (arphdr->ar_pln != 4)
+ return 0;
+
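+ /* hw_src/hw_dst point at the sender/target hardware addresses
+ * within the ARP payload
+ */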
+ hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
+ hw_dst = hw_src + ETH_ALEN + 4;
+ bla_dst = (struct bla_claim_dst *)hw_dst;
+
+ /* check if it is a claim frame. */
+ ret = check_claim_group(bat_priv, primary_if, hw_src, hw_dst, ethhdr);
+ if (ret == 1)
+ bat_dbg(DBG_BLA, bat_priv,
+ "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
+ ethhdr->h_source, vid, hw_src, hw_dst);
+
+ if (ret < 2)
+ return ret;
+
+ /* become a backbone gw ourselves on this vlan if not happened yet */
+ bla_update_own_backbone_gw(bat_priv, primary_if, vid);
+
+ /* check for the different types of claim frames ... */
+ switch (bla_dst->type) {
+ case CLAIM_TYPE_ADD:
+ if (handle_claim(bat_priv, primary_if, hw_src,
+ ethhdr->h_source, vid))
+ return 1;
+ break;
+ case CLAIM_TYPE_DEL:
+ if (handle_unclaim(bat_priv, primary_if,
+ ethhdr->h_source, hw_src, vid))
+ return 1;
+ break;
+
+ case CLAIM_TYPE_ANNOUNCE:
+ if (handle_announce(bat_priv, hw_src, ethhdr->h_source, vid))
+ return 1;
+ break;
+ case CLAIM_TYPE_REQUEST:
+ if (handle_request(bat_priv, primary_if, hw_src, ethhdr, vid))
+ return 1;
+ break;
+ }
+
+ bat_dbg(DBG_BLA, bat_priv,
+ "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
+ ethhdr->h_source, vid, hw_src, hw_dst);
+ return 1;
+}
+
+/* Check when we last heard from other nodes, and remove them in case of
+ * a timeout, or clean all backbone gws if now is set.
+ */
+static void bla_purge_backbone_gw(struct bat_priv *bat_priv, int now)
+{
+ struct backbone_gw *backbone_gw;
+ struct hlist_node *node, *node_tmp;
+ struct hlist_head *head;
+ struct hashtable_t *hash;
+ spinlock_t *list_lock; /* protects write access to the hash lists */
+ int i;
+
+ hash = bat_priv->backbone_hash;
+ if (!hash)
+ return;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+ list_lock = &hash->list_locks[i];
+
+ spin_lock_bh(list_lock);
+ hlist_for_each_entry_safe(backbone_gw, node, node_tmp,
+ head, hash_entry) {
+ if (now)
+ goto purge_now;
+ if (!has_timed_out(backbone_gw->lasttime,
+ BLA_BACKBONE_TIMEOUT))
+ continue;
+
+ bat_dbg(DBG_BLA, backbone_gw->bat_priv,
+ "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
+ backbone_gw->orig);
+
+purge_now:
+ /* don't wait for the pending request anymore */
+ if (atomic_read(&backbone_gw->request_sent))
+ atomic_dec(&bat_priv->bla_num_requests);
+
+ bla_del_backbone_claims(backbone_gw);
+
+ hlist_del_rcu(node);
+ backbone_gw_free_ref(backbone_gw);
+ }
+ spin_unlock_bh(list_lock);
+ }
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the selected primary interface, may be NULL if now is set
+ * @now: whether the whole hash shall be wiped now
+ *
+ * Check when we last heard from our own claims, and remove them in case of
+ * a timeout, or clean all claims if now is set
+ */
+static void bla_purge_claims(struct bat_priv *bat_priv,
+ struct hard_iface *primary_if, int now)
+{
+ struct claim *claim;
+ struct hlist_node *node;
+ struct hlist_head *head;
+ struct hashtable_t *hash;
+ int i;
+
+ hash = bat_priv->claim_hash;
+ if (!hash)
+ return;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+ if (now)
+ goto purge_now;
+ if (!compare_eth(claim->backbone_gw->orig,
+ primary_if->net_dev->dev_addr))
+ continue;
+ if (!has_timed_out(claim->lasttime,
+ BLA_CLAIM_TIMEOUT))
+ continue;
+
+ bat_dbg(DBG_BLA, bat_priv,
+ "bla_purge_claims(): %pM, vid %d, time out\n",
+ claim->addr, claim->vid);
+
+purge_now:
+ handle_unclaim(bat_priv, primary_if,
+ claim->backbone_gw->orig,
+ claim->addr, claim->vid);
+ }
+ rcu_read_unlock();
+ }
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the new selected primary_if
+ * @oldif: the old primary interface, may be NULL
+ *
+ * Update the backbone gateways when our own orig address changes.
+ */
+void bla_update_orig_address(struct bat_priv *bat_priv,
+ struct hard_iface *primary_if,
+ struct hard_iface *oldif)
+{
+ struct backbone_gw *backbone_gw;
+ struct hlist_node *node;
+ struct hlist_head *head;
+ struct hashtable_t *hash;
+ int i;
+
+ /* reset bridge loop avoidance group id */
+ bat_priv->claim_dest.group =
+ htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
+
+ if (!oldif) {
+ bla_purge_claims(bat_priv, NULL, 1);
+ bla_purge_backbone_gw(bat_priv, 1);
+ return;
+ }
+
+ hash = bat_priv->backbone_hash;
+ if (!hash)
+ return;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+ /* own orig still holds the old value. */
+ if (!compare_eth(backbone_gw->orig,
+ oldif->net_dev->dev_addr))
+ continue;
+
+ memcpy(backbone_gw->orig,
+ primary_if->net_dev->dev_addr, ETH_ALEN);
+ /* send an announce frame so others will ask for our
+ * claims and update their tables.
+ */
+ bla_send_announce(bat_priv, backbone_gw);
+ }
+ rcu_read_unlock();
+ }
+}
+
+/* (re)start the timer */
+static void bla_start_timer(struct bat_priv *bat_priv)
+{
+ INIT_DELAYED_WORK(&bat_priv->bla_work, bla_periodic_work);
+ queue_delayed_work(bat_event_workqueue, &bat_priv->bla_work,
+ msecs_to_jiffies(BLA_PERIOD_LENGTH));
+}
+
+/* periodic work to do:
+ * * purge structures when they are too old
+ * * send announcements
+ */
+static void bla_periodic_work(struct work_struct *work)
+{
+ struct delayed_work *delayed_work =
+ container_of(work, struct delayed_work, work);
+ struct bat_priv *bat_priv =
+ container_of(delayed_work, struct bat_priv, bla_work);
+ struct hlist_node *node;
+ struct hlist_head *head;
+ struct backbone_gw *backbone_gw;
+ struct hashtable_t *hash;
+ struct hard_iface *primary_if;
+ int i;
+
+ primary_if = primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto out;
+
+ bla_purge_claims(bat_priv, primary_if, 0);
+ bla_purge_backbone_gw(bat_priv, 0);
+
+ if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+ goto out;
+
+ hash = bat_priv->backbone_hash;
+ if (!hash)
+ goto out;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+ if (!compare_eth(backbone_gw->orig,
+ primary_if->net_dev->dev_addr))
+ continue;
+
+ backbone_gw->lasttime = jiffies;
+
+ bla_send_announce(bat_priv, backbone_gw);
+ }
+ rcu_read_unlock();
+ }
+out:
+ if (primary_if)
+ hardif_free_ref(primary_if);
+
+ bla_start_timer(bat_priv);
+}
+
+/* initialize all bla structures */
+int bla_init(struct bat_priv *bat_priv)
+{
+ int i;
+ uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
+ struct hard_iface *primary_if;
+
+ bat_dbg(DBG_BLA, bat_priv, "bla hash registering\n");
+
+ /* setting claim destination address */
+ memcpy(&bat_priv->claim_dest.magic, claim_dest, 3);
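+ /* only the first three bytes (ff:43:05) form the magic,
+ * type and group are filled in below
+ */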
+ bat_priv->claim_dest.type = 0;
+ primary_if = primary_if_get_selected(bat_priv);
+ if (primary_if) {
+ bat_priv->claim_dest.group =
+ htons(crc16(0, primary_if->net_dev->dev_addr,
+ ETH_ALEN));
+ hardif_free_ref(primary_if);
+ } else {
+ bat_priv->claim_dest.group = 0; /* will be set later */
+ }
+
+ /* initialize the duplicate list */
+ for (i = 0; i < DUPLIST_SIZE; i++)
+ bat_priv->bcast_duplist[i].entrytime =
+ jiffies - msecs_to_jiffies(DUPLIST_TIMEOUT);
+ bat_priv->bcast_duplist_curr = 0;
+
+ if (bat_priv->claim_hash)
+ return 1;
+
+ bat_priv->claim_hash = hash_new(128);
+ bat_priv->backbone_hash = hash_new(32);
+
+ if (!bat_priv->claim_hash || !bat_priv->backbone_hash)
+ return -1;
+
+ bat_dbg(DBG_BLA, bat_priv, "bla hashes initialized\n");
+
+ bla_start_timer(bat_priv);
+ return 1;
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @bcast_packet: the broadcast packet to be checked
+ * @hdr_size: maximum length of the frame
+ *
+ * check if the packet is on our broadcast duplicate list. Another gateway
+ * have sent the same packet because it is connected to the same backbone,
+ * so we have to remove this duplicate.
+ *
+ * This is performed by checking the CRC, which with a good chance
+ * tells us whether it is the same packet. If it was furthermore
+ * sent by another host, drop it. We allow equal packets from
+ * the same host, however, as this might be intended.
+ */
+int bla_check_bcast_duplist(struct bat_priv *bat_priv,
+ struct bcast_packet *bcast_packet,
+ int hdr_size)
+{
+ int i, length, curr;
+ uint8_t *content;
+ uint16_t crc;
+ struct bcast_duplist_entry *entry;
+
+ length = hdr_size - sizeof(*bcast_packet);
+ content = (uint8_t *)bcast_packet;
+ content += sizeof(*bcast_packet);
+
+ /* calculate the crc ... */
+ crc = crc16(0, content, length);
+
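+ /* search the ring buffer, newest entry first */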
+ for (i = 0; i < DUPLIST_SIZE; i++) {
+ curr = (bat_priv->bcast_duplist_curr + i) % DUPLIST_SIZE;
+ entry = &bat_priv->bcast_duplist[curr];
+
+ /* we can stop searching if the entry is too old;
+ * later entries will be even older
+ */
+ if (has_timed_out(entry->entrytime, DUPLIST_TIMEOUT))
+ break;
+
+ if (entry->crc != crc)
+ continue;
+
+ if (compare_eth(entry->orig, bcast_packet->orig))
+ continue;
+
+ /* this entry seems to match: same crc, not too old,
+ * and from another gw. therefore return 1 to forbid it.
+ */
+ return 1;
+ }
+ /* not found, add a new entry (overwrite the oldest entry) */
+ curr = (bat_priv->bcast_duplist_curr + DUPLIST_SIZE - 1) % DUPLIST_SIZE;
+ entry = &bat_priv->bcast_duplist[curr];
+ entry->crc = crc;
+ entry->entrytime = jiffies;
+ memcpy(entry->orig, bcast_packet->orig, ETH_ALEN);
+ bat_priv->bcast_duplist_curr = curr;
+
+ /* allow it, it's the first occurrence. */
+ return 0;
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: originator mac address
+ *
+ * check if the originator is a gateway for any VLAN ID.
+ *
+ * returns 1 if it is found, 0 otherwise
+ *
+ */
+
+int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)
+{
+ struct hashtable_t *hash = bat_priv->backbone_hash;
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct backbone_gw *backbone_gw;
+ int i;
+
+ if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+ return 0;
+
+ if (!hash)
+ return 0;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+ if (compare_eth(backbone_gw->orig, orig)) {
+ rcu_read_unlock();
+ return 1;
+ }
+ }
+ rcu_read_unlock();
+ }
+
+ return 0;
+}
+
+/**
+ * @skb: the frame to be checked
+ * @orig_node: the orig_node of the frame
+ * @hdr_size: the size of the batman header preceding the ethernet frame
+ *
+ * bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1
+ * if the orig_node is a backbone gateway for the frame's VLAN,
+ * otherwise it returns 0.
+ *
+ */
+int bla_is_backbone_gw(struct sk_buff *skb,
+ struct orig_node *orig_node, int hdr_size)
+{
+ struct ethhdr *ethhdr;
+ struct vlan_ethhdr *vhdr;
+ struct backbone_gw *backbone_gw;
+ short vid = -1;
+
+ if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
+ return 0;
+
+ /* first, find out the vid. */
+ if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
+ return 0;
+
+ ethhdr = (struct ethhdr *)(((uint8_t *)skb->data) + hdr_size);
+
+ if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
+ if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr)))
+ return 0;
+
+ vhdr = (struct vlan_ethhdr *)(((uint8_t *)skb->data) +
+ hdr_size);
+ vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
+ }
+
+ /* see if this originator is a backbone gw for this VLAN */
+
+ backbone_gw = backbone_hash_find(orig_node->bat_priv,
+ orig_node->orig, vid);
+ if (!backbone_gw)
+ return 0;
+
+ backbone_gw_free_ref(backbone_gw);
+ return 1;
+}
+
+/* free all bla structures (for softinterface free or module unload) */
+void bla_free(struct bat_priv *bat_priv)
+{
+ struct hard_iface *primary_if;
+
+ cancel_delayed_work_sync(&bat_priv->bla_work);
+ primary_if = primary_if_get_selected(bat_priv);
+
+ if (bat_priv->claim_hash) {
+ bla_purge_claims(bat_priv, primary_if, 1);
+ hash_destroy(bat_priv->claim_hash);
+ bat_priv->claim_hash = NULL;
+ }
+ if (bat_priv->backbone_hash) {
+ bla_purge_backbone_gw(bat_priv, 1);
+ hash_destroy(bat_priv->backbone_hash);
+ bat_priv->backbone_hash = NULL;
+ }
+ if (primary_if)
+ hardif_free_ref(primary_if);
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the frame to be checked
+ * @vid: the VLAN ID of the frame
+ *
+ * bla_rx checks whether:
+ *  * we have to race for a claim
+ *  * the frame is allowed on the LAN
+ *
+ * In these cases, the skb is further handled by this function and 1
+ * is returned, otherwise it returns 0 and the caller shall further
+ * process the skb.
+ *
+ */
+int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
+{
+ struct ethhdr *ethhdr;
+ struct claim search_claim, *claim = NULL;
+ struct hard_iface *primary_if;
+ int ret;
+
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+ primary_if = primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto handled;
+
+ if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+ goto allow;
+
+ if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
+ /* don't allow broadcasts while requests are in flight */
+ if (is_multicast_ether_addr(ethhdr->h_dest))
+ goto handled;
+
+ memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
+ search_claim.vid = vid;
+ claim = claim_hash_find(bat_priv, &search_claim);
+
+ if (!claim) {
+ /* no claim exists yet, claim it for us!
+ * (possible optimization: race for the claim)
+ */
+ handle_claim(bat_priv, primary_if,
+ primary_if->net_dev->dev_addr,
+ ethhdr->h_source, vid);
+ goto allow;
+ }
+
+ /* if it is our own claim ... */
+ if (compare_eth(claim->backbone_gw->orig,
+ primary_if->net_dev->dev_addr)) {
+ /* ... allow it in any case */
+ claim->lasttime = jiffies;
+ goto allow;
+ }
+
+ /* if it is a broadcast ... */
+ if (is_multicast_ether_addr(ethhdr->h_dest)) {
+ /* ... drop it. the responsible gateway is in charge. */
+ goto handled;
+ } else {
+ /* seems the client considers us as its best gateway.
+ * send a claim and update the claim table
+ * immediately.
+ */
+ handle_claim(bat_priv, primary_if,
+ primary_if->net_dev->dev_addr,
+ ethhdr->h_source, vid);
+ goto allow;
+ }
+allow:
+ bla_update_own_backbone_gw(bat_priv, primary_if, vid);
+ ret = 0;
+ goto out;
+
+handled:
+ kfree_skb(skb);
+ ret = 1;
+
+out:
+ if (primary_if)
+ hardif_free_ref(primary_if);
+ if (claim)
+ claim_free_ref(claim);
+ return ret;
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the frame to be checked
+ * @vid: the VLAN ID of the frame
+ *
+ * bla_tx checks whether:
+ *  * a claim was received which has to be processed
+ *  * the frame is allowed on the mesh
+ *
+ * In these cases, the skb is further handled by this function and 1
+ * is returned, otherwise it returns 0 and the caller shall further
+ * process the skb.
+ *
+ */
+int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
+{
+ struct ethhdr *ethhdr;
+ struct claim search_claim, *claim = NULL;
+ struct hard_iface *primary_if;
+ int ret = 0;
+
+ primary_if = primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto out;
+
+ if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+ goto allow;
+
+ /* in VLAN case, the mac header might not be set. */
+ skb_reset_mac_header(skb);
+
+ if (bla_process_claim(bat_priv, primary_if, skb))
+ goto handled;
+
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+ if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
+ /* don't allow broadcasts while requests are in flight */
+ if (is_multicast_ether_addr(ethhdr->h_dest))
+ goto handled;
+
+ memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
+ search_claim.vid = vid;
+
+ claim = claim_hash_find(bat_priv, &search_claim);
+
+ /* if no claim exists, allow it. */
+ if (!claim)
+ goto allow;
+
+ /* check if we are responsible. */
+ if (compare_eth(claim->backbone_gw->orig,
+ primary_if->net_dev->dev_addr)) {
+ /* if yes, the client has roamed and we have
+ * to unclaim it.
+ */
+ handle_unclaim(bat_priv, primary_if,
+ primary_if->net_dev->dev_addr,
+ ethhdr->h_source, vid);
+ goto allow;
+ }
+
+ /* check if it is a multicast/broadcast frame */
+ if (is_multicast_ether_addr(ethhdr->h_dest)) {
+ /* drop it. the responsible gateway has forwarded it into
+ * the backbone network.
+ */
+ goto handled;
+ } else {
+ /* we must allow it. at least if we are
+ * responsible for the DESTINATION.
+ */
+ goto allow;
+ }
+allow:
+ bla_update_own_backbone_gw(bat_priv, primary_if, vid);
+ ret = 0;
+ goto out;
+handled:
+ ret = 1;
+out:
+ if (primary_if)
+ hardif_free_ref(primary_if);
+ if (claim)
+ claim_free_ref(claim);
+ return ret;
+}
+
+int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
+{
+ struct net_device *net_dev = (struct net_device *)seq->private;
+ struct bat_priv *bat_priv = netdev_priv(net_dev);
+ struct hashtable_t *hash = bat_priv->claim_hash;
+ struct claim *claim;
+ struct hard_iface *primary_if;
+ struct hlist_node *node;
+ struct hlist_head *head;
+ uint32_t i;
+ bool is_own;
+ int ret = 0;
+
+ primary_if = primary_if_get_selected(bat_priv);
+ if (!primary_if) {
+ ret = seq_printf(seq,
+ "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
+ net_dev->name);
+ goto out;
+ }
+
+ if (primary_if->if_status != IF_ACTIVE) {
+ ret = seq_printf(seq,
+ "BATMAN mesh %s disabled - primary interface not active\n",
+ net_dev->name);
+ goto out;
+ }
+
+ seq_printf(seq,
+ "Claims announced for the mesh %s (orig %pM, group id %04x)\n",
+ net_dev->name, primary_if->net_dev->dev_addr,
+ ntohs(bat_priv->claim_dest.group));
+ seq_printf(seq, " %-17s %-5s %-17s [o] (%-4s)\n",
+ "Client", "VID", "Originator", "CRC");
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+ is_own = compare_eth(claim->backbone_gw->orig,
+ primary_if->net_dev->dev_addr);
+ seq_printf(seq, " * %pM on % 5d by %pM [%c] (%04x)\n",
+ claim->addr, claim->vid,
+ claim->backbone_gw->orig,
+ (is_own ? 'x' : ' '),
+ claim->backbone_gw->crc);
+ }
+ rcu_read_unlock();
+ }
+out:
+ if (primary_if)
+ hardif_free_ref(primary_if);
+ return ret;
+}
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
new file mode 100644
index 000000000000..e39f93acc28f
--- /dev/null
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
+ *
+ * Simon Wunderlich
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ */
+
+#ifndef _NET_BATMAN_ADV_BLA_H_
+#define _NET_BATMAN_ADV_BLA_H_
+
+#ifdef CONFIG_BATMAN_ADV_BLA
+int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
+int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
+int bla_is_backbone_gw(struct sk_buff *skb,
+ struct orig_node *orig_node, int hdr_size);
+int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
+int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig);
+int bla_check_bcast_duplist(struct bat_priv *bat_priv,
+ struct bcast_packet *bcast_packet, int hdr_size);
+void bla_update_orig_address(struct bat_priv *bat_priv,
+ struct hard_iface *primary_if,
+ struct hard_iface *oldif);
+int bla_init(struct bat_priv *bat_priv);
+void bla_free(struct bat_priv *bat_priv);
+
+#define BLA_CRC_INIT 0
+#else /* ifdef CONFIG_BATMAN_ADV_BLA */
+
+static inline int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb,
+ short vid)
+{
+ return 0;
+}
+
+static inline int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb,
+ short vid)
+{
+ return 0;
+}
+
+static inline int bla_is_backbone_gw(struct sk_buff *skb,
+ struct orig_node *orig_node,
+ int hdr_size)
+{
+ return 0;
+}
+
+static inline int bla_claim_table_seq_print_text(struct seq_file *seq,
+ void *offset)
+{
+ return 0;
+}
+
+static inline int bla_is_backbone_gw_orig(struct bat_priv *bat_priv,
+ uint8_t *orig)
+{
+ return 0;
+}
+
+static inline int bla_check_bcast_duplist(struct bat_priv *bat_priv,
+ struct bcast_packet *bcast_packet,
+ int hdr_size)
+{
+ return 0;
+}
+
+static inline void bla_update_orig_address(struct bat_priv *bat_priv,
+ struct hard_iface *primary_if,
+ struct hard_iface *oldif)
+{
+}
+
+static inline int bla_init(struct bat_priv *bat_priv)
+{
+ return 1;
+}
+
+static inline void bla_free(struct bat_priv *bat_priv)
+{
+}
+
+#endif /* ifdef CONFIG_BATMAN_ADV_BLA */
+
+#endif /* ifndef _NET_BATMAN_ADV_BLA_H_ */
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 6f9b9b78f77d..47f7186dcefc 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -558,10 +558,10 @@ static bool is_type_dhcprequest(struct sk_buff *skb, int header_len)
p++;
/* ...and then we jump over the data */
- if (pkt_len < *p)
+ if (pkt_len < 1 + (*p))
goto out;
- pkt_len -= *p;
- p += (*p);
+ pkt_len -= 1 + (*p);
+ p += 1 + (*p);
}
}
out:
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 377897701a85..dc334fa89847 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -28,15 +28,10 @@
#include "bat_sysfs.h"
#include "originator.h"
#include "hash.h"
+#include "bridge_loop_avoidance.h"
#include <linux/if_arp.h>
-
-static int batman_skb_recv(struct sk_buff *skb,
- struct net_device *dev,
- struct packet_type *ptype,
- struct net_device *orig_dev);
-
void hardif_free_rcu(struct rcu_head *rcu)
{
struct hard_iface *hard_iface;
@@ -107,7 +102,8 @@ out:
return hard_iface;
}
-static void primary_if_update_addr(struct bat_priv *bat_priv)
+static void primary_if_update_addr(struct bat_priv *bat_priv,
+ struct hard_iface *oldif)
{
struct vis_packet *vis_packet;
struct hard_iface *primary_if;
@@ -122,6 +118,7 @@ static void primary_if_update_addr(struct bat_priv *bat_priv)
memcpy(vis_packet->sender_orig,
primary_if->net_dev->dev_addr, ETH_ALEN);
+ bla_update_orig_address(bat_priv, primary_if, oldif);
out:
if (primary_if)
hardif_free_ref(primary_if);
@@ -140,14 +137,15 @@ static void primary_if_select(struct bat_priv *bat_priv,
curr_hard_iface = rcu_dereference_protected(bat_priv->primary_if, 1);
rcu_assign_pointer(bat_priv->primary_if, new_hard_iface);
- if (curr_hard_iface)
- hardif_free_ref(curr_hard_iface);
-
if (!new_hard_iface)
- return;
+ goto out;
+
+ bat_priv->bat_algo_ops->bat_primary_iface_set(new_hard_iface);
+ primary_if_update_addr(bat_priv, curr_hard_iface);
- bat_priv->bat_algo_ops->bat_ogm_init_primary(new_hard_iface);
- primary_if_update_addr(bat_priv);
+out:
+ if (curr_hard_iface)
+ hardif_free_ref(curr_hard_iface);
}
static bool hardif_is_iface_up(const struct hard_iface *hard_iface)
@@ -175,9 +173,9 @@ static void check_known_mac_addr(const struct net_device *net_dev)
net_dev->dev_addr))
continue;
- pr_warning("The newly added mac address (%pM) already exists on: %s\n",
- net_dev->dev_addr, hard_iface->net_dev->name);
- pr_warning("It is strongly recommended to keep mac addresses unique to avoid problems!\n");
+ pr_warn("The newly added mac address (%pM) already exists on: %s\n",
+ net_dev->dev_addr, hard_iface->net_dev->name);
+ pr_warn("It is strongly recommended to keep mac addresses unique to avoid problems!\n");
}
rcu_read_unlock();
}
@@ -230,7 +228,7 @@ static void hardif_activate_interface(struct hard_iface *hard_iface)
bat_priv = netdev_priv(hard_iface->soft_iface);
- bat_priv->bat_algo_ops->bat_ogm_update_mac(hard_iface);
+ bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface);
hard_iface->if_status = IF_TO_BE_ACTIVATED;
/**
@@ -300,22 +298,17 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
if (!softif_is_valid(soft_iface)) {
pr_err("Can't create batman mesh interface %s: already exists as regular interface\n",
soft_iface->name);
- dev_put(soft_iface);
ret = -EINVAL;
- goto err;
+ goto err_dev;
}
hard_iface->soft_iface = soft_iface;
bat_priv = netdev_priv(hard_iface->soft_iface);
- bat_priv->bat_algo_ops->bat_ogm_init(hard_iface);
-
- if (!hard_iface->packet_buff) {
- bat_err(hard_iface->soft_iface,
- "Can't add interface packet (%s): out of memory\n",
- hard_iface->net_dev->name);
+ ret = bat_priv->bat_algo_ops->bat_iface_enable(hard_iface);
+ if (ret < 0) {
ret = -ENOMEM;
- goto err;
+ goto err_dev;
}
hard_iface->if_num = bat_priv->num_ifaces;
@@ -328,7 +321,6 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
dev_add_pack(&hard_iface->batman_adv_ptype);
- atomic_set(&hard_iface->seqno, 1);
atomic_set(&hard_iface->frag_seqno, 1);
bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
hard_iface->net_dev->name);
@@ -360,6 +352,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
out:
return 0;
+err_dev:
+ dev_put(soft_iface);
err:
hardif_free_ref(hard_iface);
return ret;
@@ -394,8 +388,7 @@ void hardif_disable_interface(struct hard_iface *hard_iface)
hardif_free_ref(new_if);
}
- kfree(hard_iface->packet_buff);
- hard_iface->packet_buff = NULL;
+ bat_priv->bat_algo_ops->bat_iface_disable(hard_iface);
hard_iface->if_status = IF_NOT_IN_USE;
/* delete all references to this hard_iface */
@@ -447,6 +440,13 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
check_known_mac_addr(hard_iface->net_dev);
list_add_tail_rcu(&hard_iface->list, &hardif_list);
+ /* This can't be called via a bat_priv callback because
+ * we have no bat_priv yet.
+ */
+ atomic_set(&hard_iface->seqno, 1);
+ hard_iface->packet_buff = NULL;
+
return hard_iface;
free_if:
@@ -524,14 +524,14 @@ static int hard_if_event(struct notifier_block *this,
check_known_mac_addr(hard_iface->net_dev);
bat_priv = netdev_priv(hard_iface->soft_iface);
- bat_priv->bat_algo_ops->bat_ogm_update_mac(hard_iface);
+ bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface);
primary_if = primary_if_get_selected(bat_priv);
if (!primary_if)
goto hardif_put;
if (hard_iface == primary_if)
- primary_if_update_addr(bat_priv);
+ primary_if_update_addr(bat_priv, NULL);
break;
default:
break;
@@ -545,114 +545,6 @@ out:
return NOTIFY_DONE;
}
-/* incoming packets with the batman ethertype received on any active hard
- * interface */
-static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *ptype,
- struct net_device *orig_dev)
-{
- struct bat_priv *bat_priv;
- struct batman_ogm_packet *batman_ogm_packet;
- struct hard_iface *hard_iface;
- int ret;
-
- hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype);
- skb = skb_share_check(skb, GFP_ATOMIC);
-
- /* skb was released by skb_share_check() */
- if (!skb)
- goto err_out;
-
- /* packet should hold at least type and version */
- if (unlikely(!pskb_may_pull(skb, 2)))
- goto err_free;
-
- /* expect a valid ethernet header here. */
- if (unlikely(skb->mac_len != sizeof(struct ethhdr) ||
- !skb_mac_header(skb)))
- goto err_free;
-
- if (!hard_iface->soft_iface)
- goto err_free;
-
- bat_priv = netdev_priv(hard_iface->soft_iface);
-
- if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
- goto err_free;
-
- /* discard frames on not active interfaces */
- if (hard_iface->if_status != IF_ACTIVE)
- goto err_free;
-
- batman_ogm_packet = (struct batman_ogm_packet *)skb->data;
-
- if (batman_ogm_packet->header.version != COMPAT_VERSION) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: incompatible batman version (%i)\n",
- batman_ogm_packet->header.version);
- goto err_free;
- }
-
- /* all receive handlers return whether they received or reused
- * the supplied skb. if not, we have to free the skb. */
-
- switch (batman_ogm_packet->header.packet_type) {
- /* batman originator packet */
- case BAT_OGM:
- ret = recv_bat_ogm_packet(skb, hard_iface);
- break;
-
- /* batman icmp packet */
- case BAT_ICMP:
- ret = recv_icmp_packet(skb, hard_iface);
- break;
-
- /* unicast packet */
- case BAT_UNICAST:
- ret = recv_unicast_packet(skb, hard_iface);
- break;
-
- /* fragmented unicast packet */
- case BAT_UNICAST_FRAG:
- ret = recv_ucast_frag_packet(skb, hard_iface);
- break;
-
- /* broadcast packet */
- case BAT_BCAST:
- ret = recv_bcast_packet(skb, hard_iface);
- break;
-
- /* vis packet */
- case BAT_VIS:
- ret = recv_vis_packet(skb, hard_iface);
- break;
- /* Translation table query (request or response) */
- case BAT_TT_QUERY:
- ret = recv_tt_query(skb, hard_iface);
- break;
- /* Roaming advertisement */
- case BAT_ROAM_ADV:
- ret = recv_roam_adv(skb, hard_iface);
- break;
- default:
- ret = NET_RX_DROP;
- }
-
- if (ret == NET_RX_DROP)
- kfree_skb(skb);
-
- /* return NET_RX_SUCCESS in any case as we
- * most probably dropped the packet for
- * routing-logical reasons. */
-
- return NET_RX_SUCCESS;
-
-err_free:
- kfree_skb(skb);
-err_out:
- return NET_RX_DROP;
-}
-
/* This function returns true if the interface represented by ifindex is a
* 802.11 wireless device */
bool is_wifi_iface(int ifindex)
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index b87518edcef9..2e98a57f3407 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -175,13 +175,13 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
if (len >= sizeof(struct icmp_packet_rr))
packet_len = sizeof(struct icmp_packet_rr);
- skb = dev_alloc_skb(packet_len + sizeof(struct ethhdr));
+ skb = dev_alloc_skb(packet_len + ETH_HLEN);
if (!skb) {
len = -ENOMEM;
goto out;
}
- skb_reserve(skb, sizeof(struct ethhdr));
+ skb_reserve(skb, ETH_HLEN);
icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len);
if (copy_from_user(icmp_packet, buff, packet_len)) {
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 6d51caaf8cec..083a2993efe4 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -30,6 +30,7 @@
#include "translation-table.h"
#include "hard-interface.h"
#include "gateway_client.h"
+#include "bridge_loop_avoidance.h"
#include "vis.h"
#include "hash.h"
#include "bat_algo.h"
@@ -38,6 +39,7 @@
/* List manipulations on hardif_list have to be rtnl_lock()'ed,
* list traversals just rcu-locked */
struct list_head hardif_list;
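+/* one receive handler slot per possible packet type */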
+static int (*recv_packet_handler[256])(struct sk_buff *, struct hard_iface *);
char bat_routing_algo[20] = "BATMAN IV";
static struct hlist_head bat_algo_list;
@@ -45,11 +47,15 @@ unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct workqueue_struct *bat_event_workqueue;
+static void recv_handler_init(void);
+
static int __init batman_init(void)
{
INIT_LIST_HEAD(&hardif_list);
INIT_HLIST_HEAD(&bat_algo_list);
+ recv_handler_init();
+
bat_iv_init();
/* the name should not be longer than 10 chars - see
@@ -96,13 +102,10 @@ int mesh_init(struct net_device *soft_iface)
spin_lock_init(&bat_priv->gw_list_lock);
spin_lock_init(&bat_priv->vis_hash_lock);
spin_lock_init(&bat_priv->vis_list_lock);
- spin_lock_init(&bat_priv->softif_neigh_lock);
- spin_lock_init(&bat_priv->softif_neigh_vid_lock);
INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
INIT_HLIST_HEAD(&bat_priv->gw_list);
- INIT_HLIST_HEAD(&bat_priv->softif_neigh_vids);
INIT_LIST_HEAD(&bat_priv->tt_changes_list);
INIT_LIST_HEAD(&bat_priv->tt_req_list);
INIT_LIST_HEAD(&bat_priv->tt_roam_list);
@@ -118,6 +121,9 @@ int mesh_init(struct net_device *soft_iface)
if (vis_init(bat_priv) < 1)
goto err;
+ if (bla_init(bat_priv) < 1)
+ goto err;
+
atomic_set(&bat_priv->gw_reselect, 0);
atomic_set(&bat_priv->mesh_state, MESH_ACTIVE);
goto end;
@@ -145,7 +151,7 @@ void mesh_free(struct net_device *soft_iface)
tt_free(bat_priv);
- softif_neigh_purge(bat_priv);
+ bla_free(bat_priv);
atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
}
@@ -178,6 +184,120 @@ int is_my_mac(const uint8_t *addr)
return 0;
}
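+/* fallback for packet types without a registered handler */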
+static int recv_unhandled_packet(struct sk_buff *skb,
+ struct hard_iface *recv_if)
+{
+ return NET_RX_DROP;
+}
+
+/* incoming packets with the batman ethertype received on any active hard
+ * interface
+ */
+int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype, struct net_device *orig_dev)
+{
+ struct bat_priv *bat_priv;
+ struct batman_ogm_packet *batman_ogm_packet;
+ struct hard_iface *hard_iface;
+ uint8_t idx;
+ int ret;
+
+ hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype);
+ skb = skb_share_check(skb, GFP_ATOMIC);
+
+ /* skb was released by skb_share_check() */
+ if (!skb)
+ goto err_out;
+
+ /* packet should hold at least type and version */
+ if (unlikely(!pskb_may_pull(skb, 2)))
+ goto err_free;
+
+ /* expect a valid ethernet header here. */
+ if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
+ goto err_free;
+
+ if (!hard_iface->soft_iface)
+ goto err_free;
+
+ bat_priv = netdev_priv(hard_iface->soft_iface);
+
+ if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
+ goto err_free;
+
+ /* discard frames on not active interfaces */
+ if (hard_iface->if_status != IF_ACTIVE)
+ goto err_free;
+
+ batman_ogm_packet = (struct batman_ogm_packet *)skb->data;
+
+ if (batman_ogm_packet->header.version != COMPAT_VERSION) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Drop packet: incompatible batman version (%i)\n",
+ batman_ogm_packet->header.version);
+ goto err_free;
+ }
+
+ /* all receive handlers return whether they received or reused
+ * the supplied skb. if not, we have to free the skb.
+ */
+ idx = batman_ogm_packet->header.packet_type;
+ ret = (*recv_packet_handler[idx])(skb, hard_iface);
+
+ if (ret == NET_RX_DROP)
+ kfree_skb(skb);
+
+ /* return NET_RX_SUCCESS in any case as we
+ * most probably dropped the packet for
+ * routing-logical reasons.
+ */
+ return NET_RX_SUCCESS;
+
+err_free:
+ kfree_skb(skb);
+err_out:
+ return NET_RX_DROP;
+}
+
+static void recv_handler_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(recv_packet_handler); i++)
+ recv_packet_handler[i] = recv_unhandled_packet;
+
+ /* batman icmp packet */
+ recv_packet_handler[BAT_ICMP] = recv_icmp_packet;
+ /* unicast packet */
+ recv_packet_handler[BAT_UNICAST] = recv_unicast_packet;
+ /* fragmented unicast packet */
+ recv_packet_handler[BAT_UNICAST_FRAG] = recv_ucast_frag_packet;
+ /* broadcast packet */
+ recv_packet_handler[BAT_BCAST] = recv_bcast_packet;
+ /* vis packet */
+ recv_packet_handler[BAT_VIS] = recv_vis_packet;
+ /* Translation table query (request or response) */
+ recv_packet_handler[BAT_TT_QUERY] = recv_tt_query;
+ /* Roaming advertisement */
+ recv_packet_handler[BAT_ROAM_ADV] = recv_roam_adv;
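+ /* BAT_OGM is not set up here; the active routing algorithm is
+ * expected to hook it via recv_handler_register()
+ */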
+}
+
+int recv_handler_register(uint8_t packet_type,
+ int (*recv_handler)(struct sk_buff *,
+ struct hard_iface *))
+{
+ if (recv_packet_handler[packet_type] != &recv_unhandled_packet)
+ return -EBUSY;
+
+ recv_packet_handler[packet_type] = recv_handler;
+ return 0;
+}
+
+void recv_handler_unregister(uint8_t packet_type)
+{
+ recv_packet_handler[packet_type] = recv_unhandled_packet;
+}
+
static struct bat_algo_ops *bat_algo_get(char *name)
{
struct bat_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
@@ -207,12 +327,12 @@ int bat_algo_register(struct bat_algo_ops *bat_algo_ops)
}
/* all algorithms must implement all ops (for now) */
- if (!bat_algo_ops->bat_ogm_init ||
- !bat_algo_ops->bat_ogm_init_primary ||
- !bat_algo_ops->bat_ogm_update_mac ||
+ if (!bat_algo_ops->bat_iface_enable ||
+ !bat_algo_ops->bat_iface_disable ||
+ !bat_algo_ops->bat_iface_update_mac ||
+ !bat_algo_ops->bat_primary_iface_set ||
!bat_algo_ops->bat_ogm_schedule ||
- !bat_algo_ops->bat_ogm_emit ||
- !bat_algo_ops->bat_ogm_receive) {
+ !bat_algo_ops->bat_ogm_emit) {
pr_info("Routing algo '%s' does not implement required ops\n",
bat_algo_ops->name);
goto out;
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 94fa1c2393a6..f4a3ec003479 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -28,7 +28,7 @@
#define DRIVER_DEVICE "batman-adv"
#ifndef SOURCE_VERSION
-#define SOURCE_VERSION "2012.1.0"
+#define SOURCE_VERSION "2012.2.0"
#endif
/* B.A.T.M.A.N. parameters */
@@ -65,7 +65,7 @@
#define NULL_IFINDEX 0 /* dummy ifindex used to avoid iface checks */
-#define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE)
+#define NUM_WORDS BITS_TO_LONGS(TQ_LOCAL_WINDOW_SIZE)
#define LOG_BUF_LEN 8192 /* has to be a power of 2 */
@@ -80,8 +80,12 @@
#define MAX_AGGREGATION_BYTES 512
#define MAX_AGGREGATION_MS 100
-#define SOFTIF_NEIGH_TIMEOUT 180000 /* 3 minutes */
+#define BLA_PERIOD_LENGTH 10000 /* 10 seconds */
+#define BLA_BACKBONE_TIMEOUT (BLA_PERIOD_LENGTH * 3)
+#define BLA_CLAIM_TIMEOUT (BLA_PERIOD_LENGTH * 10)
+#define DUPLIST_SIZE 16
+#define DUPLIST_TIMEOUT 500 /* 500 ms */
/* don't reset again within 30 seconds */
#define RESET_PROTECTION_MS 30000
#define EXPECTED_SEQNO_RANGE 65536
@@ -119,7 +123,8 @@ enum dbg_level {
DBG_BATMAN = 1 << 0,
DBG_ROUTES = 1 << 1, /* route added / changed / deleted */
DBG_TT = 1 << 2, /* translation table operations */
- DBG_ALL = 7
+ DBG_BLA = 1 << 3, /* bridge loop avoidance */
+ DBG_ALL = 15
};
/* Kernel headers */
@@ -150,6 +155,12 @@ void mesh_free(struct net_device *soft_iface);
void inc_module_count(void);
void dec_module_count(void);
int is_my_mac(const uint8_t *addr);
+int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype, struct net_device *orig_dev);
+int recv_handler_register(uint8_t packet_type,
+ int (*recv_handler)(struct sk_buff *,
+ struct hard_iface *));
+void recv_handler_unregister(uint8_t packet_type);
int bat_algo_register(struct bat_algo_ops *bat_algo_ops);
int bat_algo_select(struct bat_priv *bat_priv, char *name);
int bat_algo_seq_print_text(struct seq_file *seq, void *offset);
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 43c0a4f1399e..41147942ba53 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -28,13 +28,15 @@
#include "hard-interface.h"
#include "unicast.h"
#include "soft-interface.h"
+#include "bridge_loop_avoidance.h"
static void purge_orig(struct work_struct *work);
static void start_purge_timer(struct bat_priv *bat_priv)
{
INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig);
- queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ);
+ queue_delayed_work(bat_event_workqueue,
+ &bat_priv->orig_work, msecs_to_jiffies(1000));
}
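
msecs_to_jiffies(1000) schedules the same one-second delay as the old 1 * HZ but states it in wall-clock units and stays correct for intervals that are not whole multiples of a tick; the same conversion in send.c below replaces ((5 * HZ) / 1000), which truncates to zero whenever HZ < 200, with msecs_to_jiffies(5), which rounds up to at least one jiffy. A simplified model of the conversion (the real helper additionally clamps large values to MAX_JIFFY_OFFSET):

	/* model only: jiffies = ceil(ms * hz / 1000) */
	static unsigned long msecs_to_jiffies_model(unsigned int ms,
						    unsigned int hz)
	{
		return ((unsigned long)ms * hz + 999) / 1000;
	}
	/* msecs_to_jiffies_model(1000, hz) == hz, i.e. one second, for any hz;
	 * msecs_to_jiffies_model(5, 100) == 1, never 0 */
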
/* returns 1 if they are the same originator */
@@ -83,35 +85,30 @@ struct neigh_node *orig_node_get_router(struct orig_node *orig_node)
return router;
}
-struct neigh_node *create_neighbor(struct orig_node *orig_node,
- struct orig_node *orig_neigh_node,
- const uint8_t *neigh,
- struct hard_iface *if_incoming)
+struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface,
+ const uint8_t *neigh_addr,
+ uint32_t seqno)
{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct neigh_node *neigh_node;
- bat_dbg(DBG_BATMAN, bat_priv,
- "Creating new last-hop neighbor of originator\n");
-
neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
if (!neigh_node)
- return NULL;
+ goto out;
INIT_HLIST_NODE(&neigh_node->list);
- INIT_LIST_HEAD(&neigh_node->bonding_list);
- spin_lock_init(&neigh_node->tq_lock);
- memcpy(neigh_node->addr, neigh, ETH_ALEN);
- neigh_node->orig_node = orig_neigh_node;
- neigh_node->if_incoming = if_incoming;
+ memcpy(neigh_node->addr, neigh_addr, ETH_ALEN);
+ spin_lock_init(&neigh_node->lq_update_lock);
/* extra reference for return */
atomic_set(&neigh_node->refcount, 2);
- spin_lock_bh(&orig_node->neigh_list_lock);
- hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
- spin_unlock_bh(&orig_node->neigh_list_lock);
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Creating new neighbor %pM, initial seqno %d\n",
+ neigh_addr, seqno);
+
+out:
return neigh_node;
}
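
batadv_neigh_node_new() returns with the refcount preset to 2: one reference for the list the caller is about to link the neighbor into, and one for the returned pointer itself (the "extra reference for return" above). A userspace model of the convention, with hypothetical names:

	#include <stdlib.h>

	struct node { int refcount; };   /* the kernel uses atomic_t */

	static struct node *node_new(void)
	{
		struct node *n = calloc(1, sizeof(*n));
		if (n)
			n->refcount = 2; /* one for the list, one for the caller */
		return n;
	}

	static void node_put(struct node *n)
	{
		if (--n->refcount == 0)
			free(n);
	}

When the caller is done it drops its reference with node_put(), leaving exactly the list's reference alive; forgetting that put is the classic leak with this pattern.
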
@@ -273,6 +270,7 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
struct hlist_node *node, *node_tmp;
struct neigh_node *neigh_node;
bool neigh_purged = false;
+ unsigned long last_seen;
*best_neigh_node = NULL;
@@ -282,11 +280,13 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
hlist_for_each_entry_safe(neigh_node, node, node_tmp,
&orig_node->neigh_list, list) {
- if ((has_timed_out(neigh_node->last_valid, PURGE_TIMEOUT)) ||
+ if ((has_timed_out(neigh_node->last_seen, PURGE_TIMEOUT)) ||
(neigh_node->if_incoming->if_status == IF_INACTIVE) ||
(neigh_node->if_incoming->if_status == IF_NOT_IN_USE) ||
(neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {
+ last_seen = neigh_node->last_seen;
+
if ((neigh_node->if_incoming->if_status ==
IF_INACTIVE) ||
(neigh_node->if_incoming->if_status ==
@@ -299,9 +299,9 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
neigh_node->if_incoming->net_dev->name);
else
bat_dbg(DBG_BATMAN, bat_priv,
- "neighbor timeout: originator %pM, neighbor: %pM, last_valid: %lu\n",
+ "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
orig_node->orig, neigh_node->addr,
- (neigh_node->last_valid / HZ));
+ jiffies_to_msecs(last_seen));
neigh_purged = true;
@@ -324,10 +324,11 @@ static bool purge_orig_node(struct bat_priv *bat_priv,
{
struct neigh_node *best_neigh_node;
- if (has_timed_out(orig_node->last_valid, 2 * PURGE_TIMEOUT)) {
+ if (has_timed_out(orig_node->last_seen, 2 * PURGE_TIMEOUT)) {
bat_dbg(DBG_BATMAN, bat_priv,
- "Originator timeout: originator %pM, last_valid %lu\n",
- orig_node->orig, (orig_node->last_valid / HZ));
+ "Originator timeout: originator %pM, last_seen %u\n",
+ orig_node->orig,
+ jiffies_to_msecs(orig_node->last_seen));
return true;
} else {
if (purge_orig_neighbors(bat_priv, orig_node,
@@ -375,8 +376,6 @@ static void _purge_orig(struct bat_priv *bat_priv)
gw_node_purge(bat_priv);
gw_election(bat_priv);
-
- softif_neigh_purge(bat_priv);
}
static void purge_orig(struct work_struct *work)
@@ -447,9 +446,9 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
goto next;
last_seen_secs = jiffies_to_msecs(jiffies -
- orig_node->last_valid) / 1000;
+ orig_node->last_seen) / 1000;
last_seen_msecs = jiffies_to_msecs(jiffies -
- orig_node->last_valid) % 1000;
+ orig_node->last_seen) % 1000;
seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
orig_node->orig, last_seen_secs,
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 3fe2eda85652..f74d0d693359 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -29,10 +29,9 @@ void originator_free(struct bat_priv *bat_priv);
void purge_orig_ref(struct bat_priv *bat_priv);
void orig_node_free_ref(struct orig_node *orig_node);
struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr);
-struct neigh_node *create_neighbor(struct orig_node *orig_node,
- struct orig_node *orig_neigh_node,
- const uint8_t *neigh,
- struct hard_iface *if_incoming);
+struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface,
+ const uint8_t *neigh_addr,
+ uint32_t seqno);
void neigh_node_free_ref(struct neigh_node *neigh_node);
struct neigh_node *orig_node_get_router(struct orig_node *orig_node);
int orig_seq_print_text(struct seq_file *seq, void *offset);
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 441f3db1bd91..0ee1af770798 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -25,7 +25,7 @@
#define ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */
enum bat_packettype {
- BAT_OGM = 0x01,
+ BAT_IV_OGM = 0x01,
BAT_ICMP = 0x02,
BAT_UNICAST = 0x03,
BAT_BCAST = 0x04,
@@ -38,7 +38,8 @@ enum bat_packettype {
/* this file is included by batctl which needs these defines */
#define COMPAT_VERSION 14
-enum batman_flags {
+enum batman_iv_flags {
+ NOT_BEST_NEXT_HOP = 1 << 3,
PRIMARIES_FIRST_HOP = 1 << 4,
VIS_SERVER = 1 << 5,
DIRECTLINK = 1 << 6
@@ -90,6 +91,23 @@ enum tt_client_flags {
TT_CLIENT_PENDING = 1 << 10
};
+/* claim frame types for the bridge loop avoidance */
+enum bla_claimframe {
+ CLAIM_TYPE_ADD = 0x00,
+ CLAIM_TYPE_DEL = 0x01,
+ CLAIM_TYPE_ANNOUNCE = 0x02,
+ CLAIM_TYPE_REQUEST = 0x03
+};
+
+/* the destination hardware address field in the ARP frame is used to
+ * transport the claim type and the group id
+ */
+struct bla_claim_dst {
+ uint8_t magic[3]; /* FF:43:05 */
+ uint8_t type; /* bla_claimframe */
+ uint16_t group; /* group id */
+} __packed;
+
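
bla_claim_dst overlays the six-byte destination hardware address of the claim ARP frame: three magic bytes (FF:43:05, echoing the 0x4305 batman ethertype above), the claim type, and the group id. A minimal parser sketch under those assumptions, treating the group id as network byte order (an assumption of this sketch):

	#include <stdint.h>
	#include <string.h>
	#include <arpa/inet.h>

	struct bla_claim_dst_sketch {
		uint8_t  magic[3];  /* FF:43:05 */
		uint8_t  type;      /* CLAIM_TYPE_* */
		uint16_t group;     /* group id */
	} __attribute__((packed));

	static const uint8_t claim_magic[3] = { 0xff, 0x43, 0x05 };

	/* returns the claim type, or -1 if this is not a claim frame */
	static int parse_claim_dst(const uint8_t hw_dst[6], uint16_t *group)
	{
		struct bla_claim_dst_sketch dst;

		memcpy(&dst, hw_dst, sizeof(dst)); /* avoid unaligned access */
		if (memcmp(dst.magic, claim_magic, sizeof(claim_magic)))
			return -1;
		*group = ntohs(dst.group);
		return dst.type;
	}
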
struct batman_header {
uint8_t packet_type;
uint8_t version; /* batman version field */
@@ -100,8 +118,8 @@ struct batman_ogm_packet {
struct batman_header header;
uint8_t flags; /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */
uint32_t seqno;
- uint8_t orig[6];
- uint8_t prev_sender[6];
+ uint8_t orig[ETH_ALEN];
+ uint8_t prev_sender[ETH_ALEN];
uint8_t gw_flags; /* flags related to gateway class */
uint8_t tq;
uint8_t tt_num_changes;
@@ -109,13 +127,13 @@ struct batman_ogm_packet {
uint16_t tt_crc;
} __packed;
-#define BATMAN_OGM_LEN sizeof(struct batman_ogm_packet)
+#define BATMAN_OGM_HLEN sizeof(struct batman_ogm_packet)
struct icmp_packet {
struct batman_header header;
uint8_t msg_type; /* see ICMP message types above */
- uint8_t dst[6];
- uint8_t orig[6];
+ uint8_t dst[ETH_ALEN];
+ uint8_t orig[ETH_ALEN];
uint16_t seqno;
uint8_t uid;
uint8_t reserved;
@@ -128,8 +146,8 @@ struct icmp_packet {
struct icmp_packet_rr {
struct batman_header header;
uint8_t msg_type; /* see ICMP message types above */
- uint8_t dst[6];
- uint8_t orig[6];
+ uint8_t dst[ETH_ALEN];
+ uint8_t orig[ETH_ALEN];
uint16_t seqno;
uint8_t uid;
uint8_t rr_cur;
@@ -139,16 +157,16 @@ struct icmp_packet_rr {
struct unicast_packet {
struct batman_header header;
uint8_t ttvn; /* destination translation table version number */
- uint8_t dest[6];
+ uint8_t dest[ETH_ALEN];
} __packed;
struct unicast_frag_packet {
struct batman_header header;
uint8_t ttvn; /* destination translation table version number */
- uint8_t dest[6];
+ uint8_t dest[ETH_ALEN];
uint8_t flags;
uint8_t align;
- uint8_t orig[6];
+ uint8_t orig[ETH_ALEN];
uint16_t seqno;
} __packed;
@@ -156,7 +174,7 @@ struct bcast_packet {
struct batman_header header;
uint8_t reserved;
uint32_t seqno;
- uint8_t orig[6];
+ uint8_t orig[ETH_ALEN];
} __packed;
struct vis_packet {
@@ -165,9 +183,9 @@ struct vis_packet {
uint32_t seqno; /* sequence number */
uint8_t entries; /* number of entries behind this struct */
uint8_t reserved;
- uint8_t vis_orig[6]; /* originator that announces its neighbors */
- uint8_t target_orig[6]; /* who should receive this packet */
- uint8_t sender_orig[6]; /* who sent or rebroadcasted this packet */
+ uint8_t vis_orig[ETH_ALEN]; /* originator reporting its neighbors */
+ uint8_t target_orig[ETH_ALEN]; /* who should receive this packet */
+ uint8_t sender_orig[ETH_ALEN]; /* who sent or forwarded this packet */
} __packed;
struct tt_query_packet {
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 7f8e15899417..840e2c64a301 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -29,6 +29,10 @@
#include "originator.h"
#include "vis.h"
#include "unicast.h"
+#include "bridge_loop_avoidance.h"
+
+static int route_unicast_packet(struct sk_buff *skb,
+ struct hard_iface *recv_if);
void slide_own_bcast_window(struct hard_iface *hard_iface)
{
@@ -52,7 +56,7 @@ void slide_own_bcast_window(struct hard_iface *hard_iface)
bit_get_packet(bat_priv, word, 1, 0);
orig_node->bcast_own_sum[hard_iface->if_num] =
- bit_packet_count(word);
+ bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE);
spin_unlock_bh(&orig_node->ogm_cnt_lock);
}
rcu_read_unlock();
@@ -230,51 +234,46 @@ int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff,
{
if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) ||
(seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
- if (has_timed_out(*last_reset, RESET_PROTECTION_MS)) {
-
- *last_reset = jiffies;
- bat_dbg(DBG_BATMAN, bat_priv,
- "old packet received, start protection\n");
-
- return 0;
- } else {
+ if (!has_timed_out(*last_reset, RESET_PROTECTION_MS))
return 1;
- }
+
+ *last_reset = jiffies;
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "old packet received, start protection\n");
}
+
return 0;
}
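
The restructured window_protected() keeps the original decision table: a sequence number far behind the window or absurdly far ahead triggers a protection reset, but at most once per RESET_PROTECTION_MS; inside that grace period the packet is rejected. A compact model with the timekeeping abstracted away:

	#include <stdbool.h>

	#define TQ_LOCAL_WINDOW_SIZE 64
	#define EXPECTED_SEQNO_RANGE 65536

	/* returns true when the packet must be dropped as protected */
	static bool window_protected_model(int seq_diff, bool reset_recent,
					   bool *do_reset)
	{
		*do_reset = false;
		if (seq_diff <= -TQ_LOCAL_WINDOW_SIZE ||
		    seq_diff >= EXPECTED_SEQNO_RANGE) {
			if (reset_recent)
				return true;  /* still protected: drop */
			*do_reset = true;     /* open a new grace period */
		}
		return false;
	}
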
-int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
+bool check_management_packet(struct sk_buff *skb,
+ struct hard_iface *hard_iface,
+ int header_len)
{
- struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct ethhdr *ethhdr;
/* drop packet if it does not have the necessary minimum size */
- if (unlikely(!pskb_may_pull(skb, BATMAN_OGM_LEN)))
- return NET_RX_DROP;
+ if (unlikely(!pskb_may_pull(skb, header_len)))
+ return false;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* packet with broadcast indication but unicast recipient */
if (!is_broadcast_ether_addr(ethhdr->h_dest))
- return NET_RX_DROP;
+ return false;
/* packet with broadcast sender address */
if (is_broadcast_ether_addr(ethhdr->h_source))
- return NET_RX_DROP;
+ return false;
/* create a copy of the skb, if needed, to modify it. */
if (skb_cow(skb, 0) < 0)
- return NET_RX_DROP;
+ return false;
/* keep skb linear */
if (skb_linearize(skb) < 0)
- return NET_RX_DROP;
-
- bat_priv->bat_algo_ops->bat_ogm_receive(hard_iface, skb);
+ return false;
- kfree_skb(skb);
- return NET_RX_SUCCESS;
+ return true;
}
static int recv_my_icmp_packet(struct bat_priv *bat_priv,
@@ -309,7 +308,7 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
goto out;
/* create a copy of the skb, if needed, to modify it. */
- if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
+ if (skb_cow(skb, ETH_HLEN) < 0)
goto out;
icmp_packet = (struct icmp_packet_rr *)skb->data;
@@ -364,7 +363,7 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
goto out;
/* create a copy of the skb, if needed, to modify it. */
- if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
+ if (skb_cow(skb, ETH_HLEN) < 0)
goto out;
icmp_packet = (struct icmp_packet *)skb->data;
@@ -450,7 +449,7 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
goto out;
/* create a copy of the skb, if needed, to modify it. */
- if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
+ if (skb_cow(skb, ETH_HLEN) < 0)
goto out;
icmp_packet = (struct icmp_packet_rr *)skb->data;
@@ -669,6 +668,13 @@ int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
if (!is_my_mac(roam_adv_packet->dst))
return route_unicast_packet(skb, recv_if);
+ /* check if it is a backbone gateway. we don't accept
+ * roaming advertisements from it, as it has the same
+ * entries as we do.
+ */
+ if (bla_is_backbone_gw_orig(bat_priv, roam_adv_packet->src))
+ goto out;
+
orig_node = orig_hash_find(bat_priv, roam_adv_packet->src);
if (!orig_node)
goto out;
@@ -798,7 +804,7 @@ static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
return 0;
}
-int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
+static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct orig_node *orig_node = NULL;
@@ -830,7 +836,7 @@ int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
goto out;
/* create a copy of the skb, if needed, to modify it. */
- if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
+ if (skb_cow(skb, ETH_HLEN) < 0)
goto out;
unicast_packet = (struct unicast_packet *)skb->data;
@@ -907,12 +913,20 @@ static int check_unicast_ttvn(struct bat_priv *bat_priv,
/* Check whether I have to reroute the packet */
if (seq_before(unicast_packet->ttvn, curr_ttvn) || tt_poss_change) {
- /* Linearize the skb before accessing it */
- if (skb_linearize(skb) < 0)
+ /* check if there is enough data before accessing it */
+ if (!pskb_may_pull(skb, sizeof(struct unicast_packet) +
+ ETH_HLEN))
return 0;
ethhdr = (struct ethhdr *)(skb->data +
sizeof(struct unicast_packet));
+
+ /* we don't have an updated route for this client, so we should
+ * not try to reroute the packet
+ */
+ if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
+ return 1;
+
orig_node = transtable_search(bat_priv, NULL, ethhdr->h_dest);
if (!orig_node) {
@@ -1047,8 +1061,8 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
spin_lock_bh(&orig_node->bcast_seqno_lock);
/* check whether the packet is a duplicate */
- if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno,
- ntohl(bcast_packet->seqno)))
+ if (bat_test_bit(orig_node->bcast_bits, orig_node->last_bcast_seqno,
+ ntohl(bcast_packet->seqno)))
goto spin_unlock;
seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;
@@ -1065,9 +1079,19 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
spin_unlock_bh(&orig_node->bcast_seqno_lock);
+ /* check whether this has been sent by another originator before */
+ if (bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size))
+ goto out;
+
/* rebroadcast packet */
add_bcast_packet_to_list(bat_priv, skb, 1);
+ /* don't hand the broadcast up if it is from an originator
+ * from the same backbone.
+ */
+ if (bla_is_backbone_gw(skb, orig_node, hdr_size))
+ goto out;
+
/* broadcast for me */
interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
ret = NET_RX_SUCCESS;
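
recv_bcast_packet() now suppresses duplicates on two levels: the per-originator sequence-number bitmap (bat_test_bit above) drops straight retransmissions, and the bridge loop avoidance duplicate list drops the same payload re-injected under a different originator by another gateway on a shared backbone LAN. A model of the bitmap half, assuming the driver's window of 64:

	#include <stdbool.h>
	#include <stdint.h>

	#define WINDOW        64
	#define LONG_BITS     (8 * sizeof(unsigned long))

	/* bits[] records which of the last WINDOW seqnos, counted back
	 * from last_seqno, have already been seen */
	static bool seen_before(const unsigned long *bits,
				uint32_t last_seqno, uint32_t seqno)
	{
		int32_t diff = (int32_t)(last_seqno - seqno);

		if (diff < 0 || diff >= WINDOW)
			return false;        /* outside the tracked window */
		return bits[diff / LONG_BITS] &
		       (1UL << (diff % LONG_BITS));
	}
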
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index 92ac100d83da..d6bbbebb6567 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -23,15 +23,16 @@
#define _NET_BATMAN_ADV_ROUTING_H_
void slide_own_bcast_window(struct hard_iface *hard_iface);
+bool check_management_packet(struct sk_buff *skb,
+ struct hard_iface *hard_iface,
+ int header_len);
void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node,
struct neigh_node *neigh_node);
-int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if);
-int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if);
struct neigh_node *find_router(struct bat_priv *bat_priv,
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index af7a6741a685..f47299f22c68 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -45,13 +45,13 @@ int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
goto send_skb_err;
if (!(hard_iface->net_dev->flags & IFF_UP)) {
- pr_warning("Interface %s is not up - can't send packet via that interface!\n",
- hard_iface->net_dev->name);
+ pr_warn("Interface %s is not up - can't send packet via that interface!\n",
+ hard_iface->net_dev->name);
goto send_skb_err;
}
/* push to the ethernet header. */
- if (my_skb_head_push(skb, sizeof(*ethhdr)) < 0)
+ if (my_skb_head_push(skb, ETH_HLEN) < 0)
goto send_skb_err;
skb_reset_mac_header(skb);
@@ -87,7 +87,7 @@ static void realloc_packet_buffer(struct hard_iface *hard_iface,
/* keep old buffer if kmalloc should fail */
if (new_buff) {
memcpy(new_buff, hard_iface->packet_buff,
- BATMAN_OGM_LEN);
+ BATMAN_OGM_HLEN);
kfree(hard_iface->packet_buff);
hard_iface->packet_buff = new_buff;
@@ -101,13 +101,13 @@ static int prepare_packet_buffer(struct bat_priv *bat_priv,
{
int new_len;
- new_len = BATMAN_OGM_LEN +
+ new_len = BATMAN_OGM_HLEN +
tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));
/* if we have too many changes for one packet don't send any
* and wait for the tt table request which will be fragmented */
if (new_len > hard_iface->soft_iface->mtu)
- new_len = BATMAN_OGM_LEN;
+ new_len = BATMAN_OGM_HLEN;
realloc_packet_buffer(hard_iface, new_len);
@@ -117,14 +117,14 @@ static int prepare_packet_buffer(struct bat_priv *bat_priv,
atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
return tt_changes_fill_buffer(bat_priv,
- hard_iface->packet_buff + BATMAN_OGM_LEN,
- hard_iface->packet_len - BATMAN_OGM_LEN);
+ hard_iface->packet_buff + BATMAN_OGM_HLEN,
+ hard_iface->packet_len - BATMAN_OGM_HLEN);
}
static int reset_packet_buffer(struct bat_priv *bat_priv,
struct hard_iface *hard_iface)
{
- realloc_packet_buffer(hard_iface, BATMAN_OGM_LEN);
+ realloc_packet_buffer(hard_iface, BATMAN_OGM_HLEN);
return 0;
}
@@ -292,7 +292,7 @@ static void send_outstanding_bcast_packet(struct work_struct *work)
/* if we still have some more bcasts to send */
if (forw_packet->num_packets < 3) {
_add_bcast_packet_to_list(bat_priv, forw_packet,
- ((5 * HZ) / 1000));
+ msecs_to_jiffies(5));
return;
}
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index a5590f4193f1..6e2530b02043 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -36,6 +36,7 @@
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include "unicast.h"
+#include "bridge_loop_avoidance.h"
static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
@@ -73,439 +74,6 @@ int my_skb_head_push(struct sk_buff *skb, unsigned int len)
return 0;
}
-static void softif_neigh_free_ref(struct softif_neigh *softif_neigh)
-{
- if (atomic_dec_and_test(&softif_neigh->refcount))
- kfree_rcu(softif_neigh, rcu);
-}
-
-static void softif_neigh_vid_free_rcu(struct rcu_head *rcu)
-{
- struct softif_neigh_vid *softif_neigh_vid;
- struct softif_neigh *softif_neigh;
- struct hlist_node *node, *node_tmp;
- struct bat_priv *bat_priv;
-
- softif_neigh_vid = container_of(rcu, struct softif_neigh_vid, rcu);
- bat_priv = softif_neigh_vid->bat_priv;
-
- spin_lock_bh(&bat_priv->softif_neigh_lock);
- hlist_for_each_entry_safe(softif_neigh, node, node_tmp,
- &softif_neigh_vid->softif_neigh_list, list) {
- hlist_del_rcu(&softif_neigh->list);
- softif_neigh_free_ref(softif_neigh);
- }
- spin_unlock_bh(&bat_priv->softif_neigh_lock);
-
- kfree(softif_neigh_vid);
-}
-
-static void softif_neigh_vid_free_ref(struct softif_neigh_vid *softif_neigh_vid)
-{
- if (atomic_dec_and_test(&softif_neigh_vid->refcount))
- call_rcu(&softif_neigh_vid->rcu, softif_neigh_vid_free_rcu);
-}
-
-static struct softif_neigh_vid *softif_neigh_vid_get(struct bat_priv *bat_priv,
- short vid)
-{
- struct softif_neigh_vid *softif_neigh_vid;
- struct hlist_node *node;
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(softif_neigh_vid, node,
- &bat_priv->softif_neigh_vids, list) {
- if (softif_neigh_vid->vid != vid)
- continue;
-
- if (!atomic_inc_not_zero(&softif_neigh_vid->refcount))
- continue;
-
- goto out;
- }
-
- softif_neigh_vid = kzalloc(sizeof(*softif_neigh_vid), GFP_ATOMIC);
- if (!softif_neigh_vid)
- goto out;
-
- softif_neigh_vid->vid = vid;
- softif_neigh_vid->bat_priv = bat_priv;
-
- /* initialize with 2 - caller decrements counter by one */
- atomic_set(&softif_neigh_vid->refcount, 2);
- INIT_HLIST_HEAD(&softif_neigh_vid->softif_neigh_list);
- INIT_HLIST_NODE(&softif_neigh_vid->list);
- spin_lock_bh(&bat_priv->softif_neigh_vid_lock);
- hlist_add_head_rcu(&softif_neigh_vid->list,
- &bat_priv->softif_neigh_vids);
- spin_unlock_bh(&bat_priv->softif_neigh_vid_lock);
-
-out:
- rcu_read_unlock();
- return softif_neigh_vid;
-}
-
-static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
- const uint8_t *addr, short vid)
-{
- struct softif_neigh_vid *softif_neigh_vid;
- struct softif_neigh *softif_neigh = NULL;
- struct hlist_node *node;
-
- softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
- if (!softif_neigh_vid)
- goto out;
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(softif_neigh, node,
- &softif_neigh_vid->softif_neigh_list,
- list) {
- if (!compare_eth(softif_neigh->addr, addr))
- continue;
-
- if (!atomic_inc_not_zero(&softif_neigh->refcount))
- continue;
-
- softif_neigh->last_seen = jiffies;
- goto unlock;
- }
-
- softif_neigh = kzalloc(sizeof(*softif_neigh), GFP_ATOMIC);
- if (!softif_neigh)
- goto unlock;
-
- memcpy(softif_neigh->addr, addr, ETH_ALEN);
- softif_neigh->last_seen = jiffies;
- /* initialize with 2 - caller decrements counter by one */
- atomic_set(&softif_neigh->refcount, 2);
-
- INIT_HLIST_NODE(&softif_neigh->list);
- spin_lock_bh(&bat_priv->softif_neigh_lock);
- hlist_add_head_rcu(&softif_neigh->list,
- &softif_neigh_vid->softif_neigh_list);
- spin_unlock_bh(&bat_priv->softif_neigh_lock);
-
-unlock:
- rcu_read_unlock();
-out:
- if (softif_neigh_vid)
- softif_neigh_vid_free_ref(softif_neigh_vid);
- return softif_neigh;
-}
-
-static struct softif_neigh *softif_neigh_get_selected(
- struct softif_neigh_vid *softif_neigh_vid)
-{
- struct softif_neigh *softif_neigh;
-
- rcu_read_lock();
- softif_neigh = rcu_dereference(softif_neigh_vid->softif_neigh);
-
- if (softif_neigh && !atomic_inc_not_zero(&softif_neigh->refcount))
- softif_neigh = NULL;
-
- rcu_read_unlock();
- return softif_neigh;
-}
-
-static struct softif_neigh *softif_neigh_vid_get_selected(
- struct bat_priv *bat_priv,
- short vid)
-{
- struct softif_neigh_vid *softif_neigh_vid;
- struct softif_neigh *softif_neigh = NULL;
-
- softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
- if (!softif_neigh_vid)
- goto out;
-
- softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
-out:
- if (softif_neigh_vid)
- softif_neigh_vid_free_ref(softif_neigh_vid);
- return softif_neigh;
-}
-
-static void softif_neigh_vid_select(struct bat_priv *bat_priv,
- struct softif_neigh *new_neigh,
- short vid)
-{
- struct softif_neigh_vid *softif_neigh_vid;
- struct softif_neigh *curr_neigh;
-
- softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
- if (!softif_neigh_vid)
- goto out;
-
- spin_lock_bh(&bat_priv->softif_neigh_lock);
-
- if (new_neigh && !atomic_inc_not_zero(&new_neigh->refcount))
- new_neigh = NULL;
-
- curr_neigh = rcu_dereference_protected(softif_neigh_vid->softif_neigh,
- 1);
- rcu_assign_pointer(softif_neigh_vid->softif_neigh, new_neigh);
-
- if ((curr_neigh) && (!new_neigh))
- bat_dbg(DBG_ROUTES, bat_priv,
- "Removing mesh exit point on vid: %d (prev: %pM).\n",
- vid, curr_neigh->addr);
- else if ((curr_neigh) && (new_neigh))
- bat_dbg(DBG_ROUTES, bat_priv,
- "Changing mesh exit point on vid: %d from %pM to %pM.\n",
- vid, curr_neigh->addr, new_neigh->addr);
- else if ((!curr_neigh) && (new_neigh))
- bat_dbg(DBG_ROUTES, bat_priv,
- "Setting mesh exit point on vid: %d to %pM.\n",
- vid, new_neigh->addr);
-
- if (curr_neigh)
- softif_neigh_free_ref(curr_neigh);
-
- spin_unlock_bh(&bat_priv->softif_neigh_lock);
-
-out:
- if (softif_neigh_vid)
- softif_neigh_vid_free_ref(softif_neigh_vid);
-}
-
-static void softif_neigh_vid_deselect(struct bat_priv *bat_priv,
- struct softif_neigh_vid *softif_neigh_vid)
-{
- struct softif_neigh *curr_neigh;
- struct softif_neigh *softif_neigh = NULL, *softif_neigh_tmp;
- struct hard_iface *primary_if = NULL;
- struct hlist_node *node;
-
- primary_if = primary_if_get_selected(bat_priv);
- if (!primary_if)
- goto out;
-
- /* find new softif_neigh immediately to avoid temporary loops */
- rcu_read_lock();
- curr_neigh = rcu_dereference(softif_neigh_vid->softif_neigh);
-
- hlist_for_each_entry_rcu(softif_neigh_tmp, node,
- &softif_neigh_vid->softif_neigh_list,
- list) {
- if (softif_neigh_tmp == curr_neigh)
- continue;
-
- /* we got a neighbor but its mac is 'bigger' than ours */
- if (memcmp(primary_if->net_dev->dev_addr,
- softif_neigh_tmp->addr, ETH_ALEN) < 0)
- continue;
-
- if (!atomic_inc_not_zero(&softif_neigh_tmp->refcount))
- continue;
-
- softif_neigh = softif_neigh_tmp;
- goto unlock;
- }
-
-unlock:
- rcu_read_unlock();
-out:
- softif_neigh_vid_select(bat_priv, softif_neigh, softif_neigh_vid->vid);
-
- if (primary_if)
- hardif_free_ref(primary_if);
- if (softif_neigh)
- softif_neigh_free_ref(softif_neigh);
-}
-
-int softif_neigh_seq_print_text(struct seq_file *seq, void *offset)
-{
- struct net_device *net_dev = (struct net_device *)seq->private;
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- struct softif_neigh_vid *softif_neigh_vid;
- struct softif_neigh *softif_neigh;
- struct hard_iface *primary_if;
- struct hlist_node *node, *node_tmp;
- struct softif_neigh *curr_softif_neigh;
- int ret = 0, last_seen_secs, last_seen_msecs;
-
- primary_if = primary_if_get_selected(bat_priv);
- if (!primary_if) {
- ret = seq_printf(seq,
- "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
- net_dev->name);
- goto out;
- }
-
- if (primary_if->if_status != IF_ACTIVE) {
- ret = seq_printf(seq,
- "BATMAN mesh %s disabled - primary interface not active\n",
- net_dev->name);
- goto out;
- }
-
- seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name);
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(softif_neigh_vid, node,
- &bat_priv->softif_neigh_vids, list) {
- seq_printf(seq, " %-15s %s on vid: %d\n",
- "Originator", "last-seen", softif_neigh_vid->vid);
-
- curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
-
- hlist_for_each_entry_rcu(softif_neigh, node_tmp,
- &softif_neigh_vid->softif_neigh_list,
- list) {
- last_seen_secs = jiffies_to_msecs(jiffies -
- softif_neigh->last_seen) / 1000;
- last_seen_msecs = jiffies_to_msecs(jiffies -
- softif_neigh->last_seen) % 1000;
- seq_printf(seq, "%s %pM %3i.%03is\n",
- curr_softif_neigh == softif_neigh
- ? "=>" : " ", softif_neigh->addr,
- last_seen_secs, last_seen_msecs);
- }
-
- if (curr_softif_neigh)
- softif_neigh_free_ref(curr_softif_neigh);
-
- seq_printf(seq, "\n");
- }
- rcu_read_unlock();
-
-out:
- if (primary_if)
- hardif_free_ref(primary_if);
- return ret;
-}
-
-void softif_neigh_purge(struct bat_priv *bat_priv)
-{
- struct softif_neigh *softif_neigh, *curr_softif_neigh;
- struct softif_neigh_vid *softif_neigh_vid;
- struct hlist_node *node, *node_tmp, *node_tmp2;
- int do_deselect;
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(softif_neigh_vid, node,
- &bat_priv->softif_neigh_vids, list) {
- if (!atomic_inc_not_zero(&softif_neigh_vid->refcount))
- continue;
-
- curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
- do_deselect = 0;
-
- spin_lock_bh(&bat_priv->softif_neigh_lock);
- hlist_for_each_entry_safe(softif_neigh, node_tmp, node_tmp2,
- &softif_neigh_vid->softif_neigh_list,
- list) {
- if ((!has_timed_out(softif_neigh->last_seen,
- SOFTIF_NEIGH_TIMEOUT)) &&
- (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE))
- continue;
-
- if (curr_softif_neigh == softif_neigh) {
- bat_dbg(DBG_ROUTES, bat_priv,
- "Current mesh exit point on vid: %d '%pM' vanished.\n",
- softif_neigh_vid->vid,
- softif_neigh->addr);
- do_deselect = 1;
- }
-
- hlist_del_rcu(&softif_neigh->list);
- softif_neigh_free_ref(softif_neigh);
- }
- spin_unlock_bh(&bat_priv->softif_neigh_lock);
-
- /* soft_neigh_vid_deselect() needs to acquire the
- * softif_neigh_lock */
- if (do_deselect)
- softif_neigh_vid_deselect(bat_priv, softif_neigh_vid);
-
- if (curr_softif_neigh)
- softif_neigh_free_ref(curr_softif_neigh);
-
- softif_neigh_vid_free_ref(softif_neigh_vid);
- }
- rcu_read_unlock();
-
- spin_lock_bh(&bat_priv->softif_neigh_vid_lock);
- hlist_for_each_entry_safe(softif_neigh_vid, node, node_tmp,
- &bat_priv->softif_neigh_vids, list) {
- if (!hlist_empty(&softif_neigh_vid->softif_neigh_list))
- continue;
-
- hlist_del_rcu(&softif_neigh_vid->list);
- softif_neigh_vid_free_ref(softif_neigh_vid);
- }
- spin_unlock_bh(&bat_priv->softif_neigh_vid_lock);
-
-}
-
-static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
- short vid)
-{
- struct bat_priv *bat_priv = netdev_priv(dev);
- struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
- struct batman_ogm_packet *batman_ogm_packet;
- struct softif_neigh *softif_neigh = NULL;
- struct hard_iface *primary_if = NULL;
- struct softif_neigh *curr_softif_neigh = NULL;
-
- if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
- batman_ogm_packet = (struct batman_ogm_packet *)
- (skb->data + ETH_HLEN + VLAN_HLEN);
- else
- batman_ogm_packet = (struct batman_ogm_packet *)
- (skb->data + ETH_HLEN);
-
- if (batman_ogm_packet->header.version != COMPAT_VERSION)
- goto out;
-
- if (batman_ogm_packet->header.packet_type != BAT_OGM)
- goto out;
-
- if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
- goto out;
-
- if (is_my_mac(batman_ogm_packet->orig))
- goto out;
-
- softif_neigh = softif_neigh_get(bat_priv, batman_ogm_packet->orig, vid);
- if (!softif_neigh)
- goto out;
-
- curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
- if (curr_softif_neigh == softif_neigh)
- goto out;
-
- primary_if = primary_if_get_selected(bat_priv);
- if (!primary_if)
- goto out;
-
- /* we got a neighbor but its mac is 'bigger' than ours */
- if (memcmp(primary_if->net_dev->dev_addr,
- softif_neigh->addr, ETH_ALEN) < 0)
- goto out;
-
- /* close own batX device and use softif_neigh as exit node */
- if (!curr_softif_neigh) {
- softif_neigh_vid_select(bat_priv, softif_neigh, vid);
- goto out;
- }
-
- /* switch to new 'smallest neighbor' */
- if (memcmp(softif_neigh->addr, curr_softif_neigh->addr, ETH_ALEN) < 0)
- softif_neigh_vid_select(bat_priv, softif_neigh, vid);
-
-out:
- kfree_skb(skb);
- if (softif_neigh)
- softif_neigh_free_ref(softif_neigh);
- if (curr_softif_neigh)
- softif_neigh_free_ref(curr_softif_neigh);
- if (primary_if)
- hardif_free_ref(primary_if);
- return;
-}
-
static int interface_open(struct net_device *dev)
{
netif_start_queue(dev);
@@ -562,10 +130,11 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
struct hard_iface *primary_if = NULL;
struct bcast_packet *bcast_packet;
struct vlan_ethhdr *vhdr;
- struct softif_neigh *curr_softif_neigh = NULL;
+ static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00,
+ 0x00};
unsigned int header_len = 0;
int data_len = skb->len, ret;
- short vid = -1;
+ short vid __maybe_unused = -1;
bool do_bcast = false;
if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
@@ -583,21 +152,21 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
/* fall through */
case ETH_P_BATMAN:
- softif_batman_recv(skb, soft_iface, vid);
- goto end;
+ goto dropped;
}
- /**
- * if we have a another chosen mesh exit node in range
- * it will transport the packets to the mesh
- */
- curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
- if (curr_softif_neigh)
+ if (bla_tx(bat_priv, skb, vid))
goto dropped;
/* Register the client MAC in the transtable */
tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
+ /* don't accept STP packets. STP does not help in meshes;
+ * better to use the bridge loop avoidance ...
+ */
+ if (compare_eth(ethhdr->h_dest, stp_addr))
+ goto dropped;
+
if (is_multicast_ether_addr(ethhdr->h_dest)) {
do_bcast = true;
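
stp_addr above is the IEEE 802.1D bridge group address 01:80:C2:00:00:00, the destination of all STP BPDUs, so the compare_eth() test filters spanning-tree traffic out of the mesh before it can interfere with the bridge loop avoidance. A standalone version of the check, modeling compare_eth() as a six-byte memcmp (which is what the driver's helper amounts to):

	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	#define ETH_ALEN 6

	static const uint8_t stp_dst[ETH_ALEN] = {
		0x01, 0x80, 0xc2, 0x00, 0x00, 0x00
	};

	static bool is_stp_dst(const uint8_t h_dest[ETH_ALEN])
	{
		return memcmp(h_dest, stp_dst, ETH_ALEN) == 0;
	}
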
@@ -675,8 +244,6 @@ dropped:
dropped_freed:
bat_priv->stats.tx_dropped++;
end:
- if (curr_softif_neigh)
- softif_neigh_free_ref(curr_softif_neigh);
if (primary_if)
hardif_free_ref(primary_if);
return NETDEV_TX_OK;
@@ -687,12 +254,9 @@ void interface_rx(struct net_device *soft_iface,
int hdr_size)
{
struct bat_priv *bat_priv = netdev_priv(soft_iface);
- struct unicast_packet *unicast_packet;
struct ethhdr *ethhdr;
struct vlan_ethhdr *vhdr;
- struct softif_neigh *curr_softif_neigh = NULL;
- short vid = -1;
- int ret;
+ short vid __maybe_unused = -1;
/* check if enough space is available for pulling, and pull */
if (!pskb_may_pull(skb, hdr_size))
@@ -716,30 +280,6 @@ void interface_rx(struct net_device *soft_iface,
goto dropped;
}
- /**
- * if we have a another chosen mesh exit node in range
- * it will transport the packets to the non-mesh network
- */
- curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
- if (curr_softif_neigh) {
- skb_push(skb, hdr_size);
- unicast_packet = (struct unicast_packet *)skb->data;
-
- if ((unicast_packet->header.packet_type != BAT_UNICAST) &&
- (unicast_packet->header.packet_type != BAT_UNICAST_FRAG))
- goto dropped;
-
- skb_reset_mac_header(skb);
-
- memcpy(unicast_packet->dest,
- curr_softif_neigh->addr, ETH_ALEN);
- ret = route_unicast_packet(skb, recv_if);
- if (ret == NET_RX_DROP)
- goto dropped;
-
- goto out;
- }
-
/* skb->dev & skb->pkt_type are set here */
if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
goto dropped;
@@ -752,21 +292,25 @@ void interface_rx(struct net_device *soft_iface,
/* skb->ip_summed = CHECKSUM_UNNECESSARY;*/
bat_priv->stats.rx_packets++;
- bat_priv->stats.rx_bytes += skb->len + sizeof(struct ethhdr);
+ bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
soft_iface->last_rx = jiffies;
if (is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
goto dropped;
+ /* Let the bridge loop avoidance check the packet. If it does
+ * not handle it, we can safely push it up.
+ */
+ if (bla_rx(bat_priv, skb, vid))
+ goto out;
+
netif_rx(skb);
goto out;
dropped:
kfree_skb(skb);
out:
- if (curr_softif_neigh)
- softif_neigh_free_ref(curr_softif_neigh);
return;
}
@@ -828,13 +372,14 @@ struct net_device *softif_create(const char *name)
atomic_set(&bat_priv->aggregated_ogms, 1);
atomic_set(&bat_priv->bonding, 0);
+ atomic_set(&bat_priv->bridge_loop_avoidance, 0);
atomic_set(&bat_priv->ap_isolation, 0);
atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
atomic_set(&bat_priv->gw_mode, GW_MODE_OFF);
atomic_set(&bat_priv->gw_sel_class, 20);
atomic_set(&bat_priv->gw_bandwidth, 41);
atomic_set(&bat_priv->orig_interval, 1000);
- atomic_set(&bat_priv->hop_penalty, 10);
+ atomic_set(&bat_priv->hop_penalty, 30);
atomic_set(&bat_priv->log_level, 0);
atomic_set(&bat_priv->fragmentation, 1);
atomic_set(&bat_priv->bcast_queue_left, BCAST_QUEUE_LEN);
@@ -845,6 +390,7 @@ struct net_device *softif_create(const char *name)
atomic_set(&bat_priv->ttvn, 0);
atomic_set(&bat_priv->tt_local_changes, 0);
atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
+ atomic_set(&bat_priv->bla_num_requests, 0);
bat_priv->tt_buff = NULL;
bat_priv->tt_buff_len = 0;
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 756eab5b8dd4..020300673884 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -23,8 +23,6 @@
#define _NET_BATMAN_ADV_SOFT_INTERFACE_H_
int my_skb_head_push(struct sk_buff *skb, unsigned int len);
-int softif_neigh_seq_print_text(struct seq_file *seq, void *offset);
-void softif_neigh_purge(struct bat_priv *bat_priv);
void interface_rx(struct net_device *soft_iface,
struct sk_buff *skb, struct hard_iface *recv_if,
int hdr_size);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 1f8692127840..a66c2dcd1088 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1,7 +1,7 @@
/*
* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
- * Marek Lindner, Simon Wunderlich
+ * Marek Lindner, Simon Wunderlich, Antonio Quartulli
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -27,13 +27,14 @@
#include "hash.h"
#include "originator.h"
#include "routing.h"
+#include "bridge_loop_avoidance.h"
#include <linux/crc16.h>
-static void _tt_global_del(struct bat_priv *bat_priv,
- struct tt_global_entry *tt_global_entry,
- const char *message);
+static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
+ struct orig_node *orig_node);
static void tt_purge(struct work_struct *work);
+static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry);
/* returns 1 if they are the same mac addr */
static int compare_tt(const struct hlist_node *node, const void *data2)
@@ -123,17 +124,31 @@ static void tt_global_entry_free_rcu(struct rcu_head *rcu)
tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
common);
- if (tt_global_entry->orig_node)
- orig_node_free_ref(tt_global_entry->orig_node);
-
kfree(tt_global_entry);
}
static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
{
- if (atomic_dec_and_test(&tt_global_entry->common.refcount))
+ if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
+ tt_global_del_orig_list(tt_global_entry);
call_rcu(&tt_global_entry->common.rcu,
tt_global_entry_free_rcu);
+ }
+}
+
+static void tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
+{
+ struct tt_orig_list_entry *orig_entry;
+
+ orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu);
+ atomic_dec(&orig_entry->orig_node->tt_size);
+ orig_node_free_ref(orig_entry->orig_node);
+ kfree(orig_entry);
+}
+
+static void tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry)
+{
+ call_rcu(&orig_entry->rcu, tt_orig_list_entry_free_rcu);
}
static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
@@ -182,12 +197,17 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
struct bat_priv *bat_priv = netdev_priv(soft_iface);
struct tt_local_entry *tt_local_entry = NULL;
struct tt_global_entry *tt_global_entry = NULL;
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct tt_orig_list_entry *orig_entry;
int hash_added;
tt_local_entry = tt_local_hash_find(bat_priv, addr);
if (tt_local_entry) {
tt_local_entry->last_seen = jiffies;
+ /* possibly unset the TT_CLIENT_PENDING flag */
+ tt_local_entry->common.flags &= ~TT_CLIENT_PENDING;
goto out;
}
@@ -232,14 +252,21 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
/* Check whether it is a roaming! */
if (tt_global_entry) {
- /* This node is probably going to update its tt table */
- tt_global_entry->orig_node->tt_poss_change = true;
- /* The global entry has to be marked as ROAMING and has to be
- * kept for consistency purpose */
+ /* These nodes are probably going to update their tt tables */
+ head = &tt_global_entry->orig_list;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+ orig_entry->orig_node->tt_poss_change = true;
+
+ send_roam_adv(bat_priv, tt_global_entry->common.addr,
+ orig_entry->orig_node);
+ }
+ rcu_read_unlock();
+ /* The global entry has to be marked as ROAMING and
+ * has to be kept for consistency purpose
+ */
tt_global_entry->common.flags |= TT_CLIENT_ROAM;
tt_global_entry->roam_at = jiffies;
- send_roam_adv(bat_priv, tt_global_entry->common.addr,
- tt_global_entry->orig_node);
}
out:
if (tt_local_entry)
@@ -490,33 +517,76 @@ static void tt_changes_list_free(struct bat_priv *bat_priv)
spin_unlock_bh(&bat_priv->tt_changes_list_lock);
}
+/* find out if an orig_node is already in the list of a tt_global_entry.
+ * returns true if found, false otherwise
+ */
+static bool tt_global_entry_has_orig(const struct tt_global_entry *entry,
+ const struct orig_node *orig_node)
+{
+ struct tt_orig_list_entry *tmp_orig_entry;
+ const struct hlist_head *head;
+ struct hlist_node *node;
+ bool found = false;
+
+ rcu_read_lock();
+ head = &entry->orig_list;
+ hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
+ if (tmp_orig_entry->orig_node == orig_node) {
+ found = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return found;
+}
+
+static void tt_global_add_orig_entry(struct tt_global_entry *tt_global_entry,
+ struct orig_node *orig_node,
+ int ttvn)
+{
+ struct tt_orig_list_entry *orig_entry;
+
+ orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
+ if (!orig_entry)
+ return;
+
+ INIT_HLIST_NODE(&orig_entry->list);
+ atomic_inc(&orig_node->refcount);
+ atomic_inc(&orig_node->tt_size);
+ orig_entry->orig_node = orig_node;
+ orig_entry->ttvn = ttvn;
+
+ spin_lock_bh(&tt_global_entry->list_lock);
+ hlist_add_head_rcu(&orig_entry->list,
+ &tt_global_entry->orig_list);
+ spin_unlock_bh(&tt_global_entry->list_lock);
+}
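
tt_global_add_orig_entry() is the writer half of the standard RCU list recipe, and tt_global_entry_has_orig() above is the reader half: writers serialize on list_lock and publish with hlist_add_head_rcu(), readers walk the list under rcu_read_lock() alone, and removal (tt_global_del_orig_list() further down) unlinks under the lock and frees via call_rcu() once a grace period has passed. Condensed into one kernel-style sketch using the four-argument hlist_for_each_entry_rcu() of this kernel era (tt_entry/tt_orig are illustrative names; the lock is assumed to be initialized elsewhere):

	#include <linux/types.h>
	#include <linux/rculist.h>
	#include <linux/spinlock.h>

	struct tt_orig {
		struct hlist_node list;
		void *orig_node;
	};

	struct tt_entry {
		struct hlist_head orig_list;
		spinlock_t list_lock;
	};

	static void tt_orig_add(struct tt_entry *e, struct tt_orig *o)
	{
		spin_lock_bh(&e->list_lock);      /* writers serialize */
		hlist_add_head_rcu(&o->list, &e->orig_list);
		spin_unlock_bh(&e->list_lock);
	}

	static bool tt_orig_has(struct tt_entry *e, const void *orig_node)
	{
		struct tt_orig *o;
		struct hlist_node *n;
		bool found = false;

		rcu_read_lock();                  /* readers take no lock */
		hlist_for_each_entry_rcu(o, n, &e->orig_list, list) {
			if (o->orig_node == orig_node) {
				found = true;
				break;
			}
		}
		rcu_read_unlock();
		return found;
	}
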
+
/* caller must hold orig_node refcount */
int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
const unsigned char *tt_addr, uint8_t ttvn, bool roaming,
bool wifi)
{
- struct tt_global_entry *tt_global_entry;
- struct orig_node *orig_node_tmp;
+ struct tt_global_entry *tt_global_entry = NULL;
int ret = 0;
int hash_added;
tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);
if (!tt_global_entry) {
- tt_global_entry =
- kmalloc(sizeof(*tt_global_entry),
- GFP_ATOMIC);
+ tt_global_entry = kzalloc(sizeof(*tt_global_entry),
+ GFP_ATOMIC);
if (!tt_global_entry)
goto out;
memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN);
+
tt_global_entry->common.flags = NO_FLAGS;
- atomic_set(&tt_global_entry->common.refcount, 2);
- /* Assign the new orig_node */
- atomic_inc(&orig_node->refcount);
- tt_global_entry->orig_node = orig_node;
- tt_global_entry->ttvn = ttvn;
tt_global_entry->roam_at = 0;
+ atomic_set(&tt_global_entry->common.refcount, 2);
+
+ INIT_HLIST_HEAD(&tt_global_entry->orig_list);
+ spin_lock_init(&tt_global_entry->list_lock);
hash_added = hash_add(bat_priv->tt_global_hash, compare_tt,
choose_orig, &tt_global_entry->common,
@@ -527,19 +597,27 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
tt_global_entry_free_ref(tt_global_entry);
goto out_remove;
}
- atomic_inc(&orig_node->tt_size);
+
+ tt_global_add_orig_entry(tt_global_entry, orig_node, ttvn);
} else {
- if (tt_global_entry->orig_node != orig_node) {
- atomic_dec(&tt_global_entry->orig_node->tt_size);
- orig_node_tmp = tt_global_entry->orig_node;
- atomic_inc(&orig_node->refcount);
- tt_global_entry->orig_node = orig_node;
- orig_node_free_ref(orig_node_tmp);
- atomic_inc(&orig_node->tt_size);
+ /* there is already a global entry, use this one. */
+
+ /* If the TT_CLIENT_ROAM flag is set, there is only one
+ * originator left in the list and we previously received a
+ * delete + roaming change for this originator.
+ *
+ * We should first delete the old originator before adding the
+ * new one.
+ */
+ if (tt_global_entry->common.flags & TT_CLIENT_ROAM) {
+ tt_global_del_orig_list(tt_global_entry);
+ tt_global_entry->common.flags &= ~TT_CLIENT_ROAM;
+ tt_global_entry->roam_at = 0;
}
- tt_global_entry->common.flags = NO_FLAGS;
- tt_global_entry->ttvn = ttvn;
- tt_global_entry->roam_at = 0;
+
+ if (!tt_global_entry_has_orig(tt_global_entry, orig_node))
+ tt_global_add_orig_entry(tt_global_entry, orig_node,
+ ttvn);
}
if (wifi)
@@ -560,6 +638,34 @@ out:
return ret;
}
+/* print all orig nodes that announce the address for this global entry.
+ * it is assumed that the caller holds rcu_read_lock();
+ */
+static void tt_global_print_entry(struct tt_global_entry *tt_global_entry,
+ struct seq_file *seq)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct tt_orig_list_entry *orig_entry;
+ struct tt_common_entry *tt_common_entry;
+ uint16_t flags;
+ uint8_t last_ttvn;
+
+ tt_common_entry = &tt_global_entry->common;
+
+ head = &tt_global_entry->orig_list;
+
+ hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+ flags = tt_common_entry->flags;
+ last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
+ seq_printf(seq, " * %pM (%3u) via %pM (%3u) [%c%c]\n",
+ tt_global_entry->common.addr, orig_entry->ttvn,
+ orig_entry->orig_node->orig, last_ttvn,
+ (flags & TT_CLIENT_ROAM ? 'R' : '.'),
+ (flags & TT_CLIENT_WIFI ? 'W' : '.'));
+ }
+}
+
int tt_global_seq_print_text(struct seq_file *seq, void *offset)
{
struct net_device *net_dev = (struct net_device *)seq->private;
@@ -603,18 +709,7 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
tt_global_entry = container_of(tt_common_entry,
struct tt_global_entry,
common);
- seq_printf(seq,
- " * %pM (%3u) via %pM (%3u) [%c%c]\n",
- tt_global_entry->common.addr,
- tt_global_entry->ttvn,
- tt_global_entry->orig_node->orig,
- (uint8_t) atomic_read(
- &tt_global_entry->orig_node->
- last_ttvn),
- (tt_global_entry->common.flags &
- TT_CLIENT_ROAM ? 'R' : '.'),
- (tt_global_entry->common.flags &
- TT_CLIENT_WIFI ? 'W' : '.'));
+ tt_global_print_entry(tt_global_entry, seq);
}
rcu_read_unlock();
}
@@ -624,59 +719,150 @@ out:
return ret;
}
-static void _tt_global_del(struct bat_priv *bat_priv,
- struct tt_global_entry *tt_global_entry,
- const char *message)
+/* deletes the orig list of a tt_global_entry */
+static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry)
{
- if (!tt_global_entry)
- goto out;
+ struct hlist_head *head;
+ struct hlist_node *node, *safe;
+ struct tt_orig_list_entry *orig_entry;
- bat_dbg(DBG_TT, bat_priv,
- "Deleting global tt entry %pM (via %pM): %s\n",
- tt_global_entry->common.addr, tt_global_entry->orig_node->orig,
- message);
+ spin_lock_bh(&tt_global_entry->list_lock);
+ head = &tt_global_entry->orig_list;
+ hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
+ hlist_del_rcu(node);
+ tt_orig_list_entry_free_ref(orig_entry);
+ }
+ spin_unlock_bh(&tt_global_entry->list_lock);
- atomic_dec(&tt_global_entry->orig_node->tt_size);
+}
+
+static void tt_global_del_orig_entry(struct bat_priv *bat_priv,
+ struct tt_global_entry *tt_global_entry,
+ struct orig_node *orig_node,
+ const char *message)
+{
+ struct hlist_head *head;
+ struct hlist_node *node, *safe;
+ struct tt_orig_list_entry *orig_entry;
+
+ spin_lock_bh(&tt_global_entry->list_lock);
+ head = &tt_global_entry->orig_list;
+ hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
+ if (orig_entry->orig_node == orig_node) {
+ bat_dbg(DBG_TT, bat_priv,
+ "Deleting %pM from global tt entry %pM: %s\n",
+ orig_node->orig, tt_global_entry->common.addr,
+ message);
+ hlist_del_rcu(node);
+ tt_orig_list_entry_free_ref(orig_entry);
+ }
+ }
+ spin_unlock_bh(&tt_global_entry->list_lock);
+}
+
+static void tt_global_del_struct(struct bat_priv *bat_priv,
+ struct tt_global_entry *tt_global_entry,
+ const char *message)
+{
+ bat_dbg(DBG_TT, bat_priv,
+ "Deleting global tt entry %pM: %s\n",
+ tt_global_entry->common.addr, message);
hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig,
tt_global_entry->common.addr);
-out:
- if (tt_global_entry)
- tt_global_entry_free_ref(tt_global_entry);
+ tt_global_entry_free_ref(tt_global_entry);
}
-void tt_global_del(struct bat_priv *bat_priv,
- struct orig_node *orig_node, const unsigned char *addr,
- const char *message, bool roaming)
+/* If the client is to be deleted, we check if it is the last originator entry
+ * within the tt_global entry. If so, we set the TT_CLIENT_ROAM flag and the
+ * timer; otherwise we simply remove the originator scheduled for deletion.
+ */
+static void tt_global_del_roaming(struct bat_priv *bat_priv,
+ struct tt_global_entry *tt_global_entry,
+ struct orig_node *orig_node,
+ const char *message)
+{
+ bool last_entry = true;
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct tt_orig_list_entry *orig_entry;
+
+ /* no local entry exists, case 1:
+ * Check if this is the last one or if other entries exist.
+ */
+
+ rcu_read_lock();
+ head = &tt_global_entry->orig_list;
+ hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+ if (orig_entry->orig_node != orig_node) {
+ last_entry = false;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ if (last_entry) {
+ /* it's the last one, mark it for roaming. */
+ tt_global_entry->common.flags |= TT_CLIENT_ROAM;
+ tt_global_entry->roam_at = jiffies;
+ } else
+ /* there is another entry, we can simply delete this
+ * one and can still use the other one.
+ */
+ tt_global_del_orig_entry(bat_priv, tt_global_entry,
+ orig_node, message);
+}
+
+static void tt_global_del(struct bat_priv *bat_priv,
+ struct orig_node *orig_node,
+ const unsigned char *addr,
+ const char *message, bool roaming)
{
struct tt_global_entry *tt_global_entry = NULL;
struct tt_local_entry *tt_local_entry = NULL;
tt_global_entry = tt_global_hash_find(bat_priv, addr);
- if (!tt_global_entry || tt_global_entry->orig_node != orig_node)
+ if (!tt_global_entry)
goto out;
- if (!roaming)
- goto out_del;
+ if (!roaming) {
+ tt_global_del_orig_entry(bat_priv, tt_global_entry, orig_node,
+ message);
+
+ if (hlist_empty(&tt_global_entry->orig_list))
+ tt_global_del_struct(bat_priv, tt_global_entry,
+ message);
+
+ goto out;
+ }
/* if we are deleting a global entry due to a roam
* event, there are two possibilities:
- * 1) the client roamed from node A to node B => we mark
+ * 1) the client roamed from node A to node B => if there
+ * is only one originator left for this client, we mark
* it with TT_CLIENT_ROAM, we start a timer and we
* wait for node B to claim it. In case of timeout
* the entry is purged.
+ *
+ * If there are other originators left, we directly delete
+ * the originator.
* 2) the client roamed to us => we can directly delete
* the global entry, since it is useless now. */
+
tt_local_entry = tt_local_hash_find(bat_priv,
tt_global_entry->common.addr);
- if (!tt_local_entry) {
- tt_global_entry->common.flags |= TT_CLIENT_ROAM;
- tt_global_entry->roam_at = jiffies;
- goto out;
- }
+ if (tt_local_entry) {
+ /* local entry exists, case 2: client roamed to us. */
+ tt_global_del_orig_list(tt_global_entry);
+ tt_global_del_struct(bat_priv, tt_global_entry, message);
+ } else
+ /* no local entry exists, case 1: check for roaming */
+ tt_global_del_roaming(bat_priv, tt_global_entry, orig_node,
+ message);
-out_del:
- _tt_global_del(bat_priv, tt_global_entry, message);
out:
if (tt_global_entry)
@@ -709,11 +895,14 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
tt_global_entry = container_of(tt_common_entry,
struct tt_global_entry,
common);
- if (tt_global_entry->orig_node == orig_node) {
+
+ tt_global_del_orig_entry(bat_priv, tt_global_entry,
+ orig_node, message);
+
+ if (hlist_empty(&tt_global_entry->orig_list)) {
bat_dbg(DBG_TT, bat_priv,
- "Deleting global tt entry %pM (via %pM): %s\n",
+ "Deleting global tt entry %pM: %s\n",
tt_global_entry->common.addr,
- tt_global_entry->orig_node->orig,
message);
hlist_del_rcu(node);
tt_global_entry_free_ref(tt_global_entry);
@@ -754,7 +943,7 @@ static void tt_global_roam_purge(struct bat_priv *bat_priv)
bat_dbg(DBG_TT, bat_priv,
"Deleting global tt entry (%pM): Roaming timeout\n",
tt_global_entry->common.addr);
- atomic_dec(&tt_global_entry->orig_node->tt_size);
+
hlist_del_rcu(node);
tt_global_entry_free_ref(tt_global_entry);
}
@@ -817,6 +1006,11 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv,
struct tt_local_entry *tt_local_entry = NULL;
struct tt_global_entry *tt_global_entry = NULL;
struct orig_node *orig_node = NULL;
+ struct neigh_node *router = NULL;
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct tt_orig_list_entry *orig_entry;
+ int best_tq;
if (src && atomic_read(&bat_priv->ap_isolation)) {
tt_local_entry = tt_local_hash_find(bat_priv, src);
@@ -833,11 +1027,25 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv,
if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry))
goto out;
- if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
- goto out;
+ best_tq = 0;
- orig_node = tt_global_entry->orig_node;
+ rcu_read_lock();
+ head = &tt_global_entry->orig_list;
+ hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+ router = orig_node_get_router(orig_entry->orig_node);
+ if (!router)
+ continue;
+ if (router->tq_avg > best_tq) {
+ orig_node = orig_entry->orig_node;
+ best_tq = router->tq_avg;
+ }
+ neigh_node_free_ref(router);
+ }
+ /* found anything? */
+ if (orig_node && !atomic_inc_not_zero(&orig_node->refcount))
+ orig_node = NULL;
+ rcu_read_unlock();
out:
if (tt_global_entry)
tt_global_entry_free_ref(tt_global_entry);
@@ -848,7 +1056,8 @@ out:
}
/* Calculates the checksum of the local table of a given orig_node */
-uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node)
+static uint16_t tt_global_crc(struct bat_priv *bat_priv,
+ struct orig_node *orig_node)
{
uint16_t total = 0, total_one;
struct hashtable_t *hash = bat_priv->tt_global_hash;
@@ -868,20 +1077,26 @@ uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node)
tt_global_entry = container_of(tt_common_entry,
struct tt_global_entry,
common);
- if (compare_eth(tt_global_entry->orig_node,
- orig_node)) {
- /* Roaming clients are in the global table for
- * consistency only. They don't have to be
- * taken into account while computing the
- * global crc */
- if (tt_common_entry->flags & TT_CLIENT_ROAM)
- continue;
- total_one = 0;
- for (j = 0; j < ETH_ALEN; j++)
- total_one = crc16_byte(total_one,
- tt_common_entry->addr[j]);
- total ^= total_one;
- }
+ /* Roaming clients are in the global table for
+ * consistency only. They don't have to be
+ * taken into account while computing the
+ * global crc
+ */
+ if (tt_global_entry->common.flags & TT_CLIENT_ROAM)
+ continue;
+
+ /* find out if this global entry is announced by this
+ * originator
+ */
+ if (!tt_global_entry_has_orig(tt_global_entry,
+ orig_node))
+ continue;
+
+ total_one = 0;
+ for (j = 0; j < ETH_ALEN; j++)
+ total_one = crc16_byte(total_one,
+ tt_global_entry->common.addr[j]);
+ total ^= total_one;
}
rcu_read_unlock();
}
@@ -936,8 +1151,10 @@ static void tt_req_list_free(struct bat_priv *bat_priv)
spin_unlock_bh(&bat_priv->tt_req_list_lock);
}
-void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
- const unsigned char *tt_buff, uint8_t tt_num_changes)
+static void tt_save_orig_buffer(struct bat_priv *bat_priv,
+ struct orig_node *orig_node,
+ const unsigned char *tt_buff,
+ uint8_t tt_num_changes)
{
uint16_t tt_buff_len = tt_len(tt_num_changes);
@@ -1020,7 +1237,7 @@ static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
common);
- return (tt_global_entry->orig_node == orig_node);
+ return tt_global_entry_has_orig(tt_global_entry, orig_node);
}
static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
@@ -1124,7 +1341,7 @@ static int send_tt_request(struct bat_priv *bat_priv,
memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
tt_request->header.ttl = TTL;
tt_request->ttvn = ttvn;
- tt_request->tt_data = tt_crc;
+ tt_request->tt_data = htons(tt_crc);
tt_request->flags = TT_REQUEST;
if (full_table)
@@ -1401,10 +1618,15 @@ out:
bool send_tt_response(struct bat_priv *bat_priv,
struct tt_query_packet *tt_request)
{
- if (is_my_mac(tt_request->dst))
+ if (is_my_mac(tt_request->dst)) {
+ /* don't answer backbone gws! */
+ if (bla_is_backbone_gw_orig(bat_priv, tt_request->src))
+ return true;
+
return send_my_tt_response(bat_priv, tt_request);
- else
+ } else {
return send_other_tt_response(bat_priv, tt_request);
+ }
}
static void _tt_update_changes(struct bat_priv *bat_priv,
@@ -1508,6 +1730,10 @@ void handle_tt_response(struct bat_priv *bat_priv,
tt_response->src, tt_response->ttvn, tt_response->tt_data,
(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
+ /* we should have never asked a backbone gw */
+ if (bla_is_backbone_gw_orig(bat_priv, tt_response->src))
+ goto out;
+
orig_node = orig_hash_find(bat_priv, tt_response->src);
if (!orig_node)
goto out;
@@ -1627,8 +1853,8 @@ unlock:
return ret;
}
-void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
- struct orig_node *orig_node)
+static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
+ struct orig_node *orig_node)
{
struct neigh_node *neigh_node = NULL;
struct sk_buff *skb = NULL;
@@ -1796,6 +2022,8 @@ void tt_commit_changes(struct bat_priv *bat_priv)
/* Increment the TTVN only once per OGM interval */
atomic_inc(&bat_priv->ttvn);
+ bat_dbg(DBG_TT, bat_priv, "Local changes committed, updating to ttvn %u\n",
+ (uint8_t)atomic_read(&bat_priv->ttvn));
bat_priv->tt_poss_change = false;
}
@@ -1836,6 +2064,10 @@ void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
bool full_table = true;
+ /* don't care about a backbone gateway's updates. */
+ if (bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
+ return;
+
/* orig table not initialised AND first diff is in the OGM OR the ttvn
* increased by one -> we can apply the attached changes */
if ((!orig_node->tt_initialised && ttvn == 1) ||
@@ -1873,6 +2105,7 @@ void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
} else {
/* if we missed more than one change or our tables are not
* in sync anymore -> request fresh tt data */
+
if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
orig_node->tt_crc != tt_crc) {
request_table:
@@ -1886,3 +2119,22 @@ request_table:
}
}
}
+
+/* returns true if we know that the client has moved from its old
+ * originator to another one. The entry is still kept for consistency
+ * purposes
+ */
+bool tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr)
+{
+ struct tt_global_entry *tt_global_entry;
+ bool ret = false;
+
+ tt_global_entry = tt_global_hash_find(bat_priv, addr);
+ if (!tt_global_entry)
+ goto out;
+
+ ret = tt_global_entry->common.flags & TT_CLIENT_ROAM;
+ tt_global_entry_free_ref(tt_global_entry);
+out:
+ return ret;
+}
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index c753633b1da1..c43374dc364d 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -1,7 +1,7 @@
/*
* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
- * Marek Lindner, Simon Wunderlich
+ * Marek Lindner, Simon Wunderlich, Antonio Quartulli
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -39,27 +39,21 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
int tt_global_seq_print_text(struct seq_file *seq, void *offset);
void tt_global_del_orig(struct bat_priv *bat_priv,
struct orig_node *orig_node, const char *message);
-void tt_global_del(struct bat_priv *bat_priv,
- struct orig_node *orig_node, const unsigned char *addr,
- const char *message, bool roaming);
struct orig_node *transtable_search(struct bat_priv *bat_priv,
const uint8_t *src, const uint8_t *addr);
-void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
- const unsigned char *tt_buff, uint8_t tt_num_changes);
uint16_t tt_local_crc(struct bat_priv *bat_priv);
-uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node);
void tt_free(struct bat_priv *bat_priv);
bool send_tt_response(struct bat_priv *bat_priv,
struct tt_query_packet *tt_request);
bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr);
void handle_tt_response(struct bat_priv *bat_priv,
struct tt_query_packet *tt_response);
-void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
- struct orig_node *orig_node);
void tt_commit_changes(struct bat_priv *bat_priv);
bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst);
void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
const unsigned char *tt_buff, uint8_t tt_num_changes,
uint8_t ttvn, uint16_t tt_crc);
+bool tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr);
+
#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 302efb523475..61308e8016ff 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -27,7 +27,7 @@
#include "packet.h"
#include "bitarray.h"
-#define BAT_HEADER_LEN (sizeof(struct ethhdr) + \
+#define BAT_HEADER_LEN (ETH_HLEN + \
((sizeof(struct unicast_packet) > sizeof(struct bcast_packet) ? \
sizeof(struct unicast_packet) : \
sizeof(struct bcast_packet))))
@@ -52,7 +52,7 @@ struct hard_iface {
/**
* orig_node - structure for orig_list maintaining nodes of mesh
* @primary_addr: hosts primary interface address
- * @last_valid: when last packet from this node was received
+ * @last_seen: when last packet from this node was received
* @bcast_seqno_reset: time when the broadcast seqno window was reset
* @batman_seqno_reset: time when the batman seqno window was reset
* @gw_flags: flags related to gateway class
@@ -70,7 +70,7 @@ struct orig_node {
struct neigh_node __rcu *router; /* rcu protected pointer */
unsigned long *bcast_own;
uint8_t *bcast_own_sum;
- unsigned long last_valid;
+ unsigned long last_seen;
unsigned long bcast_seqno_reset;
unsigned long batman_seqno_reset;
uint8_t gw_flags;
@@ -90,7 +90,7 @@ struct orig_node {
bool tt_poss_change;
uint32_t last_real_seqno;
uint8_t last_ttl;
- unsigned long bcast_bits[NUM_WORDS];
+ DECLARE_BITMAP(bcast_bits, TQ_LOCAL_WINDOW_SIZE);
uint32_t last_bcast_seqno;
struct hlist_head neigh_list;
struct list_head frag_list;
@@ -120,7 +120,7 @@ struct gw_node {
/**
* neigh_node
- * @last_valid: when last packet via this neighbor was received
+ * @last_seen: when last packet via this neighbor was received
*/
struct neigh_node {
struct hlist_node list;
@@ -131,15 +131,22 @@ struct neigh_node {
uint8_t tq_avg;
uint8_t last_ttl;
struct list_head bonding_list;
- unsigned long last_valid;
- unsigned long real_bits[NUM_WORDS];
+ unsigned long last_seen;
+ DECLARE_BITMAP(real_bits, TQ_LOCAL_WINDOW_SIZE);
atomic_t refcount;
struct rcu_head rcu;
struct orig_node *orig_node;
struct hard_iface *if_incoming;
- spinlock_t tq_lock; /* protects: tq_recv, tq_index */
+ spinlock_t lq_update_lock; /* protects: tq_recv, tq_index */
};
+#ifdef CONFIG_BATMAN_ADV_BLA
+struct bcast_duplist_entry {
+ uint8_t orig[ETH_ALEN];
+ uint16_t crc;
+ unsigned long entrytime;
+};
+#endif
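The bcast_duplist ring added above backs the bridge loop avoidance duplicate check: a broadcast whose payload CRC was seen recently from a different originator is taken to be the same frame re-injected by another backbone gateway. A hedged sketch of such a check; the walk order, the timeout policy and the name duplist_check() are illustrative assumptions, not the kernel routine:

/* sketch only: scan direction and timeout handling are assumptions;
 * a userspace build needs <stdbool.h>, <stdint.h> and <string.h> */
static bool duplist_check(const struct bcast_duplist_entry *ring, int size,
			  int curr, uint16_t crc, const uint8_t *orig,
			  unsigned long now, unsigned long timeout)
{
	int i, idx;

	for (i = 0; i < size; i++) {
		/* walk backwards from the most recently stored entry */
		idx = (curr + size - i) % size;

		if (now - ring[idx].entrytime > timeout)
			continue;	/* too old to count as a duplicate */

		if (ring[idx].crc != crc)
			continue;	/* different payload */

		/* same payload, different backbone gateway: duplicate */
		if (memcmp(ring[idx].orig, orig, 6) != 0)	/* ETH_ALEN */
			return true;
	}

	return false;
}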
struct bat_priv {
atomic_t mesh_state;
@@ -148,6 +155,7 @@ struct bat_priv {
atomic_t bonding; /* boolean */
atomic_t fragmentation; /* boolean */
atomic_t ap_isolation; /* boolean */
+ atomic_t bridge_loop_avoidance; /* boolean */
atomic_t vis_mode; /* VIS_TYPE_* */
atomic_t gw_mode; /* GW_MODE_* */
atomic_t gw_sel_class; /* uint */
@@ -161,6 +169,7 @@ struct bat_priv {
atomic_t ttvn; /* translation table version number */
atomic_t tt_ogm_append_cnt;
atomic_t tt_local_changes; /* changes registered in a OGM interval */
+ atomic_t bla_num_requests; /* number of bla requests in flight */
/* The tt_poss_change flag is used to detect an ongoing roaming phase.
* If true, then I received a Roaming_adv and I have to inspect every
* packet directed to me to check whether I am still the true
@@ -174,15 +183,23 @@ struct bat_priv {
struct hlist_head forw_bat_list;
struct hlist_head forw_bcast_list;
struct hlist_head gw_list;
- struct hlist_head softif_neigh_vids;
struct list_head tt_changes_list; /* tracks changes in a OGM int */
struct list_head vis_send_list;
struct hashtable_t *orig_hash;
struct hashtable_t *tt_local_hash;
struct hashtable_t *tt_global_hash;
+#ifdef CONFIG_BATMAN_ADV_BLA
+ struct hashtable_t *claim_hash;
+ struct hashtable_t *backbone_hash;
+#endif
struct list_head tt_req_list; /* list of pending tt_requests */
struct list_head tt_roam_list;
struct hashtable_t *vis_hash;
+#ifdef CONFIG_BATMAN_ADV_BLA
+ struct bcast_duplist_entry bcast_duplist[DUPLIST_SIZE];
+ int bcast_duplist_curr;
+ struct bla_claim_dst claim_dest;
+#endif
spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
spinlock_t forw_bcast_list_lock; /* protects */
spinlock_t tt_changes_list_lock; /* protects tt_changes */
@@ -191,8 +208,6 @@ struct bat_priv {
spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
spinlock_t vis_hash_lock; /* protects vis_hash */
spinlock_t vis_list_lock; /* protects vis_info::recv_list */
- spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */
- spinlock_t softif_neigh_vid_lock; /* protects soft-interface vid list */
atomic_t num_local_tt;
/* Checksum of the local table, recomputed before sending a new OGM */
atomic_t tt_crc;
@@ -202,6 +217,7 @@ struct bat_priv {
struct delayed_work tt_work;
struct delayed_work orig_work;
struct delayed_work vis_work;
+ struct delayed_work bla_work;
struct gw_node __rcu *curr_gw; /* rcu protected pointer */
atomic_t gw_reselect;
struct hard_iface __rcu *primary_if; /* rcu protected pointer */
@@ -239,10 +255,41 @@ struct tt_local_entry {
struct tt_global_entry {
struct tt_common_entry common;
+ struct hlist_head orig_list;
+ spinlock_t list_lock; /* protects the list */
+ unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
+};
+
+struct tt_orig_list_entry {
struct orig_node *orig_node;
uint8_t ttvn;
- unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
+ struct rcu_head rcu;
+ struct hlist_node list;
+};
+
+#ifdef CONFIG_BATMAN_ADV_BLA
+struct backbone_gw {
+ uint8_t orig[ETH_ALEN];
+ short vid; /* used VLAN ID */
+ struct hlist_node hash_entry;
+ struct bat_priv *bat_priv;
+ unsigned long lasttime; /* last time we heard of this backbone gw */
+ atomic_t request_sent;
+ atomic_t refcount;
+ struct rcu_head rcu;
+ uint16_t crc; /* crc checksum over all claims */
+};
+
+struct claim {
+ uint8_t addr[ETH_ALEN];
+ short vid;
+ struct backbone_gw *backbone_gw;
+ unsigned long lasttime; /* last time we heard of claim (locals only) */
+ struct rcu_head rcu;
+ atomic_t refcount;
+ struct hlist_node hash_entry;
};
+#endif
struct tt_change_node {
struct list_head list;
@@ -327,41 +374,24 @@ struct recvlist_node {
uint8_t mac[ETH_ALEN];
};
-struct softif_neigh_vid {
- struct hlist_node list;
- struct bat_priv *bat_priv;
- short vid;
- atomic_t refcount;
- struct softif_neigh __rcu *softif_neigh;
- struct rcu_head rcu;
- struct hlist_head softif_neigh_list;
-};
-
-struct softif_neigh {
- struct hlist_node list;
- uint8_t addr[ETH_ALEN];
- unsigned long last_seen;
- atomic_t refcount;
- struct rcu_head rcu;
-};
-
struct bat_algo_ops {
struct hlist_node list;
char *name;
- /* init OGM when hard-interface is enabled */
- void (*bat_ogm_init)(struct hard_iface *hard_iface);
- /* init primary OGM when primary interface is selected */
- void (*bat_ogm_init_primary)(struct hard_iface *hard_iface);
- /* init mac addresses of the OGM belonging to this hard-interface */
- void (*bat_ogm_update_mac)(struct hard_iface *hard_iface);
+ /* init routing info when hard-interface is enabled */
+ int (*bat_iface_enable)(struct hard_iface *hard_iface);
+ /* de-init routing info when hard-interface is disabled */
+ void (*bat_iface_disable)(struct hard_iface *hard_iface);
+ /* (re-)init mac addresses of the protocol information
+ * belonging to this hard-interface
+ */
+ void (*bat_iface_update_mac)(struct hard_iface *hard_iface);
+ /* called when primary interface is selected / changed */
+ void (*bat_primary_iface_set)(struct hard_iface *hard_iface);
/* prepare a new outgoing OGM for the send queue */
void (*bat_ogm_schedule)(struct hard_iface *hard_iface,
int tt_num_changes);
/* send scheduled OGM */
void (*bat_ogm_emit)(struct forw_packet *forw_packet);
- /* receive incoming OGM */
- void (*bat_ogm_receive)(struct hard_iface *if_incoming,
- struct sk_buff *skb);
};
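With the OGM-specific init hooks replaced by interface lifecycle hooks, a routing algorithm module now fills in the ops table roughly as below. This is a sketch modeled on the pattern B.A.T.M.A.N. IV follows in bat_iv_ogm.c; the my_* handlers are hypothetical placeholders:

/* my_* are hypothetical handlers implementing the callbacks above */
static struct bat_algo_ops batman_example __read_mostly = {
	.name = "EXAMPLE",
	.bat_iface_enable = my_iface_enable,
	.bat_iface_disable = my_iface_disable,
	.bat_iface_update_mac = my_iface_update_mac,
	.bat_primary_iface_set = my_primary_iface_set,
	.bat_ogm_schedule = my_ogm_schedule,
	.bat_ogm_emit = my_ogm_emit,
};

static int __init batman_example_init(void)
{
	/* bat_algo_register() (main.c) hooks the table into the core */
	return bat_algo_register(&batman_example);
}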
#endif /* _NET_BATMAN_ADV_TYPES_H_ */
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 676f6a626b2c..74175c210858 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -331,6 +331,14 @@ find_router:
unicast_packet->ttvn =
(uint8_t)atomic_read(&orig_node->last_ttvn);
+ /* inform the destination node that we are still missing a correct route
+ * for this client. The destination will receive this packet and will
+ * try to reroute it because the ttvn contained in the header is less
+ * than the current one
+ */
+ if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
+ unicast_packet->ttvn = unicast_packet->ttvn - 1;
+
if (atomic_read(&bat_priv->fragmentation) &&
data_len + sizeof(*unicast_packet) >
neigh_node->if_incoming->net_dev->mtu) {
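The decrement works because ttvn is an 8-bit counter that the destination compares against its own table version: a header one version behind reads as stale and triggers re-resolution of the client. A tiny sketch of that comparison (hypothetical helper name, not the kernel function):

/* hypothetical receiver-side staleness check, for illustration */
static bool ttvn_is_stale(uint8_t hdr_ttvn, uint8_t curr_ttvn)
{
	/* uint8_t subtraction wraps, so 0x00 still reads as newer
	 * than 0xff after the counter rolls over */
	return (uint8_t)(curr_ttvn - hdr_ttvn) > 0;
}

/* sender side, as in the hunk above:
 * unicast_packet->ttvn = curr_ttvn - 1;  -> ttvn_is_stale() is true */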
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index c4a5b8cafada..cec216fb77c7 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -434,12 +434,12 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
return NULL;
info->skb_packet = dev_alloc_skb(sizeof(*packet) + vis_info_len +
- sizeof(struct ethhdr));
+ ETH_HLEN);
if (!info->skb_packet) {
kfree(info);
return NULL;
}
- skb_reserve(info->skb_packet, sizeof(struct ethhdr));
+ skb_reserve(info->skb_packet, ETH_HLEN);
packet = (struct vis_packet *)skb_put(info->skb_packet, sizeof(*packet)
+ vis_info_len);
@@ -894,11 +894,11 @@ int vis_init(struct bat_priv *bat_priv)
bat_priv->my_vis_info->skb_packet = dev_alloc_skb(sizeof(*packet) +
MAX_VIS_PACKET_SIZE +
- sizeof(struct ethhdr));
+ ETH_HLEN);
if (!bat_priv->my_vis_info->skb_packet)
goto free_info;
- skb_reserve(bat_priv->my_vis_info->skb_packet, sizeof(struct ethhdr));
+ skb_reserve(bat_priv->my_vis_info->skb_packet, ETH_HLEN);
packet = (struct vis_packet *)skb_put(bat_priv->my_vis_info->skb_packet,
sizeof(*packet));
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 72eb187a5f60..46e7f86acfc9 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -210,7 +210,7 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
}
if (sk->sk_state == BT_CONNECTED || !newsock ||
- bt_sk(parent)->defer_setup) {
+ test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) {
bt_accept_unlink(sk);
if (newsock)
sock_graft(sk, newsock);
@@ -410,8 +410,8 @@ static inline unsigned int bt_accept_poll(struct sock *parent)
list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
if (sk->sk_state == BT_CONNECTED ||
- (bt_sk(parent)->defer_setup &&
- sk->sk_state == BT_CONNECT2))
+ (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) &&
+ sk->sk_state == BT_CONNECT2))
return POLLIN | POLLRDNORM;
}
@@ -450,7 +450,7 @@ unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wa
sk->sk_state == BT_CONFIG)
return mask;
- if (sock_writeable(sk))
+ if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index a779ec703323..031d7d656754 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -69,7 +69,7 @@ static struct bnep_session *__bnep_get_session(u8 *dst)
BT_DBG("");
list_for_each_entry(s, &bnep_session_list, list)
- if (!compare_ether_addr(dst, s->eh.h_source))
+ if (ether_addr_equal(dst, s->eh.h_source))
return s;
return NULL;
@@ -340,7 +340,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
}
/* Strip 802.1p header */
- if (ntohs(s->eh.h_proto) == 0x8100) {
+ if (ntohs(s->eh.h_proto) == ETH_P_8021Q) {
if (!skb_pull(skb, 4))
goto badframe;
s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2));
@@ -422,10 +422,10 @@ static inline int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb)
iv[il++] = (struct kvec) { &type, 1 };
len++;
- if (compress_src && !compare_ether_addr(eh->h_dest, s->eh.h_source))
+ if (compress_src && ether_addr_equal(eh->h_dest, s->eh.h_source))
type |= 0x01;
- if (compress_dst && !compare_ether_addr(eh->h_source, s->eh.h_dest))
+ if (compress_dst && ether_addr_equal(eh->h_source, s->eh.h_dest))
type |= 0x02;
if (type)
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 5238b6b3ea6a..3f18a6ed9731 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -223,36 +223,6 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
}
EXPORT_SYMBOL(hci_le_start_enc);
-void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
-{
- struct hci_dev *hdev = conn->hdev;
- struct hci_cp_le_ltk_reply cp;
-
- BT_DBG("%p", conn);
-
- memset(&cp, 0, sizeof(cp));
-
- cp.handle = cpu_to_le16(conn->handle);
- memcpy(cp.ltk, ltk, sizeof(ltk));
-
- hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
-}
-EXPORT_SYMBOL(hci_le_ltk_reply);
-
-void hci_le_ltk_neg_reply(struct hci_conn *conn)
-{
- struct hci_dev *hdev = conn->hdev;
- struct hci_cp_le_ltk_neg_reply cp;
-
- BT_DBG("%p", conn);
-
- memset(&cp, 0, sizeof(cp));
-
- cp.handle = cpu_to_le16(conn->handle);
-
- hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(cp), &cp);
-}
-
/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
@@ -513,7 +483,8 @@ EXPORT_SYMBOL(hci_get_route);
/* Create SCO, ACL or LE connection.
* Device _must_ be locked */
-struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
+struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ __u8 dst_type, __u8 sec_level, __u8 auth_type)
{
struct hci_conn *acl;
struct hci_conn *sco;
@@ -522,23 +493,18 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
BT_DBG("%s dst %s", hdev->name, batostr(dst));
if (type == LE_LINK) {
- struct adv_entry *entry;
-
le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
- if (le)
- return ERR_PTR(-EBUSY);
-
- entry = hci_find_adv_entry(hdev, dst);
- if (!entry)
- return ERR_PTR(-EHOSTUNREACH);
+ if (!le) {
+ le = hci_conn_add(hdev, LE_LINK, dst);
+ if (!le)
+ return ERR_PTR(-ENOMEM);
- le = hci_conn_add(hdev, LE_LINK, dst);
- if (!le)
- return ERR_PTR(-ENOMEM);
-
- le->dst_type = entry->bdaddr_type;
+ le->dst_type = bdaddr_to_le(dst_type);
+ hci_le_connect(le);
+ }
- hci_le_connect(le);
+ le->pending_sec_level = sec_level;
+ le->auth_type = auth_type;
hci_conn_hold(le);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index e33af63a884a..411ace8e647b 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -83,6 +83,7 @@ void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
*/
if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
+ u16 opcode = __le16_to_cpu(sent->opcode);
struct sk_buff *skb;
/* Some CSR based controllers generate a spontaneous
@@ -92,7 +93,7 @@ void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
* command.
*/
- if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET)
+ if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
return;
skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
@@ -251,6 +252,9 @@ static void amp_init(struct hci_dev *hdev)
/* Read Local Version */
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
+
+ /* Read Local AMP Info */
+ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
}
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
@@ -384,7 +388,6 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state)
case DISCOVERY_STOPPED:
if (hdev->discovery.state != DISCOVERY_STARTING)
mgmt_discovering(hdev, 0);
- hdev->discovery.type = 0;
break;
case DISCOVERY_STARTING:
break;
@@ -665,6 +668,11 @@ int hci_dev_open(__u16 dev)
hci_req_lock(hdev);
+ if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
+ ret = -ENODEV;
+ goto done;
+ }
+
if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
ret = -ERFKILL;
goto done;
@@ -1084,32 +1092,6 @@ static const struct rfkill_ops hci_rfkill_ops = {
.set_block = hci_rfkill_set_block,
};
-/* Alloc HCI device */
-struct hci_dev *hci_alloc_dev(void)
-{
- struct hci_dev *hdev;
-
- hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
- if (!hdev)
- return NULL;
-
- hci_init_sysfs(hdev);
- skb_queue_head_init(&hdev->driver_init);
-
- return hdev;
-}
-EXPORT_SYMBOL(hci_alloc_dev);
-
-/* Free HCI device */
-void hci_free_dev(struct hci_dev *hdev)
-{
- skb_queue_purge(&hdev->driver_init);
-
- /* will free via device release */
- put_device(&hdev->dev);
-}
-EXPORT_SYMBOL(hci_free_dev);
-
static void hci_power_on(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
@@ -1210,40 +1192,40 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
return NULL;
}
-static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
+static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
u8 key_type, u8 old_key_type)
{
/* Legacy key */
if (key_type < 0x03)
- return 1;
+ return true;
/* Debug keys are insecure so don't store them persistently */
if (key_type == HCI_LK_DEBUG_COMBINATION)
- return 0;
+ return false;
/* Changed combination key and there's no previous one */
if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
- return 0;
+ return false;
/* Security mode 3 case */
if (!conn)
- return 1;
+ return true;
/* Neither local nor remote side had no-bonding as requirement */
if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
- return 1;
+ return true;
/* Local side had dedicated bonding as requirement */
if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
- return 1;
+ return true;
/* Remote side had dedicated bonding as requirement */
if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
- return 1;
+ return true;
/* If none of the above criteria match, then don't store the key
* persistently */
- return 0;
+ return false;
}
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
@@ -1280,7 +1262,8 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
struct link_key *key, *old_key;
- u8 old_key_type, persistent;
+ u8 old_key_type;
+ bool persistent;
old_key = hci_find_link_key(hdev, bdaddr);
if (old_key) {
@@ -1323,16 +1306,14 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
mgmt_new_link_key(hdev, key, persistent);
- if (!persistent) {
- list_del(&key->list);
- kfree(key);
- }
+ if (conn)
+ conn->flush_key = !persistent;
return 0;
}
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
- int new_key, u8 authenticated, u8 tk[16], u8 enc_size, u16
+ int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
ediv, u8 rand[8])
{
struct smp_ltk *key, *old_key;
@@ -1540,75 +1521,6 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
return mgmt_device_unblocked(hdev, bdaddr, type);
}
-static void hci_clear_adv_cache(struct work_struct *work)
-{
- struct hci_dev *hdev = container_of(work, struct hci_dev,
- adv_work.work);
-
- hci_dev_lock(hdev);
-
- hci_adv_entries_clear(hdev);
-
- hci_dev_unlock(hdev);
-}
-
-int hci_adv_entries_clear(struct hci_dev *hdev)
-{
- struct adv_entry *entry, *tmp;
-
- list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
- list_del(&entry->list);
- kfree(entry);
- }
-
- BT_DBG("%s adv cache cleared", hdev->name);
-
- return 0;
-}
-
-struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
-{
- struct adv_entry *entry;
-
- list_for_each_entry(entry, &hdev->adv_entries, list)
- if (bacmp(bdaddr, &entry->bdaddr) == 0)
- return entry;
-
- return NULL;
-}
-
-static inline int is_connectable_adv(u8 evt_type)
-{
- if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
- return 1;
-
- return 0;
-}
-
-int hci_add_adv_entry(struct hci_dev *hdev,
- struct hci_ev_le_advertising_info *ev) { struct adv_entry *entry; if (!is_connectable_adv(ev->evt_type))
- return -EINVAL;
-
- /* Only new entries should be added to adv_entries. So, if
- * bdaddr was found, don't add it. */
- if (hci_find_adv_entry(hdev, &ev->bdaddr))
- return 0;
-
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- bacpy(&entry->bdaddr, &ev->bdaddr);
- entry->bdaddr_type = ev->bdaddr_type;
-
- list_add(&entry->list, &hdev->adv_entries);
-
- BT_DBG("%s adv entry added: address %s type %u", hdev->name,
- batostr(&entry->bdaddr), entry->bdaddr_type);
-
- return 0;
-}
-
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
struct le_scan_params *param = (struct le_scan_params *) opt;
@@ -1666,6 +1578,24 @@ static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
return 0;
}
+int hci_cancel_le_scan(struct hci_dev *hdev)
+{
+ BT_DBG("%s", hdev->name);
+
+ if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+ return -EALREADY;
+
+ if (cancel_delayed_work(&hdev->le_scan_disable)) {
+ struct hci_cp_le_set_scan_enable cp;
+
+ /* Send HCI command to disable LE Scan */
+ memset(&cp, 0, sizeof(cp));
+ hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
+ }
+
+ return 0;
+}
+
static void le_scan_disable_work(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev,
@@ -1710,95 +1640,103 @@ int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
return 0;
}
-/* Register HCI device */
-int hci_register_dev(struct hci_dev *hdev)
+/* Alloc HCI device */
+struct hci_dev *hci_alloc_dev(void)
{
- struct list_head *head = &hci_dev_list, *p;
- int i, id, error;
-
- BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
-
- if (!hdev->open || !hdev->close)
- return -EINVAL;
-
- /* Do not allow HCI_AMP devices to register at index 0,
- * so the index can be used as the AMP controller ID.
- */
- id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
-
- write_lock(&hci_dev_list_lock);
-
- /* Find first available device id */
- list_for_each(p, &hci_dev_list) {
- if (list_entry(p, struct hci_dev, list)->id != id)
- break;
- head = p; id++;
- }
-
- sprintf(hdev->name, "hci%d", id);
- hdev->id = id;
- list_add_tail(&hdev->list, head);
+ struct hci_dev *hdev;
- mutex_init(&hdev->lock);
+ hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
+ if (!hdev)
+ return NULL;
- hdev->flags = 0;
- hdev->dev_flags = 0;
hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
hdev->esco_type = (ESCO_HV1);
hdev->link_mode = (HCI_LM_ACCEPT);
hdev->io_capability = 0x03; /* No Input No Output */
- hdev->idle_timeout = 0;
hdev->sniff_max_interval = 800;
hdev->sniff_min_interval = 80;
+ mutex_init(&hdev->lock);
+ mutex_init(&hdev->req_lock);
+
+ INIT_LIST_HEAD(&hdev->mgmt_pending);
+ INIT_LIST_HEAD(&hdev->blacklist);
+ INIT_LIST_HEAD(&hdev->uuids);
+ INIT_LIST_HEAD(&hdev->link_keys);
+ INIT_LIST_HEAD(&hdev->long_term_keys);
+ INIT_LIST_HEAD(&hdev->remote_oob_data);
+
INIT_WORK(&hdev->rx_work, hci_rx_work);
INIT_WORK(&hdev->cmd_work, hci_cmd_work);
INIT_WORK(&hdev->tx_work, hci_tx_work);
+ INIT_WORK(&hdev->power_on, hci_power_on);
+ INIT_WORK(&hdev->le_scan, le_scan_work);
+ INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
+ INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
+ INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
+ skb_queue_head_init(&hdev->driver_init);
skb_queue_head_init(&hdev->rx_q);
skb_queue_head_init(&hdev->cmd_q);
skb_queue_head_init(&hdev->raw_q);
- setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
-
- for (i = 0; i < NUM_REASSEMBLY; i++)
- hdev->reassembly[i] = NULL;
-
init_waitqueue_head(&hdev->req_wait_q);
- mutex_init(&hdev->req_lock);
- discovery_init(hdev);
+ setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
+ hci_init_sysfs(hdev);
+ discovery_init(hdev);
hci_conn_hash_init(hdev);
- INIT_LIST_HEAD(&hdev->mgmt_pending);
-
- INIT_LIST_HEAD(&hdev->blacklist);
+ return hdev;
+}
+EXPORT_SYMBOL(hci_alloc_dev);
- INIT_LIST_HEAD(&hdev->uuids);
+/* Free HCI device */
+void hci_free_dev(struct hci_dev *hdev)
+{
+ skb_queue_purge(&hdev->driver_init);
- INIT_LIST_HEAD(&hdev->link_keys);
- INIT_LIST_HEAD(&hdev->long_term_keys);
+ /* will free via device release */
+ put_device(&hdev->dev);
+}
+EXPORT_SYMBOL(hci_free_dev);
- INIT_LIST_HEAD(&hdev->remote_oob_data);
+/* Register HCI device */
+int hci_register_dev(struct hci_dev *hdev)
+{
+ struct list_head *head, *p;
+ int id, error;
- INIT_LIST_HEAD(&hdev->adv_entries);
+ if (!hdev->open || !hdev->close)
+ return -EINVAL;
- INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
- INIT_WORK(&hdev->power_on, hci_power_on);
- INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
+ write_lock(&hci_dev_list_lock);
- INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
+ /* Do not allow HCI_AMP devices to register at index 0,
+ * so the index can be used as the AMP controller ID.
+ */
+ id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
+ head = &hci_dev_list;
- memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
+ /* Find first available device id */
+ list_for_each(p, &hci_dev_list) {
+ int nid = list_entry(p, struct hci_dev, list)->id;
+ if (nid > id)
+ break;
+ if (nid == id)
+ id++;
+ head = p;
+ }
- atomic_set(&hdev->promisc, 0);
+ sprintf(hdev->name, "hci%d", id);
+ hdev->id = id;
- INIT_WORK(&hdev->le_scan, le_scan_work);
+ BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
- INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
+ list_add(&hdev->list, head);
write_unlock(&hci_dev_list_lock);
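The rewritten walk relies on hci_dev_list being kept sorted by id and returns the first unused id at or above the starting point (0 for BR/EDR, so the index can double as the AMP controller ID; 1 for AMP). The same gap-finding logic over a plain sorted array, as a standalone sketch:

/* ids sorted ascending, mirroring hci_dev_list; start is 0 or 1 */
static int first_free_id(const int *ids, int n, int start)
{
	int id = start, i;

	for (i = 0; i < n; i++) {
		if (ids[i] > id)
			break;		/* gap found below ids[i] */
		if (ids[i] == id)
			id++;		/* id taken, try the next one */
	}

	return id;
}

/* e.g. registered ids {0, 1, 3}, start 0 -> returns 2 */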
@@ -1849,6 +1787,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
+ set_bit(HCI_UNREGISTER, &hdev->dev_flags);
+
write_lock(&hci_dev_list_lock);
list_del(&hdev->list);
write_unlock(&hci_dev_list_lock);
@@ -1878,8 +1818,6 @@ void hci_unregister_dev(struct hci_dev *hdev)
hci_del_sysfs(hdev);
- cancel_delayed_work_sync(&hdev->adv_work);
-
destroy_workqueue(hdev->workqueue);
hci_dev_lock(hdev);
@@ -1888,7 +1826,6 @@ void hci_unregister_dev(struct hci_dev *hdev)
hci_link_keys_clear(hdev);
hci_smp_ltks_clear(hdev);
hci_remote_oob_data_clear(hdev);
- hci_adv_entries_clear(hdev);
hci_dev_unlock(hdev);
hci_dev_put(hdev);
@@ -2225,6 +2162,12 @@ static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
struct hci_dev *hdev = conn->hdev;
struct sk_buff *list;
+ skb->len = skb_headlen(skb);
+ skb->data_len = 0;
+
+ bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
+ hci_add_acl_hdr(skb, conn->handle, flags);
+
list = skb_shinfo(skb)->frag_list;
if (!list) {
/* Non fragmented */
@@ -2268,8 +2211,6 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
skb->dev = (void *) hdev;
- bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
- hci_add_acl_hdr(skb, conn->handle, flags);
hci_queue_acl(conn, &chan->data_q, skb, flags);
@@ -2307,7 +2248,7 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *conn = NULL, *c;
- int num = 0, min = ~0;
+ unsigned int num = 0, min = ~0;
/* We don't have to lock device here. Connections are always
* added and removed with TX task disabled. */
@@ -2388,7 +2329,7 @@ static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_chan *chan = NULL;
- int num = 0, min = ~0, cur_prio = 0;
+ unsigned int num = 0, min = ~0, cur_prio = 0;
struct hci_conn *conn;
int cnt, q, conn_num = 0;
@@ -2778,6 +2719,14 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
if (conn) {
hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
+ hci_dev_lock(hdev);
+ if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
+ !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ mgmt_device_connected(hdev, &conn->dst, conn->type,
+ conn->dst_type, 0, NULL, 0,
+ conn->dev_class);
+ hci_dev_unlock(hdev);
+
/* Send to upper protocol */
l2cap_recv_acldata(conn, skb, flags);
return;
@@ -2931,7 +2880,19 @@ int hci_cancel_inquiry(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
if (!test_bit(HCI_INQUIRY, &hdev->flags))
- return -EPERM;
+ return -EALREADY;
return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
+
+u8 bdaddr_to_le(u8 bdaddr_type)
+{
+ switch (bdaddr_type) {
+ case BDADDR_LE_PUBLIC:
+ return ADDR_LE_DEV_PUBLIC;
+
+ default:
+ /* Fallback to LE Random address type */
+ return ADDR_LE_DEV_RANDOM;
+ }
+}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index b37531094c49..4eefb7f65cf6 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -69,6 +69,18 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
hci_conn_check_pending(hdev);
}
+static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
+
+ if (status)
+ return;
+
+ set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
+}
+
static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
@@ -78,6 +90,8 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
if (status)
return;
+ clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
+
hci_conn_check_pending(hdev);
}
@@ -192,7 +206,8 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
hci_req_complete(hdev, HCI_OP_RESET, status);
/* Reset all non-persistent flags */
- hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS));
+ hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) |
+ BIT(HCI_PERIODIC_INQ));
hdev->discovery.state = DISCOVERY_STOPPED;
}
@@ -505,7 +520,7 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
events[5] |= 0x10; /* Synchronous Connection Changed */
if (hdev->features[3] & LMP_RSSI_INQ)
- events[4] |= 0x04; /* Inquiry Result with RSSI */
+ events[4] |= 0x02; /* Inquiry Result with RSSI */
if (hdev->features[5] & LMP_SNIFF_SUBR)
events[5] |= 0x20; /* Sniff Subrating */
@@ -615,6 +630,7 @@ done:
static void hci_setup_link_policy(struct hci_dev *hdev)
{
+ struct hci_cp_write_def_link_policy cp;
u16 link_policy = 0;
if (hdev->features[0] & LMP_RSWITCH)
@@ -626,9 +642,8 @@ static void hci_setup_link_policy(struct hci_dev *hdev)
if (hdev->features[1] & LMP_PARK)
link_policy |= HCI_LP_PARK;
- link_policy = cpu_to_le16(link_policy);
- hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(link_policy),
- &link_policy);
+ cp.policy = cpu_to_le16(link_policy);
+ hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
@@ -710,7 +725,7 @@ static void hci_set_le_support(struct hci_dev *hdev)
memset(&cp, 0, sizeof(cp));
- if (enable_le && test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+ if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
cp.le = 1;
cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
}
@@ -887,11 +902,14 @@ static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+ if (!rp->status)
+ hdev->inq_tx_power = rp->tx_power;
- hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
+ hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, rp->status);
}
static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1082,23 +1100,23 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
set_bit(HCI_LE_SCAN, &hdev->dev_flags);
- cancel_delayed_work_sync(&hdev->adv_work);
-
hci_dev_lock(hdev);
- hci_adv_entries_clear(hdev);
hci_discovery_set_state(hdev, DISCOVERY_FINDING);
hci_dev_unlock(hdev);
break;
case LE_SCANNING_DISABLED:
- if (status)
+ if (status) {
+ hci_dev_lock(hdev);
+ mgmt_stop_discovery_failed(hdev, status);
+ hci_dev_unlock(hdev);
return;
+ }
clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
- schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
-
- if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED) {
+ if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
+ hdev->discovery.state == DISCOVERY_FINDING) {
mgmt_interleaved_discovery(hdev);
} else {
hci_dev_lock(hdev);
@@ -1625,6 +1643,8 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
if (status) {
if (conn && conn->state == BT_CONNECT) {
conn->state = BT_CLOSED;
+ mgmt_connect_failed(hdev, &cp->peer_addr, conn->type,
+ conn->dst_type, status);
hci_proto_connect_cfm(conn, status);
hci_conn_del(conn);
}
@@ -1699,6 +1719,9 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
if (!num_rsp)
return;
+ if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
+ return;
+
hci_dev_lock(hdev);
for (; num_rsp; num_rsp--, info++) {
@@ -1901,6 +1924,8 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
}
if (ev->status == 0) {
+ if (conn->type == ACL_LINK && conn->flush_key)
+ hci_remove_link_key(hdev, &conn->dst);
hci_proto_disconn_cfm(conn, ev->reason);
hci_conn_del(conn);
}
@@ -2037,6 +2062,12 @@ static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *
clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
+ if (ev->status && conn->state == BT_CONNECTED) {
+ hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
+ hci_conn_put(conn);
+ goto unlock;
+ }
+
if (conn->state == BT_CONFIG) {
if (!ev->status)
conn->state = BT_CONNECTED;
@@ -2047,6 +2078,7 @@ static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *
hci_encrypt_cfm(conn, ev->status, ev->encrypt);
}
+unlock:
hci_dev_unlock(hdev);
}
@@ -2100,7 +2132,7 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff
goto unlock;
}
- if (!ev->status) {
+ if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
struct hci_cp_remote_name_req cp;
memset(&cp, 0, sizeof(cp));
bacpy(&cp.bdaddr, &conn->dst);
@@ -2145,6 +2177,10 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
hci_cc_inquiry_cancel(hdev, skb);
break;
+ case HCI_OP_PERIODIC_INQ:
+ hci_cc_periodic_inq(hdev, skb);
+ break;
+
case HCI_OP_EXIT_PERIODIC_INQ:
hci_cc_exit_periodic_inq(hdev, skb);
break;
@@ -2311,6 +2347,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
case HCI_OP_USER_PASSKEY_NEG_REPLY:
hci_cc_user_passkey_neg_reply(hdev, skb);
+ break;
case HCI_OP_LE_SET_SCAN_PARAM:
hci_cc_le_set_scan_param(hdev, skb);
@@ -2796,6 +2833,9 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
if (!num_rsp)
return;
+ if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
+ return;
+
hci_dev_lock(hdev);
if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
@@ -2868,7 +2908,7 @@ static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_b
if (conn->state != BT_CONFIG)
goto unlock;
- if (!ev->status) {
+ if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
struct hci_cp_remote_name_req cp;
memset(&cp, 0, sizeof(cp));
bacpy(&cp.bdaddr, &conn->dst);
@@ -2961,12 +3001,16 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
struct inquiry_data data;
struct extended_inquiry_info *info = (void *) (skb->data + 1);
int num_rsp = *((__u8 *) skb->data);
+ size_t eir_len;
BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
if (!num_rsp)
return;
+ if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
+ return;
+
hci_dev_lock(hdev);
for (; num_rsp; num_rsp--, info++) {
@@ -2990,9 +3034,10 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
name_known = hci_inquiry_cache_update(hdev, &data, name_known,
&ssp);
+ eir_len = eir_get_length(info->data, sizeof(info->data));
mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
info->dev_class, info->rssi, !name_known,
- ssp, info->data, sizeof(info->data));
+ ssp, info->data, eir_len);
}
hci_dev_unlock(hdev);
@@ -3312,8 +3357,6 @@ static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
while (num_reports--) {
struct hci_ev_le_advertising_info *ev = ptr;
- hci_add_adv_entry(hdev, ev);
-
rssi = ev->data[ev->length];
mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
NULL, rssi, 0, 1, ev->data, ev->length);
@@ -3333,7 +3376,7 @@ static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
struct hci_conn *conn;
struct smp_ltk *ltk;
- BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle));
+ BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));
hci_dev_lock(hdev);
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index bc154298979a..937f3187eafa 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -444,8 +444,8 @@ static const struct file_operations blacklist_fops = {
static void print_bt_uuid(struct seq_file *f, u8 *uuid)
{
- u32 data0, data4;
- u16 data1, data2, data3, data5;
+ __be32 data0, data4;
+ __be16 data1, data2, data3, data5;
memcpy(&data0, &uuid[0], 4);
memcpy(&data1, &uuid[4], 2);
@@ -533,7 +533,6 @@ int hci_add_sysfs(struct hci_dev *hdev)
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
- dev->parent = hdev->parent;
dev_set_name(dev, "%s", hdev->name);
err = device_add(dev);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index d478be11d562..2c20d765b394 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -1195,41 +1195,16 @@ int hidp_get_conninfo(struct hidp_conninfo *ci)
return err;
}
-static const struct hid_device_id hidp_table[] = {
- { HID_BLUETOOTH_DEVICE(HID_ANY_ID, HID_ANY_ID) },
- { }
-};
-
-static struct hid_driver hidp_driver = {
- .name = "generic-bluetooth",
- .id_table = hidp_table,
-};
-
static int __init hidp_init(void)
{
- int ret;
-
BT_INFO("HIDP (Human Interface Emulation) ver %s", VERSION);
- ret = hid_register_driver(&hidp_driver);
- if (ret)
- goto err;
-
- ret = hidp_init_sockets();
- if (ret)
- goto err_drv;
-
- return 0;
-err_drv:
- hid_unregister_driver(&hidp_driver);
-err:
- return ret;
+ return hidp_init_sockets();
}
static void __exit hidp_exit(void)
{
hidp_cleanup_sockets();
- hid_unregister_driver(&hidp_driver);
}
module_init(hidp_init);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index b8e17e4dac8b..24f144b72a96 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -4,6 +4,7 @@
Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
Copyright (C) 2010 Google Inc.
Copyright (C) 2011 ProFUSION Embedded Systems
+ Copyright (c) 2012 Code Aurora Forum. All rights reserved.
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -70,7 +71,7 @@ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
- struct l2cap_chan *chan, int err);
+ struct l2cap_chan *chan, int err);
/* ---- L2CAP channels ---- */
@@ -97,13 +98,15 @@ static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16
}
/* Find channel with given SCID.
- * Returns locked socket */
+ * Returns locked channel. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
struct l2cap_chan *c;
mutex_lock(&conn->chan_lock);
c = __l2cap_get_chan_by_scid(conn, cid);
+ if (c)
+ l2cap_chan_lock(c);
mutex_unlock(&conn->chan_lock);
return c;
@@ -120,17 +123,6 @@ static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8
return NULL;
}
-static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
-{
- struct l2cap_chan *c;
-
- mutex_lock(&conn->chan_lock);
- c = __l2cap_get_chan_by_ident(conn, ident);
- mutex_unlock(&conn->chan_lock);
-
- return c;
-}
-
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
{
struct l2cap_chan *c;
@@ -232,6 +224,124 @@ static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
release_sock(sk);
}
+/* ---- L2CAP sequence number lists ---- */
+
+/* For ERTM, ordered lists of sequence numbers must be tracked for
+ * SREJ requests that are received and for frames that are to be
+ * retransmitted. These seq_list functions implement a singly-linked
+ * list in an array, where membership in the list can also be checked
+ * in constant time. Items can also be added to the tail of the list
+ * and removed from the head in constant time, without further memory
+ * allocs or frees.
+ */
+
+static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
+{
+ size_t alloc_size, i;
+
+ /* Allocated size is a power of 2 to map sequence numbers
+ * (which may be up to 14 bits) in to a smaller array that is
+ * sized for the negotiated ERTM transmit windows.
+ */
+ alloc_size = roundup_pow_of_two(size);
+
+ seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
+ if (!seq_list->list)
+ return -ENOMEM;
+
+ seq_list->mask = alloc_size - 1;
+ seq_list->head = L2CAP_SEQ_LIST_CLEAR;
+ seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
+ for (i = 0; i < alloc_size; i++)
+ seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
+
+ return 0;
+}
+
+static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
+{
+ kfree(seq_list->list);
+}
+
+static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
+ u16 seq)
+{
+ /* Constant-time check for list membership */
+ return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
+}
+
+static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
+{
+ u16 mask = seq_list->mask;
+
+ if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
+ /* In case someone tries to pop the head of an empty list */
+ return L2CAP_SEQ_LIST_CLEAR;
+ } else if (seq_list->head == seq) {
+ /* Head can be removed in constant time */
+ seq_list->head = seq_list->list[seq & mask];
+ seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
+
+ if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
+ seq_list->head = L2CAP_SEQ_LIST_CLEAR;
+ seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
+ }
+ } else {
+ /* Walk the list to find the sequence number */
+ u16 prev = seq_list->head;
+ while (seq_list->list[prev & mask] != seq) {
+ prev = seq_list->list[prev & mask];
+ if (prev == L2CAP_SEQ_LIST_TAIL)
+ return L2CAP_SEQ_LIST_CLEAR;
+ }
+
+ /* Unlink the number from the list and clear it */
+ seq_list->list[prev & mask] = seq_list->list[seq & mask];
+ seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
+ if (seq_list->tail == seq)
+ seq_list->tail = prev;
+ }
+ return seq;
+}
+
+static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
+{
+ /* Remove the head in constant time */
+ return l2cap_seq_list_remove(seq_list, seq_list->head);
+}
+
+static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
+{
+ u16 i;
+
+ if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
+ return;
+
+ for (i = 0; i <= seq_list->mask; i++)
+ seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
+
+ seq_list->head = L2CAP_SEQ_LIST_CLEAR;
+ seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
+}
+
+static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
+{
+ u16 mask = seq_list->mask;
+
+ /* All appends happen in constant time */
+
+ if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
+ return;
+
+ if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
+ seq_list->head = seq;
+ else
+ seq_list->list[seq_list->tail & mask] = seq;
+
+ seq_list->tail = seq;
+ seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
+}
+
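Taken together, the helpers above give FIFO append/pop plus constant-time membership at the cost of one u16 slot per possible in-window sequence number. A usage sketch, assumed to live in the same file since the helpers are static:

static void seq_list_demo(void)
{
	struct l2cap_seq_list list;

	/* a tx_win of 40 rounds the array up to 64 slots, mask 0x3f */
	if (l2cap_seq_list_init(&list, 40))
		return;

	l2cap_seq_list_append(&list, 5);
	l2cap_seq_list_append(&list, 9);
	l2cap_seq_list_append(&list, 5);	/* duplicate: no-op */

	/* l2cap_seq_list_contains(&list, 9) -> true, O(1) lookup
	 * l2cap_seq_list_pop(&list)        -> 5, then 9 (FIFO)
	 * l2cap_seq_list_remove(&list, 9) can also unlink mid-list */

	l2cap_seq_list_clear(&list);
	l2cap_seq_list_free(&list);
}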
static void l2cap_chan_timeout(struct work_struct *work)
{
struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
@@ -262,7 +372,7 @@ static void l2cap_chan_timeout(struct work_struct *work)
l2cap_chan_put(chan);
}
-struct l2cap_chan *l2cap_chan_create(struct sock *sk)
+struct l2cap_chan *l2cap_chan_create(void)
{
struct l2cap_chan *chan;
@@ -272,8 +382,6 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
mutex_init(&chan->lock);
- chan->sk = sk;
-
write_lock(&chan_list_lock);
list_add(&chan->global_l, &chan_list);
write_unlock(&chan_list_lock);
@@ -284,7 +392,7 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
atomic_set(&chan->refcnt, 1);
- BT_DBG("sk %p chan %p", sk, chan);
+ BT_DBG("chan %p", chan);
return chan;
}
@@ -298,10 +406,21 @@ void l2cap_chan_destroy(struct l2cap_chan *chan)
l2cap_chan_put(chan);
}
-void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+void l2cap_chan_set_defaults(struct l2cap_chan *chan)
+{
+ chan->fcs = L2CAP_FCS_CRC16;
+ chan->max_tx = L2CAP_DEFAULT_MAX_TX;
+ chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
+ chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
+ chan->sec_level = BT_SECURITY_LOW;
+
+ set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+}
+
+static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
- chan->psm, chan->dcid);
+ __le16_to_cpu(chan->psm), chan->dcid);
conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
@@ -347,7 +466,7 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
list_add(&chan->list, &conn->chan_l);
}
-void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
mutex_lock(&conn->chan_lock);
__l2cap_chan_add(conn, chan);
@@ -405,6 +524,8 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
skb_queue_purge(&chan->srej_q);
+ l2cap_seq_list_free(&chan->srej_list);
+ l2cap_seq_list_free(&chan->retrans_list);
list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
list_del(&l->list);
kfree(l);
@@ -453,7 +574,6 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
case BT_CONFIG:
if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
conn->hcon->type == ACL_LINK) {
- __clear_chan_timer(chan);
__set_chan_timer(chan, sk->sk_sndtimeo);
l2cap_send_disconn_req(conn, chan, reason);
} else
@@ -466,7 +586,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
struct l2cap_conn_rsp rsp;
__u16 result;
- if (bt_sk(sk)->defer_setup)
+ if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
result = L2CAP_CR_SEC_BLOCK;
else
result = L2CAP_CR_BAD_PSM;
@@ -599,6 +719,117 @@ static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
hci_send_acl(chan->conn->hchan, skb, flags);
}
+static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
+{
+ control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
+ control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
+
+ if (enh & L2CAP_CTRL_FRAME_TYPE) {
+ /* S-Frame */
+ control->sframe = 1;
+ control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
+ control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
+
+ control->sar = 0;
+ control->txseq = 0;
+ } else {
+ /* I-Frame */
+ control->sframe = 0;
+ control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
+ control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
+
+ control->poll = 0;
+ control->super = 0;
+ }
+}
+
+static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
+{
+ control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
+ control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
+
+ if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
+ /* S-Frame */
+ control->sframe = 1;
+ control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
+ control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
+
+ control->sar = 0;
+ control->txseq = 0;
+ } else {
+ /* I-Frame */
+ control->sframe = 0;
+ control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
+ control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
+
+ control->poll = 0;
+ control->super = 0;
+ }
+}
+
+static inline void __unpack_control(struct l2cap_chan *chan,
+ struct sk_buff *skb)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
+ __unpack_extended_control(get_unaligned_le32(skb->data),
+ &bt_cb(skb)->control);
+ } else {
+ __unpack_enhanced_control(get_unaligned_le16(skb->data),
+ &bt_cb(skb)->control);
+ }
+}
+
+static u32 __pack_extended_control(struct l2cap_ctrl *control)
+{
+ u32 packed;
+
+ packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
+ packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
+
+ if (control->sframe) {
+ packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
+ packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
+ packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
+ } else {
+ packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
+ packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
+ }
+
+ return packed;
+}
+
+static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
+{
+ u16 packed;
+
+ packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
+ packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
+
+ if (control->sframe) {
+ packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
+ packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
+ packed |= L2CAP_CTRL_FRAME_TYPE;
+ } else {
+ packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
+ packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
+ }
+
+ return packed;
+}
+
+static inline void __pack_control(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control,
+ struct sk_buff *skb)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
+ put_unaligned_le32(__pack_extended_control(control),
+ skb->data + L2CAP_HDR_SIZE);
+ } else {
+ put_unaligned_le16(__pack_enhanced_control(control),
+ skb->data + L2CAP_HDR_SIZE);
+ }
+}
+
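Packing and unpacking are inverses, so a struct l2cap_ctrl survives a round trip through either wire encoding; the enhanced (16-bit) field keeps txseq/reqseq at 6 bits while the extended (32-bit) field widens them to 14. A round-trip sketch using the helpers above:

static void control_roundtrip_demo(void)
{
	struct l2cap_ctrl in = {
		.sframe = 0,	/* I-frame */
		.sar = 0,	/* unsegmented SDU */
		.txseq = 42,	/* fits the 6-bit enhanced field */
		.reqseq = 17,
		.final = 1,
	}, out;
	u16 enh = __pack_enhanced_control(&in);

	__unpack_enhanced_control(enh, &out);
	/* out.txseq == 42, out.reqseq == 17, out.final == 1 */

	/* channels with FLAG_EXT_CTRL use the 32-bit variant instead:
	 * __unpack_extended_control(__pack_extended_control(&in), &out); */
}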
static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
{
struct sk_buff *skb;
@@ -681,10 +912,38 @@ static void l2cap_send_conn_req(struct l2cap_chan *chan)
l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
+static void l2cap_chan_ready(struct l2cap_chan *chan)
+{
+ struct sock *sk = chan->sk;
+ struct sock *parent;
+
+ lock_sock(sk);
+
+ parent = bt_sk(sk)->parent;
+
+ BT_DBG("sk %p, parent %p", sk, parent);
+
+ chan->conf_state = 0;
+ __clear_chan_timer(chan);
+
+ __l2cap_state_change(chan, BT_CONNECTED);
+ sk->sk_state_change(sk);
+
+ if (parent)
+ parent->sk_data_ready(parent, 0);
+
+ release_sock(sk);
+}
+
static void l2cap_do_start(struct l2cap_chan *chan)
{
struct l2cap_conn *conn = chan->conn;
+ if (conn->hcon->type == LE_LINK) {
+ l2cap_chan_ready(chan);
+ return;
+ }
+
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
return;
@@ -791,7 +1050,8 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
if (l2cap_chan_check_security(chan)) {
lock_sock(sk);
- if (bt_sk(sk)->defer_setup) {
+ if (test_bit(BT_SK_DEFER_SETUP,
+ &bt_sk(sk)->flags)) {
struct sock *parent = bt_sk(sk)->parent;
rsp.result = cpu_to_le16(L2CAP_CR_PEND);
rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
@@ -830,10 +1090,12 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
mutex_unlock(&conn->chan_lock);
}
-/* Find socket with cid and source bdaddr.
+/* Find socket with cid and source/destination bdaddr.
* Returns closest match, locked.
*/
-static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
+static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
+ bdaddr_t *src,
+ bdaddr_t *dst)
{
struct l2cap_chan *c, *c1 = NULL;
@@ -846,14 +1108,22 @@ static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdadd
continue;
if (c->scid == cid) {
+ int src_match, dst_match;
+ int src_any, dst_any;
+
/* Exact match. */
- if (!bacmp(&bt_sk(sk)->src, src)) {
+ src_match = !bacmp(&bt_sk(sk)->src, src);
+ dst_match = !bacmp(&bt_sk(sk)->dst, dst);
+ if (src_match && dst_match) {
read_unlock(&chan_list_lock);
return c;
}
/* Closest match */
- if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
+ src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
+ dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
+ if ((src_match && dst_any) || (src_any && dst_match) ||
+ (src_any && dst_any))
c1 = c;
}
}
@@ -872,7 +1142,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
/* Check if we have socket listening on cid */
pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
- conn->src);
+ conn->src, conn->dst);
if (!pchan)
return;
@@ -910,29 +1180,6 @@ clean:
release_sock(parent);
}
-static void l2cap_chan_ready(struct l2cap_chan *chan)
-{
- struct sock *sk = chan->sk;
- struct sock *parent;
-
- lock_sock(sk);
-
- parent = bt_sk(sk)->parent;
-
- BT_DBG("sk %p, parent %p", sk, parent);
-
- chan->conf_state = 0;
- __clear_chan_timer(chan);
-
- __l2cap_state_change(chan, BT_CONNECTED);
- sk->sk_state_change(sk);
-
- if (parent)
- parent->sk_data_ready(parent, 0);
-
- release_sock(sk);
-}
-
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
struct l2cap_chan *chan;
@@ -1016,6 +1263,7 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
/* Kill channels */
list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
+ l2cap_chan_hold(chan);
l2cap_chan_lock(chan);
l2cap_chan_del(chan, err);
@@ -1023,6 +1271,7 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
l2cap_chan_unlock(chan);
chan->ops->close(chan->data);
+ l2cap_chan_put(chan);
}
mutex_unlock(&conn->chan_lock);
@@ -1100,10 +1349,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
/* ---- Socket interface ---- */
-/* Find socket with psm and source bdaddr.
+/* Find socket with psm and source / destination bdaddr.
* Returns closest match.
*/
-static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
+static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
+ bdaddr_t *src,
+ bdaddr_t *dst)
{
struct l2cap_chan *c, *c1 = NULL;
@@ -1116,14 +1367,22 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr
continue;
if (c->psm == psm) {
+ int src_match, dst_match;
+ int src_any, dst_any;
+
/* Exact match. */
- if (!bacmp(&bt_sk(sk)->src, src)) {
+ src_match = !bacmp(&bt_sk(sk)->src, src);
+ dst_match = !bacmp(&bt_sk(sk)->dst, dst);
+ if (src_match && dst_match) {
read_unlock(&chan_list_lock);
return c;
}
/* Closest match */
- if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
+ src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
+ dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
+ if ((src_match && dst_any) || (src_any && dst_match) ||
+ (src_any && dst_any))
c1 = c;
}
}
@@ -1133,7 +1392,8 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr
return c1;
}
-int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
+int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
+ bdaddr_t *dst, u8 dst_type)
{
struct sock *sk = chan->sk;
bdaddr_t *src = &bt_sk(sk)->src;
@@ -1143,8 +1403,8 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d
__u8 auth_type;
int err;
- BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
- chan->psm);
+ BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
+ dst_type, __le16_to_cpu(chan->psm));
hdev = hci_get_route(dst, src);
if (!hdev)
@@ -1218,11 +1478,11 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d
auth_type = l2cap_get_auth_type(chan);
if (chan->dcid == L2CAP_CID_LE_DATA)
- hcon = hci_connect(hdev, LE_LINK, dst,
- chan->sec_level, auth_type);
+ hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
+ chan->sec_level, auth_type);
else
- hcon = hci_connect(hdev, ACL_LINK, dst,
- chan->sec_level, auth_type);
+ hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
+ chan->sec_level, auth_type);
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
@@ -1236,6 +1496,18 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d
goto done;
}
+ if (hcon->type == LE_LINK) {
+ err = 0;
+
+ if (!list_empty(&conn->chan_l)) {
+ err = -EBUSY;
+ hci_conn_put(hcon);
+ }
+
+ if (err)
+ goto done;
+ }
+
/* Update source addr of the socket */
bacpy(src, conn->src);
@@ -1308,6 +1580,7 @@ static void l2cap_monitor_timeout(struct work_struct *work)
if (chan->retry_count >= chan->remote_max_tx) {
l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
return;
}
@@ -1316,6 +1589,7 @@ static void l2cap_monitor_timeout(struct work_struct *work)
l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
}
static void l2cap_retrans_timeout(struct work_struct *work)
@@ -1335,6 +1609,7 @@ static void l2cap_retrans_timeout(struct work_struct *work)
l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
}
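[note: both timeout handlers now drop the channel reference taken when the delayed work was queued, on every exit path including the early disconnect bail-out. A hedged sketch of that balance; the helpers and fields here are illustrative, not the kernel's:]

#include <stdio.h>

struct chan { int refs; int retries; int max_tx; };

static void chan_put(struct chan *c)
{
        if (--c->refs == 0)
                puts("freed");
}

static void monitor_timeout(struct chan *c)
{
        if (c->max_tx && c->retries >= c->max_tx) {
                puts("disconnect");
                chan_put(c);    /* early path drops the ref too */
                return;
        }
        c->retries++;
        puts("poll peer");
        chan_put(c);            /* normal path */
}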
static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
@@ -1343,7 +1618,7 @@ static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
while ((skb = skb_peek(&chan->tx_q)) &&
chan->unacked_frames) {
- if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
+ if (bt_cb(skb)->control.txseq == chan->expected_ack_seq)
break;
skb = skb_dequeue(&chan->tx_q);
@@ -1365,6 +1640,7 @@ static void l2cap_streaming_send(struct l2cap_chan *chan)
while ((skb = skb_dequeue(&chan->tx_q))) {
control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
control |= __set_txseq(chan, chan->next_tx_seq);
+ control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
__put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
if (chan->fcs == L2CAP_FCS_CRC16) {
@@ -1390,21 +1666,21 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
if (!skb)
return;
- while (bt_cb(skb)->tx_seq != tx_seq) {
+ while (bt_cb(skb)->control.txseq != tx_seq) {
if (skb_queue_is_last(&chan->tx_q, skb))
return;
skb = skb_queue_next(&chan->tx_q, skb);
}
- if (chan->remote_max_tx &&
- bt_cb(skb)->retries == chan->remote_max_tx) {
+ if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
+ chan->remote_max_tx) {
l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
return;
}
tx_skb = skb_clone(skb, GFP_ATOMIC);
- bt_cb(skb)->retries++;
+ bt_cb(skb)->control.retries++;
control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
control &= __get_sar_mask(chan);
@@ -1437,17 +1713,20 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
if (chan->state != BT_CONNECTED)
return -ENOTCONN;
+ if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
+ return 0;
+
while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
- if (chan->remote_max_tx &&
- bt_cb(skb)->retries == chan->remote_max_tx) {
+ if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
+ chan->remote_max_tx) {
l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
break;
}
tx_skb = skb_clone(skb, GFP_ATOMIC);
- bt_cb(skb)->retries++;
+ bt_cb(skb)->control.retries++;
control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
control &= __get_sar_mask(chan);
@@ -1457,6 +1736,7 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
control |= __set_reqseq(chan, chan->buffer_seq);
control |= __set_txseq(chan, chan->next_tx_seq);
+ control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
__put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
@@ -1471,11 +1751,11 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
__set_retrans_timer(chan);
- bt_cb(skb)->tx_seq = chan->next_tx_seq;
+ bt_cb(skb)->control.txseq = chan->next_tx_seq;
chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
- if (bt_cb(skb)->retries == 1) {
+ if (bt_cb(skb)->control.retries == 1) {
chan->unacked_frames++;
if (!nsent++)
@@ -1551,7 +1831,7 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
{
struct l2cap_conn *conn = chan->conn;
struct sk_buff **frag;
- int err, sent = 0;
+ int sent = 0;
if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
return -EFAULT;
@@ -1562,14 +1842,17 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
/* Continuation fragments (no L2CAP header) */
frag = &skb_shinfo(skb)->frag_list;
while (len) {
+ struct sk_buff *tmp;
+
count = min_t(unsigned int, conn->mtu, len);
- *frag = chan->ops->alloc_skb(chan, count,
- msg->msg_flags & MSG_DONTWAIT,
- &err);
+ tmp = chan->ops->alloc_skb(chan, count,
+ msg->msg_flags & MSG_DONTWAIT);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+
+ *frag = tmp;
- if (!*frag)
- return err;
if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
return -EFAULT;
@@ -1578,6 +1861,9 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
sent += count;
len -= count;
+ skb->len += (*frag)->len;
+ skb->data_len += (*frag)->len;
+
frag = &(*frag)->next;
}
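[note: the allocation callback now reports failure through the pointer itself rather than an int out-parameter. A userspace sketch of the ERR_PTR convention this relies on; the helpers mirror the kernel's but are re-implemented here for illustration:]

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
        /* the top MAX_ERRNO addresses encode negative errnos */
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static void *alloc_buf(size_t len)
{
        void *p = malloc(len);
        return p ? p : ERR_PTR(-ENOMEM);
}

[a caller then writes "tmp = alloc_buf(n); if (IS_ERR(tmp)) return PTR_ERR(tmp);", exactly the shape of the fragment loop above.]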
@@ -1598,18 +1884,17 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
count = min_t(unsigned int, (conn->mtu - hlen), len);
skb = chan->ops->alloc_skb(chan, count + hlen,
- msg->msg_flags & MSG_DONTWAIT, &err);
-
- if (!skb)
- return ERR_PTR(err);
+ msg->msg_flags & MSG_DONTWAIT);
+ if (IS_ERR(skb))
+ return skb;
skb->priority = priority;
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
- lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
- put_unaligned_le16(chan->psm, skb_put(skb, 2));
+ lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
+ put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
if (unlikely(err < 0)) {
@@ -1625,25 +1910,24 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
{
struct l2cap_conn *conn = chan->conn;
struct sk_buff *skb;
- int err, count, hlen = L2CAP_HDR_SIZE;
+ int err, count;
struct l2cap_hdr *lh;
BT_DBG("chan %p len %d", chan, (int)len);
- count = min_t(unsigned int, (conn->mtu - hlen), len);
-
- skb = chan->ops->alloc_skb(chan, count + hlen,
- msg->msg_flags & MSG_DONTWAIT, &err);
+ count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
- if (!skb)
- return ERR_PTR(err);
+ skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
+ msg->msg_flags & MSG_DONTWAIT);
+ if (IS_ERR(skb))
+ return skb;
skb->priority = priority;
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
- lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
+ lh->len = cpu_to_le16(len);
err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
if (unlikely(err < 0)) {
@@ -1655,7 +1939,7 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
struct msghdr *msg, size_t len,
- u32 control, u16 sdulen)
+ u16 sdulen)
{
struct l2cap_conn *conn = chan->conn;
struct sk_buff *skb;
@@ -1681,17 +1965,16 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
count = min_t(unsigned int, (conn->mtu - hlen), len);
skb = chan->ops->alloc_skb(chan, count + hlen,
- msg->msg_flags & MSG_DONTWAIT, &err);
-
- if (!skb)
- return ERR_PTR(err);
+ msg->msg_flags & MSG_DONTWAIT);
+ if (IS_ERR(skb))
+ return skb;
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
- __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
+ __put_control(chan, 0, skb_put(skb, __ctrl_size(chan)));
if (sdulen)
put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
@@ -1705,61 +1988,82 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
if (chan->fcs == L2CAP_FCS_CRC16)
put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
- bt_cb(skb)->retries = 0;
+ bt_cb(skb)->control.retries = 0;
return skb;
}
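[note: for reference, the header length that drives count above is built from fixed field sizes. A small sketch of that arithmetic, assuming the BR/EDR constants from the L2CAP spec; the ext_control flag is a stand-in for the channel's control-field mode:]

#include <stddef.h>

static size_t iframe_payload_room(size_t mtu, int ext_control,
                                  int has_sdulen, int has_fcs)
{
        size_t hlen = 4                        /* len + cid header    */
                    + (ext_control ? 4 : 2)    /* control field       */
                    + (has_sdulen ? 2 : 0);    /* SDU length (START)  */

        return mtu - hlen - (has_fcs ? 2 : 0); /* trailing FCS        */
}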
-static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+static int l2cap_segment_sdu(struct l2cap_chan *chan,
+ struct sk_buff_head *seg_queue,
+ struct msghdr *msg, size_t len)
{
struct sk_buff *skb;
- struct sk_buff_head sar_queue;
- u32 control;
- size_t size = 0;
+ u16 sdu_len;
+ size_t pdu_len;
+ int err = 0;
+ u8 sar;
- skb_queue_head_init(&sar_queue);
- control = __set_ctrl_sar(chan, L2CAP_SAR_START);
- skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
- __skb_queue_tail(&sar_queue, skb);
- len -= chan->remote_mps;
- size += chan->remote_mps;
+ /* It is critical that ERTM PDUs fit in a single HCI fragment,
+ * so fragmented skbs are not used. The HCI layer's handling
+ * of fragmented skbs is not compatible with ERTM's queueing.
+ */
- while (len > 0) {
- size_t buflen;
+ /* PDU size is derived from the HCI MTU */
+ pdu_len = chan->conn->mtu;
- if (len > chan->remote_mps) {
- control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
- buflen = chan->remote_mps;
- } else {
- control = __set_ctrl_sar(chan, L2CAP_SAR_END);
- buflen = len;
- }
+ pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
+
+ /* Adjust for largest possible L2CAP overhead. */
+ pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;
+
+ /* Remote device may have requested smaller PDUs */
+ pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
+
+ if (len <= pdu_len) {
+ sar = L2CAP_SAR_UNSEGMENTED;
+ sdu_len = 0;
+ pdu_len = len;
+ } else {
+ sar = L2CAP_SAR_START;
+ sdu_len = len;
+ pdu_len -= L2CAP_SDULEN_SIZE;
+ }
+
+ while (len > 0) {
+ skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
- skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
if (IS_ERR(skb)) {
- skb_queue_purge(&sar_queue);
+ __skb_queue_purge(seg_queue);
return PTR_ERR(skb);
}
- __skb_queue_tail(&sar_queue, skb);
- len -= buflen;
- size += buflen;
+ bt_cb(skb)->control.sar = sar;
+ __skb_queue_tail(seg_queue, skb);
+
+ len -= pdu_len;
+ if (sdu_len) {
+ sdu_len = 0;
+ pdu_len += L2CAP_SDULEN_SIZE;
+ }
+
+ if (len <= pdu_len) {
+ sar = L2CAP_SAR_END;
+ pdu_len = len;
+ } else {
+ sar = L2CAP_SAR_CONTINUE;
+ }
}
- skb_queue_splice_tail(&sar_queue, &chan->tx_q);
- if (chan->tx_send_head == NULL)
- chan->tx_send_head = sar_queue.next;
- return size;
+ return err;
}
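[note: the replacement segmentation walk can be exercised in isolation. A userspace sketch of the SAR bookkeeping above, showing how the 2-byte SDU-length field is charged only to the START PDU and credited back to the following ones:]

#include <stddef.h>
#include <stdio.h>

enum sar { SAR_UNSEGMENTED, SAR_START, SAR_CONTINUE, SAR_END };

static void segment(size_t len, size_t pdu_len)
{
        enum sar sar;
        size_t sdu_len;

        if (len <= pdu_len) {
                sar = SAR_UNSEGMENTED;
                sdu_len = 0;
                pdu_len = len;
        } else {
                sar = SAR_START;
                sdu_len = len;
                pdu_len -= 2;           /* room for the SDULEN field */
        }

        while (len > 0) {
                printf("pdu: sar=%d payload=%zu\n", (int)sar, pdu_len);
                len -= pdu_len;
                if (sdu_len) {          /* only the START PDU had it */
                        sdu_len = 0;
                        pdu_len += 2;
                }
                if (len <= pdu_len) {
                        sar = SAR_END;
                        pdu_len = len;
                } else {
                        sar = SAR_CONTINUE;
                }
        }
}

[segment(10, 4) emits START(2), CONTINUE(4), END(4) — the same PDU sequence the loop above would queue.]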
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
u32 priority)
{
struct sk_buff *skb;
- u32 control;
int err;
+ struct sk_buff_head seg_queue;
/* Connectionless channel */
if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
@@ -1788,42 +2092,47 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
case L2CAP_MODE_ERTM:
case L2CAP_MODE_STREAMING:
- /* Entire SDU fits into one PDU */
- if (len <= chan->remote_mps) {
- control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
- skb = l2cap_create_iframe_pdu(chan, msg, len, control,
- 0);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ /* Check outgoing MTU */
+ if (len > chan->omtu) {
+ err = -EMSGSIZE;
+ break;
+ }
- __skb_queue_tail(&chan->tx_q, skb);
+ __skb_queue_head_init(&seg_queue);
- if (chan->tx_send_head == NULL)
- chan->tx_send_head = skb;
+ /* Do segmentation before calling in to the state machine,
+ * since it's possible to block while waiting for memory
+ * allocation.
+ */
+ err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
- } else {
- /* Segment SDU into multiples PDUs */
- err = l2cap_sar_segment_sdu(chan, msg, len);
- if (err < 0)
- return err;
+ /* The channel could have been closed while segmenting,
+ * check that it is still connected.
+ */
+ if (chan->state != BT_CONNECTED) {
+ __skb_queue_purge(&seg_queue);
+ err = -ENOTCONN;
}
- if (chan->mode == L2CAP_MODE_STREAMING) {
- l2cap_streaming_send(chan);
- err = len;
+ if (err)
break;
- }
- if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
- test_bit(CONN_WAIT_F, &chan->conn_state)) {
- err = len;
- break;
- }
+ if (chan->mode == L2CAP_MODE_ERTM && chan->tx_send_head == NULL)
+ chan->tx_send_head = seg_queue.next;
+ skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
+
+ if (chan->mode == L2CAP_MODE_ERTM)
+ err = l2cap_ertm_send(chan);
+ else
+ l2cap_streaming_send(chan);
- err = l2cap_ertm_send(chan);
if (err >= 0)
err = len;
+ /* If the skbs were not queued for sending, they'll still be in
+ * seg_queue and need to be purged.
+ */
+ __skb_queue_purge(&seg_queue);
break;
default:
@@ -2037,13 +2346,29 @@ static void l2cap_ack_timeout(struct work_struct *work)
l2cap_chan_put(chan);
}
-static inline void l2cap_ertm_init(struct l2cap_chan *chan)
+static inline int l2cap_ertm_init(struct l2cap_chan *chan)
{
+ int err;
+
+ chan->next_tx_seq = 0;
+ chan->expected_tx_seq = 0;
chan->expected_ack_seq = 0;
chan->unacked_frames = 0;
chan->buffer_seq = 0;
chan->num_acked = 0;
chan->frames_sent = 0;
+ chan->last_acked_seq = 0;
+ chan->sdu = NULL;
+ chan->sdu_last_frag = NULL;
+ chan->sdu_len = 0;
+
+ skb_queue_head_init(&chan->tx_q);
+
+ if (chan->mode != L2CAP_MODE_ERTM)
+ return 0;
+
+ chan->rx_state = L2CAP_RX_STATE_RECV;
+ chan->tx_state = L2CAP_TX_STATE_XMIT;
INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
@@ -2052,6 +2377,11 @@ static inline void l2cap_ertm_init(struct l2cap_chan *chan)
skb_queue_head_init(&chan->srej_q);
INIT_LIST_HEAD(&chan->srej_l);
+ err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
+ if (err < 0)
+ return err;
+
+ return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
}
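[note: l2cap_seq_list_init() is introduced elsewhere in this series. A plausible shape, assuming an array-based list sized to the next power of two of the window so indexing can use a mask — the layout is an assumption for illustration, not the kernel's exact struct:]

#include <stdlib.h>

struct seq_list {
        unsigned short head, tail, mask;
        unsigned short *list;
};

static int seq_list_init(struct seq_list *l, unsigned short size)
{
        unsigned short alloc = 1;

        while (alloc < size)    /* round up to a power of two */
                alloc <<= 1;

        l->list = calloc(alloc, sizeof(*l->list));
        if (!l->list)
                return -1;      /* caller propagates the failure */

        l->mask = alloc - 1;
        l->head = l->tail = 0;
        return 0;
}

[whatever the exact layout, the point of the hunk is that ertm_init can now fail, and both config paths disconnect on that failure instead of ignoring it.]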
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
@@ -2375,9 +2705,9 @@ done:
chan->remote_mps = size;
rfc.retrans_timeout =
- le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
+ __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
rfc.monitor_timeout =
- le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
+ __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
set_bit(CONF_MODE_DONE, &chan->conf_state);
@@ -2641,10 +2971,10 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
u16 dcid = 0, scid = __le16_to_cpu(req->scid);
__le16 psm = req->psm;
- BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
+ BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
/* Check if we have socket listening on psm */
- pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
+ pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
if (!pchan) {
result = L2CAP_CR_BAD_PSM;
goto sendresp;
@@ -2703,7 +3033,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
if (l2cap_chan_check_security(chan)) {
- if (bt_sk(sk)->defer_setup) {
+ if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
__l2cap_state_change(chan, BT_CONNECT2);
result = L2CAP_CR_PEND;
status = L2CAP_CS_AUTHOR_PEND;
@@ -2845,7 +3175,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
u16 dcid, flags;
u8 rsp[64];
struct l2cap_chan *chan;
- int len;
+ int len, err = 0;
dcid = __le16_to_cpu(req->dcid);
flags = __le16_to_cpu(req->flags);
@@ -2856,8 +3186,6 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
if (!chan)
return -ENOENT;
- l2cap_chan_lock(chan);
-
if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
struct l2cap_cmd_rej_cid rej;
@@ -2912,13 +3240,15 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
l2cap_state_change(chan, BT_CONNECTED);
- chan->next_tx_seq = 0;
- chan->expected_tx_seq = 0;
- skb_queue_head_init(&chan->tx_q);
- if (chan->mode == L2CAP_MODE_ERTM)
- l2cap_ertm_init(chan);
+ if (chan->mode == L2CAP_MODE_ERTM ||
+ chan->mode == L2CAP_MODE_STREAMING)
+ err = l2cap_ertm_init(chan);
+
+ if (err < 0)
+ l2cap_send_disconn_req(chan->conn, chan, -err);
+ else
+ l2cap_chan_ready(chan);
- l2cap_chan_ready(chan);
goto unlock;
}
@@ -2946,7 +3276,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
unlock:
l2cap_chan_unlock(chan);
- return 0;
+ return err;
}
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
@@ -2954,21 +3284,20 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
u16 scid, flags, result;
struct l2cap_chan *chan;
- int len = cmd->len - sizeof(*rsp);
+ int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
+ int err = 0;
scid = __le16_to_cpu(rsp->scid);
flags = __le16_to_cpu(rsp->flags);
result = __le16_to_cpu(rsp->result);
- BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
- scid, flags, result);
+ BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
+ result, len);
chan = l2cap_get_chan_by_scid(conn, scid);
if (!chan)
return 0;
- l2cap_chan_lock(chan);
-
switch (result) {
case L2CAP_CONF_SUCCESS:
l2cap_conf_rfc_get(chan, rsp->data, len);
@@ -3042,18 +3371,19 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
set_default_fcs(chan);
l2cap_state_change(chan, BT_CONNECTED);
- chan->next_tx_seq = 0;
- chan->expected_tx_seq = 0;
- skb_queue_head_init(&chan->tx_q);
- if (chan->mode == L2CAP_MODE_ERTM)
- l2cap_ertm_init(chan);
+ if (chan->mode == L2CAP_MODE_ERTM ||
+ chan->mode == L2CAP_MODE_STREAMING)
+ err = l2cap_ertm_init(chan);
- l2cap_chan_ready(chan);
+ if (err < 0)
+ l2cap_send_disconn_req(chan->conn, chan, -err);
+ else
+ l2cap_chan_ready(chan);
}
done:
l2cap_chan_unlock(chan);
- return 0;
+ return err;
}
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
@@ -3089,11 +3419,13 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
sk->sk_shutdown = SHUTDOWN_MASK;
release_sock(sk);
+ l2cap_chan_hold(chan);
l2cap_chan_del(chan, ECONNRESET);
l2cap_chan_unlock(chan);
chan->ops->close(chan->data);
+ l2cap_chan_put(chan);
mutex_unlock(&conn->chan_lock);
@@ -3121,11 +3453,13 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
l2cap_chan_lock(chan);
+ l2cap_chan_hold(chan);
l2cap_chan_del(chan, 0);
l2cap_chan_unlock(chan);
chan->ops->close(chan->data);
+ l2cap_chan_put(chan);
mutex_unlock(&conn->chan_lock);
@@ -3262,8 +3596,8 @@ static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
/* Placeholder: Always reject */
rsp.dcid = 0;
rsp.scid = cpu_to_le16(scid);
- rsp.result = L2CAP_CR_NO_MEM;
- rsp.status = L2CAP_CS_NO_INFO;
+ rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
+ rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
sizeof(rsp), &rsp);
@@ -3662,19 +3996,19 @@ static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb,
struct sk_buff *next_skb;
int tx_seq_offset, next_tx_seq_offset;
- bt_cb(skb)->tx_seq = tx_seq;
- bt_cb(skb)->sar = sar;
+ bt_cb(skb)->control.txseq = tx_seq;
+ bt_cb(skb)->control.sar = sar;
next_skb = skb_peek(&chan->srej_q);
tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
while (next_skb) {
- if (bt_cb(next_skb)->tx_seq == tx_seq)
+ if (bt_cb(next_skb)->control.txseq == tx_seq)
return -EINVAL;
next_tx_seq_offset = __seq_offset(chan,
- bt_cb(next_skb)->tx_seq, chan->buffer_seq);
+ bt_cb(next_skb)->control.txseq, chan->buffer_seq);
if (next_tx_seq_offset > tx_seq_offset) {
__skb_queue_before(&chan->srej_q, next_skb, skb);
@@ -3797,6 +4131,7 @@ static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
BT_DBG("chan %p, Enter local busy", chan);
set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
+ l2cap_seq_list_clear(&chan->srej_list);
__set_ack_timer(chan);
}
@@ -3845,11 +4180,11 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
int err;
- if (bt_cb(skb)->tx_seq != tx_seq)
+ if (bt_cb(skb)->control.txseq != tx_seq)
break;
skb = skb_dequeue(&chan->srej_q);
- control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
+ control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
err = l2cap_reassemble_sdu(chan, skb, control);
if (err < 0) {
@@ -3889,6 +4224,7 @@ static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
while (tx_seq != chan->expected_tx_seq) {
control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
control |= __set_reqseq(chan, chan->expected_tx_seq);
+ l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
l2cap_send_sframe(chan, control);
new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
@@ -4019,8 +4355,8 @@ expected:
chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
- bt_cb(skb)->tx_seq = tx_seq;
- bt_cb(skb)->sar = sar;
+ bt_cb(skb)->control.txseq = tx_seq;
+ bt_cb(skb)->control.sar = sar;
__skb_queue_tail(&chan->srej_q, skb);
return 0;
}
@@ -4217,6 +4553,8 @@ static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
u16 req_seq;
int len, next_tx_seq_offset, req_seq_offset;
+ __unpack_control(chan, skb);
+
control = __get_control(chan, skb->data);
skb_pull(skb, __ctrl_size(chan));
len = skb->len;
@@ -4292,8 +4630,6 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
return 0;
}
- l2cap_chan_lock(chan);
-
BT_DBG("chan %p, len %d", chan, skb->len);
if (chan->state != BT_CONNECTED)
@@ -4372,7 +4708,7 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str
{
struct l2cap_chan *chan;
- chan = l2cap_global_chan_by_psm(0, psm, conn->src);
+ chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
if (!chan)
goto drop;
@@ -4393,11 +4729,12 @@ drop:
return 0;
}
-static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
+static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
+ struct sk_buff *skb)
{
struct l2cap_chan *chan;
- chan = l2cap_global_chan_by_scid(0, cid, conn->src);
+ chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
if (!chan)
goto drop;
@@ -4442,7 +4779,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
break;
case L2CAP_CID_CONN_LESS:
- psm = get_unaligned_le16(skb->data);
+ psm = get_unaligned((__le16 *) skb->data);
skb_pull(skb, 2);
l2cap_conless_channel(conn, psm, skb);
break;
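[note: the connectionless PSM is now fetched in wire (little-endian) byte order, matching the __le16 it is passed around as, instead of being converted to host order and swapped back later. A sketch of the distinction, with memcpy standing in for get_unaligned():]

#include <stdint.h>
#include <string.h>

typedef uint16_t le16;          /* illustrative stand-in for __le16 */

static le16 get_le16_raw(const void *p)
{
        le16 v;

        memcpy(&v, p, sizeof(v));  /* unaligned-safe, no byte swap */
        return v;                  /* still in wire byte order     */
}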
@@ -4537,7 +4874,6 @@ static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
if (encrypt == 0x00) {
if (chan->sec_level == BT_SECURITY_MEDIUM) {
- __clear_chan_timer(chan);
__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
} else if (chan->sec_level == BT_SECURITY_HIGH)
l2cap_chan_close(chan, ECONNREFUSED);
@@ -4558,7 +4894,8 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
BT_DBG("conn %p", conn);
if (hcon->type == LE_LINK) {
- smp_distribute_keys(conn, 0);
+ if (!status && encrypt)
+ smp_distribute_keys(conn, 0);
cancel_delayed_work(&conn->security_timer);
}
@@ -4586,6 +4923,11 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
if (!status && (chan->state == BT_CONNECTED ||
chan->state == BT_CONFIG)) {
+ struct sock *sk = chan->sk;
+
+ clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
+ sk->sk_state_change(sk);
+
l2cap_check_encryption(chan, encrypt);
l2cap_chan_unlock(chan);
continue;
@@ -4595,7 +4937,6 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
if (!status) {
l2cap_send_conn_req(chan);
} else {
- __clear_chan_timer(chan);
__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
} else if (chan->state == BT_CONNECT2) {
@@ -4606,7 +4947,8 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
lock_sock(sk);
if (!status) {
- if (bt_sk(sk)->defer_setup) {
+ if (test_bit(BT_SK_DEFER_SETUP,
+ &bt_sk(sk)->flags)) {
struct sock *parent = bt_sk(sk)->parent;
res = L2CAP_CR_PEND;
stat = L2CAP_CS_AUTHOR_PEND;
@@ -4656,8 +4998,6 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
if (!(flags & ACL_CONT)) {
struct l2cap_hdr *hdr;
- struct l2cap_chan *chan;
- u16 cid;
int len;
if (conn->rx_len) {
@@ -4677,7 +5017,6 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
hdr = (struct l2cap_hdr *) skb->data;
len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
- cid = __le16_to_cpu(hdr->cid);
if (len == skb->len) {
/* Complete frame received */
@@ -4694,23 +5033,6 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
goto drop;
}
- chan = l2cap_get_chan_by_scid(conn, cid);
-
- if (chan && chan->sk) {
- struct sock *sk = chan->sk;
- lock_sock(sk);
-
- if (chan->imtu < len - L2CAP_HDR_SIZE) {
- BT_ERR("Frame exceeding recv MTU (len %d, "
- "MTU %d)", len,
- chan->imtu);
- release_sock(sk);
- l2cap_conn_unreliable(conn, ECOMM);
- goto drop;
- }
- release_sock(sk);
- }
-
/* Allocate skb for the complete frame (with header) */
conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
if (!conn->rx_skb)
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index c4fe583b0af6..3bb1611b9d48 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -82,7 +82,7 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
}
if (la.l2_cid)
- err = l2cap_add_scid(chan, la.l2_cid);
+ err = l2cap_add_scid(chan, __le16_to_cpu(la.l2_cid));
else
err = l2cap_add_psm(chan, &la.l2_bdaddr, la.l2_psm);
@@ -123,7 +123,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
if (la.l2_cid && la.l2_psm)
return -EINVAL;
- err = l2cap_chan_connect(chan, la.l2_psm, la.l2_cid, &la.l2_bdaddr);
+ err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
+ &la.l2_bdaddr, la.l2_bdaddr_type);
if (err)
return err;
@@ -147,12 +148,16 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
lock_sock(sk);
- if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
- || sk->sk_state != BT_BOUND) {
+ if (sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
+ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM) {
+ err = -EINVAL;
+ goto done;
+ }
+
switch (chan->mode) {
case L2CAP_MODE_BASIC:
break;
@@ -319,8 +324,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
case L2CAP_CONNINFO:
if (sk->sk_state != BT_CONNECTED &&
- !(sk->sk_state == BT_CONNECT2 &&
- bt_sk(sk)->defer_setup)) {
+ !(sk->sk_state == BT_CONNECT2 &&
+ test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) {
err = -ENOTCONN;
break;
}
@@ -374,7 +379,10 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
}
memset(&sec, 0, sizeof(sec));
- sec.level = chan->sec_level;
+ if (chan->conn)
+ sec.level = chan->conn->hcon->sec_level;
+ else
+ sec.level = chan->sec_level;
if (sk->sk_state == BT_CONNECTED)
sec.key_size = chan->conn->hcon->enc_key_size;
@@ -391,7 +399,8 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
break;
}
- if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
+ if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
+ (u32 __user *) optval))
err = -EFAULT;
break;
@@ -591,10 +600,14 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
sk->sk_state = BT_CONFIG;
chan->state = BT_CONFIG;
- /* or for ACL link, under defer_setup time */
- } else if (sk->sk_state == BT_CONNECT2 &&
- bt_sk(sk)->defer_setup) {
- err = l2cap_chan_check_security(chan);
+ /* or for ACL link */
+ } else if ((sk->sk_state == BT_CONNECT2 &&
+ test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) ||
+ sk->sk_state == BT_CONNECTED) {
+ if (!l2cap_chan_check_security(chan))
+ set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
+ else
+ sk->sk_state_change(sk);
} else {
err = -EINVAL;
}
@@ -611,7 +624,10 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
break;
}
- bt_sk(sk)->defer_setup = opt;
+ if (opt)
+ set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+ else
+ clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
break;
case BT_FLUSHABLE:
@@ -711,16 +727,13 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
- lock_sock(sk);
-
- if (sk->sk_state != BT_CONNECTED) {
- release_sock(sk);
+ if (sk->sk_state != BT_CONNECTED)
return -ENOTCONN;
- }
+ l2cap_chan_lock(chan);
err = l2cap_chan_send(chan, msg, len, sk->sk_priority);
+ l2cap_chan_unlock(chan);
- release_sock(sk);
return err;
}
@@ -732,7 +745,8 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct ms
lock_sock(sk);
- if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
+ if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP,
+ &bt_sk(sk)->flags)) {
sk->sk_state = BT_CONFIG;
pi->chan->state = BT_CONFIG;
@@ -926,12 +940,19 @@ static void l2cap_sock_state_change_cb(void *data, int state)
}
static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
- unsigned long len, int nb,
- int *err)
+ unsigned long len, int nb)
{
- struct sock *sk = chan->sk;
+ struct sk_buff *skb;
+ int err;
+
+ l2cap_chan_unlock(chan);
+ skb = bt_skb_send_alloc(chan->sk, len, nb, &err);
+ l2cap_chan_lock(chan);
+
+ if (!skb)
+ return ERR_PTR(err);
- return bt_skb_send_alloc(sk, len, nb, err);
+ return skb;
}
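[note: bt_skb_send_alloc() can sleep waiting for socket buffer space, so the callback releases the channel lock around the blocking call and re-acquires it afterwards. A minimal sketch of that pattern, with a pthread mutex standing in for the channel lock:]

#include <pthread.h>
#include <stdlib.h>

static void *alloc_while_unlocked(pthread_mutex_t *lock, size_t len)
{
        void *p;

        pthread_mutex_unlock(lock); /* let other contexts progress  */
        p = malloc(len);            /* may block for a long time    */
        pthread_mutex_lock(lock);   /* channel state may have moved */

        return p;                   /* NULL on failure              */
}

[anything derived from channel state before the unlock must be re-validated after the lock is re-taken, which is why l2cap_chan_send re-checks BT_CONNECTED after segmentation.]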
static struct l2cap_ops l2cap_chan_ops = {
@@ -947,6 +968,7 @@ static void l2cap_sock_destruct(struct sock *sk)
{
BT_DBG("sk %p", sk);
+ l2cap_chan_put(l2cap_pi(sk)->chan);
if (l2cap_pi(sk)->rx_busy_skb) {
kfree_skb(l2cap_pi(sk)->rx_busy_skb);
l2cap_pi(sk)->rx_busy_skb = NULL;
@@ -967,7 +989,7 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
struct l2cap_chan *pchan = l2cap_pi(parent)->chan;
sk->sk_type = parent->sk_type;
- bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
+ bt_sk(sk)->flags = bt_sk(parent)->flags;
chan->chan_type = pchan->chan_type;
chan->imtu = pchan->imtu;
@@ -1005,13 +1027,8 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
} else {
chan->mode = L2CAP_MODE_BASIC;
}
- chan->max_tx = L2CAP_DEFAULT_MAX_TX;
- chan->fcs = L2CAP_FCS_CRC16;
- chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
- chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
- chan->sec_level = BT_SECURITY_LOW;
- chan->flags = 0;
- set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+
+ l2cap_chan_set_defaults(chan);
}
/* Default config options */
@@ -1047,12 +1064,16 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p
sk->sk_protocol = proto;
sk->sk_state = BT_OPEN;
- chan = l2cap_chan_create(sk);
+ chan = l2cap_chan_create();
if (!chan) {
l2cap_sock_kill(sk);
return NULL;
}
+ l2cap_chan_hold(chan);
+
+ chan->sk = sk;
+
l2cap_pi(sk)->chan = chan;
return sk;
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 7fcff8887131..25d220776079 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -35,10 +35,9 @@
#include <net/bluetooth/smp.h>
bool enable_hs;
-bool enable_le;
#define MGMT_VERSION 1
-#define MGMT_REVISION 0
+#define MGMT_REVISION 1
static const u16 mgmt_commands[] = {
MGMT_OP_READ_INDEX_LIST,
@@ -78,6 +77,7 @@ static const u16 mgmt_commands[] = {
MGMT_OP_CONFIRM_NAME,
MGMT_OP_BLOCK_DEVICE,
MGMT_OP_UNBLOCK_DEVICE,
+ MGMT_OP_SET_DEVICE_ID,
};
static const u16 mgmt_events[] = {
@@ -224,7 +224,7 @@ static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
ev = (void *) skb_put(skb, sizeof(*ev));
ev->status = status;
- put_unaligned_le16(cmd, &ev->opcode);
+ ev->opcode = cpu_to_le16(cmd);
err = sock_queue_rcv_skb(sk, skb);
if (err < 0)
@@ -254,7 +254,7 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
- put_unaligned_le16(cmd, &ev->opcode);
+ ev->opcode = cpu_to_le16(cmd);
ev->status = status;
if (rp)
@@ -275,7 +275,7 @@ static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
BT_DBG("sock %p", sk);
rp.version = MGMT_VERSION;
- put_unaligned_le16(MGMT_REVISION, &rp.revision);
+ rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
sizeof(rp));
@@ -285,9 +285,9 @@ static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len)
{
struct mgmt_rp_read_commands *rp;
- u16 num_commands = ARRAY_SIZE(mgmt_commands);
- u16 num_events = ARRAY_SIZE(mgmt_events);
- u16 *opcode;
+ const u16 num_commands = ARRAY_SIZE(mgmt_commands);
+ const u16 num_events = ARRAY_SIZE(mgmt_events);
+ __le16 *opcode;
size_t rp_size;
int i, err;
@@ -299,8 +299,8 @@ static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
if (!rp)
return -ENOMEM;
- put_unaligned_le16(num_commands, &rp->num_commands);
- put_unaligned_le16(num_events, &rp->num_events);
+ rp->num_commands = __constant_cpu_to_le16(num_commands);
+ rp->num_events = __constant_cpu_to_le16(num_events);
for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
put_unaligned_le16(mgmt_commands[i], opcode);
@@ -341,14 +341,14 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
return -ENOMEM;
}
- put_unaligned_le16(count, &rp->num_controllers);
+ rp->num_controllers = cpu_to_le16(count);
i = 0;
list_for_each_entry(d, &hci_dev_list, list) {
if (test_bit(HCI_SETUP, &d->dev_flags))
continue;
- put_unaligned_le16(d->id, &rp->index[i++]);
+ rp->index[i++] = cpu_to_le16(d->id);
BT_DBG("Added hci%u", d->id);
}
@@ -383,10 +383,8 @@ static u32 get_supported_settings(struct hci_dev *hdev)
if (enable_hs)
settings |= MGMT_SETTING_HS;
- if (enable_le) {
- if (hdev->features[4] & LMP_LE)
- settings |= MGMT_SETTING_LE;
- }
+ if (hdev->features[4] & LMP_LE)
+ settings |= MGMT_SETTING_LE;
return settings;
}
@@ -442,9 +440,7 @@ static u16 get_uuid16(u8 *uuid128)
return 0;
}
- memcpy(&val, &uuid128[12], 4);
-
- val = le32_to_cpu(val);
+ val = get_unaligned_le32(&uuid128[12]);
if (val > 0xffff)
return 0;
@@ -479,6 +475,28 @@ static void create_eir(struct hci_dev *hdev, u8 *data)
ptr += (name_len + 2);
}
+ if (hdev->inq_tx_power) {
+ ptr[0] = 2;
+ ptr[1] = EIR_TX_POWER;
+ ptr[2] = (u8) hdev->inq_tx_power;
+
+ eir_len += 3;
+ ptr += 3;
+ }
+
+ if (hdev->devid_source > 0) {
+ ptr[0] = 9;
+ ptr[1] = EIR_DEVICE_ID;
+
+ put_unaligned_le16(hdev->devid_source, ptr + 2);
+ put_unaligned_le16(hdev->devid_vendor, ptr + 4);
+ put_unaligned_le16(hdev->devid_product, ptr + 6);
+ put_unaligned_le16(hdev->devid_version, ptr + 8);
+
+ eir_len += 10;
+ ptr += 10;
+ }
+
memset(uuid16_list, 0, sizeof(uuid16_list));
/* Group all UUID16 types */
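[note: each EIR entry is a length byte (covering type plus payload), a type byte, and the payload; the Device ID entry above packs four little-endian 16-bit fields. A standalone sketch of that encoding, using 0x10 per the kernel's EIR_DEVICE_ID definition:]

#include <stddef.h>
#include <stdint.h>

static size_t eir_put_device_id(uint8_t *ptr, uint16_t source,
                                uint16_t vendor, uint16_t product,
                                uint16_t version)
{
        uint16_t f[4] = { source, vendor, product, version };
        int i;

        ptr[0] = 9;             /* 1 type byte + 8 payload bytes */
        ptr[1] = 0x10;          /* EIR_DEVICE_ID                 */
        for (i = 0; i < 4; i++) {
                ptr[2 + 2 * i] = (uint8_t)(f[i] & 0xff);   /* LE */
                ptr[3 + 2 * i] = (uint8_t)(f[i] >> 8);
        }
        return 10;              /* total bytes consumed */
}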
@@ -642,8 +660,7 @@ static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
bacpy(&rp.bdaddr, &hdev->bdaddr);
rp.version = hdev->hci_ver;
-
- put_unaligned_le16(hdev->manufacturer, &rp.manufacturer);
+ rp.manufacturer = cpu_to_le16(hdev->manufacturer);
rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
rp.current_settings = cpu_to_le32(get_current_settings(hdev));
@@ -840,7 +857,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
BT_DBG("request for %s", hdev->name);
- timeout = get_unaligned_le16(&cp->timeout);
+ timeout = __le16_to_cpu(cp->timeout);
if (!cp->val && timeout > 0)
return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
MGMT_STATUS_INVALID_PARAMS);
@@ -1122,8 +1139,8 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
}
if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
- MGMT_STATUS_BUSY);
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
+ MGMT_STATUS_BUSY);
goto failed;
}
@@ -1179,7 +1196,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
hci_dev_lock(hdev);
- if (!enable_le || !(hdev->features[4] & LMP_LE)) {
+ if (!(hdev->features[4] & LMP_LE)) {
err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
MGMT_STATUS_NOT_SUPPORTED);
goto unlock;
@@ -1227,10 +1244,8 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
&hci_cp);
- if (err < 0) {
+ if (err < 0)
mgmt_pending_remove(cmd);
- goto unlock;
- }
unlock:
hci_dev_unlock(hdev);
@@ -1280,10 +1295,8 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
}
cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
- if (!cmd) {
+ if (!cmd)
err = -ENOMEM;
- goto failed;
- }
failed:
hci_dev_unlock(hdev);
@@ -1368,10 +1381,8 @@ update_class:
}
cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
- if (!cmd) {
+ if (!cmd)
err = -ENOMEM;
- goto unlock;
- }
unlock:
hci_dev_unlock(hdev);
@@ -1422,10 +1433,8 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
}
cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
- if (!cmd) {
+ if (!cmd)
err = -ENOMEM;
- goto unlock;
- }
unlock:
hci_dev_unlock(hdev);
@@ -1439,7 +1448,7 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
u16 key_count, expected_len;
int i;
- key_count = get_unaligned_le16(&cp->key_count);
+ key_count = __le16_to_cpu(cp->key_count);
expected_len = sizeof(*cp) + key_count *
sizeof(struct mgmt_link_key_info);
@@ -1512,7 +1521,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- if (cp->addr.type == MGMT_ADDR_BREDR)
+ if (cp->addr.type == BDADDR_BREDR)
err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
else
err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
@@ -1524,7 +1533,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
}
if (cp->disconnect) {
- if (cp->addr.type == MGMT_ADDR_BREDR)
+ if (cp->addr.type == BDADDR_BREDR)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
&cp->addr.bdaddr);
else
@@ -1548,7 +1557,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- put_unaligned_le16(conn->handle, &dc.handle);
+ dc.handle = cpu_to_le16(conn->handle);
dc.reason = 0x13; /* Remote User Terminated Connection */
err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
if (err < 0)
@@ -1584,7 +1593,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
- if (cp->addr.type == MGMT_ADDR_BREDR)
+ if (cp->addr.type == BDADDR_BREDR)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
else
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
@@ -1601,7 +1610,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
- put_unaligned_le16(conn->handle, &dc.handle);
+ dc.handle = cpu_to_le16(conn->handle);
dc.reason = 0x13; /* Remote User Terminated Connection */
err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
@@ -1613,22 +1622,22 @@ failed:
return err;
}
-static u8 link_to_mgmt(u8 link_type, u8 addr_type)
+static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
switch (link_type) {
case LE_LINK:
switch (addr_type) {
case ADDR_LE_DEV_PUBLIC:
- return MGMT_ADDR_LE_PUBLIC;
- case ADDR_LE_DEV_RANDOM:
- return MGMT_ADDR_LE_RANDOM;
+ return BDADDR_LE_PUBLIC;
+
default:
- return MGMT_ADDR_INVALID;
+ /* Fallback to LE Random address type */
+ return BDADDR_LE_RANDOM;
}
- case ACL_LINK:
- return MGMT_ADDR_BREDR;
+
default:
- return MGMT_ADDR_INVALID;
+ /* Fallback to BR/EDR type */
+ return BDADDR_BREDR;
}
}
@@ -1669,13 +1678,13 @@ static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
continue;
bacpy(&rp->addr[i].bdaddr, &c->dst);
- rp->addr[i].type = link_to_mgmt(c->type, c->dst_type);
- if (rp->addr[i].type == MGMT_ADDR_INVALID)
+ rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
+ if (c->type == SCO_LINK || c->type == ESCO_LINK)
continue;
i++;
}
- put_unaligned_le16(i, &rp->conn_count);
+ rp->conn_count = cpu_to_le16(i);
/* Recalculate length in case of filtered SCO connections, etc */
rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
@@ -1836,7 +1845,7 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status)
struct hci_conn *conn = cmd->user_data;
bacpy(&rp.addr.bdaddr, &conn->dst);
- rp.addr.type = link_to_mgmt(conn->type, conn->dst_type);
+ rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
&rp, sizeof(rp));
@@ -1890,12 +1899,12 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
else
auth_type = HCI_AT_DEDICATED_BONDING_MITM;
- if (cp->addr.type == MGMT_ADDR_BREDR)
- conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr, sec_level,
- auth_type);
+ if (cp->addr.type == BDADDR_BREDR)
+ conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
+ cp->addr.type, sec_level, auth_type);
else
- conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr, sec_level,
- auth_type);
+ conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
+ cp->addr.type, sec_level, auth_type);
memset(&rp, 0, sizeof(rp));
bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
@@ -1923,7 +1932,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
}
/* For LE, just connecting isn't a proof that the pairing finished */
- if (cp->addr.type == MGMT_ADDR_BREDR)
+ if (cp->addr.type == BDADDR_BREDR)
conn->connect_cfm_cb = pairing_complete_cb;
conn->security_cfm_cb = pairing_complete_cb;
@@ -2000,7 +2009,7 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
goto done;
}
- if (type == MGMT_ADDR_BREDR)
+ if (type == BDADDR_BREDR)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
else
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
@@ -2011,7 +2020,7 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
goto done;
}
- if (type == MGMT_ADDR_LE_PUBLIC || type == MGMT_ADDR_LE_RANDOM) {
+ if (type == BDADDR_LE_PUBLIC || type == BDADDR_LE_RANDOM) {
/* Continue with pairing via SMP */
err = smp_user_confirm_reply(conn, mgmt_op, passkey);
@@ -2295,6 +2304,12 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
goto failed;
}
+ if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
+
if (hdev->discovery.state != DISCOVERY_STOPPED) {
err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
MGMT_STATUS_BUSY);
@@ -2381,27 +2396,39 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- if (hdev->discovery.state == DISCOVERY_FINDING) {
- err = hci_cancel_inquiry(hdev);
- if (err < 0)
- mgmt_pending_remove(cmd);
+ switch (hdev->discovery.state) {
+ case DISCOVERY_FINDING:
+ if (test_bit(HCI_INQUIRY, &hdev->flags))
+ err = hci_cancel_inquiry(hdev);
else
- hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
- goto unlock;
- }
+ err = hci_cancel_le_scan(hdev);
- e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_PENDING);
- if (!e) {
- mgmt_pending_remove(cmd);
- err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
- &mgmt_cp->type, sizeof(mgmt_cp->type));
- hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
- goto unlock;
+ break;
+
+ case DISCOVERY_RESOLVING:
+ e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
+ NAME_PENDING);
+ if (!e) {
+ mgmt_pending_remove(cmd);
+ err = cmd_complete(sk, hdev->id,
+ MGMT_OP_STOP_DISCOVERY, 0,
+ &mgmt_cp->type,
+ sizeof(mgmt_cp->type));
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ goto unlock;
+ }
+
+ bacpy(&cp.bdaddr, &e->data.bdaddr);
+ err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
+ sizeof(cp), &cp);
+
+ break;
+
+ default:
+ BT_DBG("unknown discovery state %u", hdev->discovery.state);
+ err = -EFAULT;
}
- bacpy(&cp.bdaddr, &e->data.bdaddr);
- err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
- &cp);
if (err < 0)
mgmt_pending_remove(cmd);
else
@@ -2501,6 +2528,37 @@ static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
return err;
}
+static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_set_device_id *cp = data;
+ int err;
+ __u16 source;
+
+ BT_DBG("%s", hdev->name);
+
+ source = __le16_to_cpu(cp->source);
+
+ if (source > 0x0002)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ hdev->devid_source = source;
+ hdev->devid_vendor = __le16_to_cpu(cp->vendor);
+ hdev->devid_product = __le16_to_cpu(cp->product);
+ hdev->devid_version = __le16_to_cpu(cp->version);
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
+
+ update_eir(hdev);
+
+ hci_dev_unlock(hdev);
+
+ return err;
+}
+
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
@@ -2523,13 +2581,18 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
if (cp->val) {
type = PAGE_SCAN_TYPE_INTERLACED;
- acp.interval = 0x0024; /* 22.5 msec page scan interval */
+
+ /* 22.5 msec page scan interval */
+ acp.interval = __constant_cpu_to_le16(0x0024);
} else {
type = PAGE_SCAN_TYPE_STANDARD; /* default */
- acp.interval = 0x0800; /* default 1.28 sec page scan */
+
+ /* default 1.28 sec page scan */
+ acp.interval = __constant_cpu_to_le16(0x0800);
}
- acp.window = 0x0012; /* default 11.25 msec page scan window */
+ /* default 11.25 msec page scan window */
+ acp.window = __constant_cpu_to_le16(0x0012);
err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp),
&acp);
@@ -2560,7 +2623,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
u16 key_count, expected_len;
int i;
- key_count = get_unaligned_le16(&cp->key_count);
+ key_count = __le16_to_cpu(cp->key_count);
expected_len = sizeof(*cp) + key_count *
sizeof(struct mgmt_ltk_info);
@@ -2586,7 +2649,8 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
else
type = HCI_SMP_LTK_SLAVE;
- hci_add_ltk(hdev, &key->addr.bdaddr, key->addr.type,
+ hci_add_ltk(hdev, &key->addr.bdaddr,
+ bdaddr_to_le(key->addr.type),
type, 0, key->authenticated, key->val,
key->enc_size, key->ediv, key->rand);
}
@@ -2596,7 +2660,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
return 0;
}
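[note: load_link_keys() and load_long_term_keys() guard their variable-length payloads the same way: the declared count must account for the entire remaining buffer before the entry array is touched. A sketch of that check, with illustrative struct shapes and the wire-order conversion elided:]

#include <stddef.h>
#include <stdint.h>

struct hdr  { uint16_t key_count; };
struct info { uint8_t val[28]; };

static int payload_ok(const void *data, size_t len)
{
        const struct hdr *h = data;
        size_t expected;

        if (len < sizeof(*h))
                return 0;
        expected = sizeof(*h) + h->key_count * sizeof(struct info);
        return len == expected;   /* reject short *and* oversized */
}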
-struct mgmt_handler {
+static const struct mgmt_handler {
int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len);
bool var_len;
@@ -2642,6 +2706,7 @@ struct mgmt_handler {
{ confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
{ block_device, false, MGMT_BLOCK_DEVICE_SIZE },
{ unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
+ { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
};
@@ -2652,7 +2717,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
struct mgmt_hdr *hdr;
u16 opcode, index, len;
struct hci_dev *hdev = NULL;
- struct mgmt_handler *handler;
+ const struct mgmt_handler *handler;
int err;
BT_DBG("got %zu bytes", msglen);
@@ -2670,9 +2735,9 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
}
hdr = buf;
- opcode = get_unaligned_le16(&hdr->opcode);
- index = get_unaligned_le16(&hdr->index);
- len = get_unaligned_le16(&hdr->len);
+ opcode = __le16_to_cpu(hdr->opcode);
+ index = __le16_to_cpu(hdr->index);
+ len = __le16_to_cpu(hdr->len);
if (len != msglen - sizeof(*hdr)) {
err = -EINVAL;
@@ -2879,7 +2944,8 @@ int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
return 0;
}
-int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, u8 persistent)
+int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+ bool persistent)
{
struct mgmt_ev_new_link_key ev;
@@ -2887,7 +2953,7 @@ int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, u8 persistent)
ev.store_hint = persistent;
bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
- ev.key.addr.type = MGMT_ADDR_BREDR;
+ ev.key.addr.type = BDADDR_BREDR;
ev.key.type = key->type;
memcpy(ev.key.val, key->val, 16);
ev.key.pin_len = key->pin_len;
@@ -2903,7 +2969,7 @@ int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
ev.store_hint = persistent;
bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
- ev.key.addr.type = key->bdaddr_type;
+ ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
ev.key.authenticated = key->authenticated;
ev.key.enc_size = key->enc_size;
ev.key.ediv = key->ediv;
@@ -2927,7 +2993,7 @@ int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u16 eir_len = 0;
bacpy(&ev->addr.bdaddr, bdaddr);
- ev->addr.type = link_to_mgmt(link_type, addr_type);
+ ev->addr.type = link_to_bdaddr(link_type, addr_type);
ev->flags = __cpu_to_le32(flags);
@@ -2936,10 +3002,10 @@ int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
name, name_len);
if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
- eir_len = eir_append_data(&ev->eir[eir_len], eir_len,
+ eir_len = eir_append_data(ev->eir, eir_len,
EIR_CLASS_OF_DEV, dev_class, 3);
- put_unaligned_le16(eir_len, &ev->eir_len);
+ ev->eir_len = cpu_to_le16(eir_len);
return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
sizeof(*ev) + eir_len, NULL);
@@ -2990,13 +3056,13 @@ int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
bacpy(&ev.bdaddr, bdaddr);
- ev.type = link_to_mgmt(link_type, addr_type);
+ ev.type = link_to_bdaddr(link_type, addr_type);
err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
sk);
if (sk)
- sock_put(sk);
+ sock_put(sk);
mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
hdev);
@@ -3016,7 +3082,7 @@ int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
return -ENOENT;
bacpy(&rp.addr.bdaddr, bdaddr);
- rp.addr.type = link_to_mgmt(link_type, addr_type);
+ rp.addr.type = link_to_bdaddr(link_type, addr_type);
err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
mgmt_status(status), &rp, sizeof(rp));
@@ -3034,7 +3100,7 @@ int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
struct mgmt_ev_connect_failed ev;
bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = link_to_mgmt(link_type, addr_type);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.status = mgmt_status(status);
return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
@@ -3045,7 +3111,7 @@ int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
struct mgmt_ev_pin_code_request ev;
bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = MGMT_ADDR_BREDR;
+ ev.addr.type = BDADDR_BREDR;
ev.secure = secure;
return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
@@ -3064,7 +3130,7 @@ int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
return -ENOENT;
bacpy(&rp.addr.bdaddr, bdaddr);
- rp.addr.type = MGMT_ADDR_BREDR;
+ rp.addr.type = BDADDR_BREDR;
err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
mgmt_status(status), &rp, sizeof(rp));
@@ -3086,7 +3152,7 @@ int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
return -ENOENT;
bacpy(&rp.addr.bdaddr, bdaddr);
- rp.addr.type = MGMT_ADDR_BREDR;
+ rp.addr.type = BDADDR_BREDR;
err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
mgmt_status(status), &rp, sizeof(rp));
@@ -3105,9 +3171,9 @@ int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
BT_DBG("%s", hdev->name);
bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = link_to_mgmt(link_type, addr_type);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.confirm_hint = confirm_hint;
- put_unaligned_le32(value, &ev.value);
+ ev.value = value;
return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
NULL);
@@ -3121,7 +3187,7 @@ int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
BT_DBG("%s", hdev->name);
bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = link_to_mgmt(link_type, addr_type);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
NULL);
@@ -3140,7 +3206,7 @@ static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
return -ENOENT;
bacpy(&rp.addr.bdaddr, bdaddr);
- rp.addr.type = link_to_mgmt(link_type, addr_type);
+ rp.addr.type = link_to_bdaddr(link_type, addr_type);
err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
&rp, sizeof(rp));
@@ -3183,7 +3249,7 @@ int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
struct mgmt_ev_auth_failed ev;
bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = link_to_mgmt(link_type, addr_type);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.status = mgmt_status(status);
return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
@@ -3408,10 +3474,10 @@ int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
if (enable && test_and_clear_bit(HCI_LE_ENABLED,
&hdev->dev_flags))
- err = new_settings(hdev, NULL);
+ err = new_settings(hdev, NULL);
- mgmt_pending_foreach(MGMT_OP_SET_LE, hdev,
- cmd_status_rsp, &mgmt_err);
+ mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
+ &mgmt_err);
return err;
}
@@ -3450,7 +3516,7 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
memset(buf, 0, sizeof(buf));
bacpy(&ev->addr.bdaddr, bdaddr);
- ev->addr.type = link_to_mgmt(link_type, addr_type);
+ ev->addr.type = link_to_bdaddr(link_type, addr_type);
ev->rssi = rssi;
if (cfm_name)
ev->flags[0] |= MGMT_DEV_FOUND_CONFIRM_NAME;
@@ -3464,7 +3530,7 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
dev_class, 3);
- put_unaligned_le16(eir_len, &ev->eir_len);
+ ev->eir_len = cpu_to_le16(eir_len);
ev_size = sizeof(*ev) + eir_len;
@@ -3483,13 +3549,13 @@ int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
memset(buf, 0, sizeof(buf));
bacpy(&ev->addr.bdaddr, bdaddr);
- ev->addr.type = link_to_mgmt(link_type, addr_type);
+ ev->addr.type = link_to_bdaddr(link_type, addr_type);
ev->rssi = rssi;
eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
name_len);
- put_unaligned_le16(eir_len, &ev->eir_len);
+ ev->eir_len = cpu_to_le16(eir_len);
return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
sizeof(*ev) + eir_len, NULL);
@@ -3589,6 +3655,3 @@ int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed support");
-
-module_param(enable_le, bool, 0644);
-MODULE_PARM_DESC(enable_le, "Enable Low Energy support");
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index a55a43e9f70e..e8707debb864 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -260,7 +260,8 @@ static void rfcomm_sock_init(struct sock *sk, struct sock *parent)
if (parent) {
sk->sk_type = parent->sk_type;
- pi->dlc->defer_setup = bt_sk(parent)->defer_setup;
+ pi->dlc->defer_setup = test_bit(BT_SK_DEFER_SETUP,
+ &bt_sk(parent)->flags);
pi->sec_level = rfcomm_pi(parent)->sec_level;
pi->role_switch = rfcomm_pi(parent)->role_switch;
@@ -731,7 +732,11 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
break;
}
- bt_sk(sk)->defer_setup = opt;
+ if (opt)
+ set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+ else
+ clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+
break;
default:
@@ -849,7 +854,8 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c
break;
}
- if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
+ if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
+ (u32 __user *) optval))
err = -EFAULT;
break;
@@ -972,7 +978,7 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
done:
bh_unlock_sock(parent);
- if (bt_sk(parent)->defer_setup)
+ if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
parent->sk_state_change(parent);
return result;
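The bool defer_setup field becomes a single bit in bt_sk(sk)->flags, flipped with the atomic bitops so concurrent updates cannot race with each other. A sketch of the pattern, with an illustrative bit index (the real BT_SK_DEFER_SETUP value comes from the bluetooth headers):

#include <linux/bitops.h>

#define EX_DEFER_SETUP	0	/* illustrative bit index */

struct example_bt_sock {
	unsigned long flags;
};

static void example_set_defer_setup(struct example_bt_sock *bt, int opt)
{
	if (opt)
		set_bit(EX_DEFER_SETUP, &bt->flags);	/* atomic set */
	else
		clear_bit(EX_DEFER_SETUP, &bt->flags);	/* atomic clear */
}

static bool example_defer_setup(struct example_bt_sock *bt)
{
	return test_bit(EX_DEFER_SETUP, &bt->flags);
}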
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 4bf54b377255..aa5d73b786ac 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -48,13 +48,12 @@
static struct tty_driver *rfcomm_tty_driver;
struct rfcomm_dev {
+ struct tty_port port;
struct list_head list;
- atomic_t refcnt;
char name[12];
int id;
unsigned long flags;
- atomic_t opened;
int err;
bdaddr_t src;
@@ -64,9 +63,7 @@ struct rfcomm_dev {
uint modem_status;
struct rfcomm_dlc *dlc;
- struct tty_struct *tty;
wait_queue_head_t wait;
- struct work_struct wakeup_task;
struct device *tty_dev;
@@ -82,11 +79,18 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb);
static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err);
static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig);
-static void rfcomm_tty_wakeup(struct work_struct *work);
-
/* ---- Device functions ---- */
-static void rfcomm_dev_destruct(struct rfcomm_dev *dev)
+
+/*
+ * The reason this isn't actually a race, as you no doubt have a little voice
+ * screaming at you in your head, is that the refcount should never actually
+ * reach zero unless the device has already been taken off the list, in
+ * rfcomm_dev_del(). And if that's not true, we'll hit the BUG() in
+ * rfcomm_dev_destruct() anyway.
+ */
+static void rfcomm_dev_destruct(struct tty_port *port)
{
+ struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port);
struct rfcomm_dlc *dlc = dev->dlc;
BT_DBG("dev %p dlc %p", dev, dlc);
@@ -113,23 +117,9 @@ static void rfcomm_dev_destruct(struct rfcomm_dev *dev)
module_put(THIS_MODULE);
}
-static inline void rfcomm_dev_hold(struct rfcomm_dev *dev)
-{
- atomic_inc(&dev->refcnt);
-}
-
-static inline void rfcomm_dev_put(struct rfcomm_dev *dev)
-{
- /* The reason this isn't actually a race, as you no
- doubt have a little voice screaming at you in your
- head, is that the refcount should never actually
- reach zero unless the device has already been taken
- off the list, in rfcomm_dev_del(). And if that's not
- true, we'll hit the BUG() in rfcomm_dev_destruct()
- anyway. */
- if (atomic_dec_and_test(&dev->refcnt))
- rfcomm_dev_destruct(dev);
-}
+static const struct tty_port_operations rfcomm_port_ops = {
+ .destruct = rfcomm_dev_destruct,
+};
static struct rfcomm_dev *__rfcomm_dev_get(int id)
{
@@ -154,7 +144,7 @@ static inline struct rfcomm_dev *rfcomm_dev_get(int id)
if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags))
dev = NULL;
else
- rfcomm_dev_hold(dev);
+ tty_port_get(&dev->port);
}
spin_unlock(&rfcomm_dev_lock);
@@ -241,7 +231,6 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
sprintf(dev->name, "rfcomm%d", dev->id);
list_add(&dev->list, head);
- atomic_set(&dev->refcnt, 1);
bacpy(&dev->src, &req->src);
bacpy(&dev->dst, &req->dst);
@@ -250,10 +239,9 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
dev->flags = req->flags &
((1 << RFCOMM_RELEASE_ONHUP) | (1 << RFCOMM_REUSE_DLC));
- atomic_set(&dev->opened, 0);
-
+ tty_port_init(&dev->port);
+ dev->port.ops = &rfcomm_port_ops;
init_waitqueue_head(&dev->wait);
- INIT_WORK(&dev->wakeup_task, rfcomm_tty_wakeup);
skb_queue_head_init(&dev->pending);
@@ -320,18 +308,23 @@ free:
static void rfcomm_dev_del(struct rfcomm_dev *dev)
{
+ unsigned long flags;
BT_DBG("dev %p", dev);
BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
- if (atomic_read(&dev->opened) > 0)
+ spin_lock_irqsave(&dev->port.lock, flags);
+ if (dev->port.count > 0) {
+ spin_unlock_irqrestore(&dev->port.lock, flags);
return;
+ }
+ spin_unlock_irqrestore(&dev->port.lock, flags);
spin_lock(&rfcomm_dev_lock);
list_del_init(&dev->list);
spin_unlock(&rfcomm_dev_lock);
- rfcomm_dev_put(dev);
+ tty_port_put(&dev->port);
}
/* ---- Send buffer ---- */
@@ -345,15 +338,16 @@ static inline unsigned int rfcomm_room(struct rfcomm_dlc *dlc)
static void rfcomm_wfree(struct sk_buff *skb)
{
struct rfcomm_dev *dev = (void *) skb->sk;
+ struct tty_struct *tty = dev->port.tty;
atomic_sub(skb->truesize, &dev->wmem_alloc);
- if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags))
- queue_work(system_nrt_wq, &dev->wakeup_task);
- rfcomm_dev_put(dev);
+ if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags) && tty)
+ tty_wakeup(tty);
+ tty_port_put(&dev->port);
}
static inline void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev)
{
- rfcomm_dev_hold(dev);
+ tty_port_get(&dev->port);
atomic_add(skb->truesize, &dev->wmem_alloc);
skb->sk = (void *) dev;
skb->destructor = rfcomm_wfree;
@@ -432,7 +426,7 @@ static int rfcomm_release_dev(void __user *arg)
return -ENODEV;
if (dev->flags != NOCAP_FLAGS && !capable(CAP_NET_ADMIN)) {
- rfcomm_dev_put(dev);
+ tty_port_put(&dev->port);
return -EPERM;
}
@@ -440,12 +434,12 @@ static int rfcomm_release_dev(void __user *arg)
rfcomm_dlc_close(dev->dlc, 0);
/* Shut down TTY synchronously before freeing rfcomm_dev */
- if (dev->tty)
- tty_vhangup(dev->tty);
+ if (dev->port.tty)
+ tty_vhangup(dev->port.tty);
if (!test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags))
rfcomm_dev_del(dev);
- rfcomm_dev_put(dev);
+ tty_port_put(&dev->port);
return 0;
}
@@ -523,7 +517,7 @@ static int rfcomm_get_dev_info(void __user *arg)
if (copy_to_user(arg, &di, sizeof(di)))
err = -EFAULT;
- rfcomm_dev_put(dev);
+ tty_port_put(&dev->port);
return err;
}
@@ -559,7 +553,7 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb)
return;
}
- tty = dev->tty;
+ tty = dev->port.tty;
if (!tty || !skb_queue_empty(&dev->pending)) {
skb_queue_tail(&dev->pending, skb);
return;
@@ -585,13 +579,13 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
wake_up_interruptible(&dev->wait);
if (dlc->state == BT_CLOSED) {
- if (!dev->tty) {
+ if (!dev->port.tty) {
if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) {
/* Drop DLC lock here to avoid deadlock
* 1. rfcomm_dev_get will take rfcomm_dev_lock
* but in rfcomm_dev_add there's lock order:
* rfcomm_dev_lock -> dlc lock
- * 2. rfcomm_dev_put will deadlock if it's
+ * 2. tty_port_put will deadlock if it's
* the last reference
*/
rfcomm_dlc_unlock(dlc);
@@ -601,11 +595,11 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
}
rfcomm_dev_del(dev);
- rfcomm_dev_put(dev);
+ tty_port_put(&dev->port);
rfcomm_dlc_lock(dlc);
}
} else
- tty_hangup(dev->tty);
+ tty_hangup(dev->port.tty);
}
}
@@ -618,8 +612,8 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig)
BT_DBG("dlc %p dev %p v24_sig 0x%02x", dlc, dev, v24_sig);
if ((dev->modem_status & TIOCM_CD) && !(v24_sig & RFCOMM_V24_DV)) {
- if (dev->tty && !C_CLOCAL(dev->tty))
- tty_hangup(dev->tty);
+ if (dev->port.tty && !C_CLOCAL(dev->port.tty))
+ tty_hangup(dev->port.tty);
}
dev->modem_status =
@@ -630,21 +624,9 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig)
}
/* ---- TTY functions ---- */
-static void rfcomm_tty_wakeup(struct work_struct *work)
-{
- struct rfcomm_dev *dev = container_of(work, struct rfcomm_dev,
- wakeup_task);
- struct tty_struct *tty = dev->tty;
- if (!tty)
- return;
-
- BT_DBG("dev %p tty %p", dev, tty);
- tty_wakeup(tty);
-}
-
static void rfcomm_tty_copy_pending(struct rfcomm_dev *dev)
{
- struct tty_struct *tty = dev->tty;
+ struct tty_struct *tty = dev->port.tty;
struct sk_buff *skb;
int inserted = 0;
@@ -671,6 +653,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
DECLARE_WAITQUEUE(wait, current);
struct rfcomm_dev *dev;
struct rfcomm_dlc *dlc;
+ unsigned long flags;
int err, id;
id = tty->index;
@@ -686,10 +669,14 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
return -ENODEV;
BT_DBG("dev %p dst %s channel %d opened %d", dev, batostr(&dev->dst),
- dev->channel, atomic_read(&dev->opened));
+ dev->channel, dev->port.count);
- if (atomic_inc_return(&dev->opened) > 1)
+ spin_lock_irqsave(&dev->port.lock, flags);
+ if (++dev->port.count > 1) {
+ spin_unlock_irqrestore(&dev->port.lock, flags);
return 0;
+ }
+ spin_unlock_irqrestore(&dev->port.lock, flags);
dlc = dev->dlc;
@@ -697,7 +684,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
rfcomm_dlc_lock(dlc);
tty->driver_data = dev;
- dev->tty = tty;
+ dev->port.tty = tty;
rfcomm_dlc_unlock(dlc);
set_bit(RFCOMM_TTY_ATTACHED, &dev->flags);
@@ -723,9 +710,9 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
break;
}
- tty_unlock();
+ tty_unlock(tty);
schedule();
- tty_lock();
+ tty_lock(tty);
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->wait, &wait);
@@ -744,13 +731,17 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
{
struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
+ unsigned long flags;
+
if (!dev)
return;
BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
- atomic_read(&dev->opened));
+ dev->port.count);
- if (atomic_dec_and_test(&dev->opened)) {
+ spin_lock_irqsave(&dev->port.lock, flags);
+ if (!--dev->port.count) {
+ spin_unlock_irqrestore(&dev->port.lock, flags);
if (dev->tty_dev->parent)
device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
@@ -758,11 +749,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
rfcomm_dlc_close(dev->dlc, 0);
clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags);
- cancel_work_sync(&dev->wakeup_task);
rfcomm_dlc_lock(dev->dlc);
tty->driver_data = NULL;
- dev->tty = NULL;
+ dev->port.tty = NULL;
rfcomm_dlc_unlock(dev->dlc);
if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) {
@@ -770,11 +760,12 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
list_del_init(&dev->list);
spin_unlock(&rfcomm_dev_lock);
- rfcomm_dev_put(dev);
+ tty_port_put(&dev->port);
}
- }
+ } else
+ spin_unlock_irqrestore(&dev->port.lock, flags);
- rfcomm_dev_put(dev);
+ tty_port_put(&dev->port);
}
static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
@@ -1083,7 +1074,7 @@ static void rfcomm_tty_hangup(struct tty_struct *tty)
if (rfcomm_dev_get(dev->id) == NULL)
return;
rfcomm_dev_del(dev);
- rfcomm_dev_put(dev);
+ tty_port_put(&dev->port);
}
}
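The hand-rolled atomic_t refcount with rfcomm_dev_hold()/rfcomm_dev_put() is replaced throughout by the generic tty_port refcount, and the open count moves into dev->port.count under dev->port.lock. A minimal lifecycle sketch, assuming a device struct with an embedded struct tty_port as above:

static void example_destruct(struct tty_port *port)
{
	struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port);

	/* last reference gone: free per-device state */
	kfree(dev);
}

static const struct tty_port_operations example_port_ops = {
	.destruct = example_destruct,
};

static void example_lifecycle(struct rfcomm_dev *dev)
{
	tty_port_init(&dev->port);	/* refcount starts at 1 */
	dev->port.ops = &example_port_ops;

	tty_port_get(&dev->port);	/* take an extra reference */
	tty_port_put(&dev->port);	/* drop it */
	tty_port_put(&dev->port);	/* final put: .destruct runs */
}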
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index f6ab12907963..cbdd313659a7 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -61,8 +61,6 @@ static struct bt_sock_list sco_sk_list = {
static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent);
static void sco_chan_del(struct sock *sk, int err);
-static int sco_conn_del(struct hci_conn *conn, int err);
-
static void sco_sock_close(struct sock *sk);
static void sco_sock_kill(struct sock *sk);
@@ -95,12 +93,12 @@ static void sco_sock_clear_timer(struct sock *sk)
}
/* ---- SCO connections ---- */
-static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status)
+static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
{
struct hci_dev *hdev = hcon->hdev;
struct sco_conn *conn = hcon->sco_data;
- if (conn || status)
+ if (conn)
return conn;
conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC);
@@ -195,13 +193,14 @@ static int sco_connect(struct sock *sk)
else
type = SCO_LINK;
- hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
+ hcon = hci_connect(hdev, type, dst, BDADDR_BREDR, BT_SECURITY_LOW,
+ HCI_AT_NO_BONDING);
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto done;
}
- conn = sco_conn_add(hcon, 0);
+ conn = sco_conn_add(hcon);
if (!conn) {
hci_conn_put(hcon);
err = -ENOMEM;
@@ -233,7 +232,7 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
{
struct sco_conn *conn = sco_pi(sk)->conn;
struct sk_buff *skb;
- int err, count;
+ int err;
/* Check outgoing MTU */
if (len > conn->mtu)
@@ -241,20 +240,18 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
BT_DBG("sk %p len %d", sk, len);
- count = min_t(unsigned int, conn->mtu, len);
- skb = bt_skb_send_alloc(sk, count,
- msg->msg_flags & MSG_DONTWAIT, &err);
+ skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
return err;
- if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
+ if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
kfree_skb(skb);
return -EFAULT;
}
hci_send_sco(conn->hcon, skb);
- return count;
+ return len;
}
static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
@@ -277,17 +274,20 @@ drop:
}
/* -------- Socket interface ---------- */
-static struct sock *__sco_get_sock_by_addr(bdaddr_t *ba)
+static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba)
{
- struct sock *sk;
struct hlist_node *node;
+ struct sock *sk;
+
+ sk_for_each(sk, node, &sco_sk_list.head) {
+ if (sk->sk_state != BT_LISTEN)
+ continue;
- sk_for_each(sk, node, &sco_sk_list.head)
if (!bacmp(&bt_sk(sk)->src, ba))
- goto found;
- sk = NULL;
-found:
- return sk;
+ return sk;
+ }
+
+ return NULL;
}
/* Find socket listening on source bdaddr.
@@ -466,7 +466,6 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
- bdaddr_t *src = &sa->sco_bdaddr;
int err = 0;
BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr));
@@ -481,17 +480,14 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
goto done;
}
- write_lock(&sco_sk_list.lock);
-
- if (bacmp(src, BDADDR_ANY) && __sco_get_sock_by_addr(src)) {
- err = -EADDRINUSE;
- } else {
- /* Save source address */
- bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
- sk->sk_state = BT_BOUND;
+ if (sk->sk_type != SOCK_SEQPACKET) {
+ err = -EINVAL;
+ goto done;
}
- write_unlock(&sco_sk_list.lock);
+ bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
+
+ sk->sk_state = BT_BOUND;
done:
release_sock(sk);
@@ -537,21 +533,38 @@ done:
static int sco_sock_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
+ bdaddr_t *src = &bt_sk(sk)->src;
int err = 0;
BT_DBG("sk %p backlog %d", sk, backlog);
lock_sock(sk);
- if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
+ if (sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
+ if (sk->sk_type != SOCK_SEQPACKET) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ write_lock(&sco_sk_list.lock);
+
+ if (__sco_get_sock_listen_by_addr(src)) {
+ err = -EADDRINUSE;
+ goto unlock;
+ }
+
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
+
sk->sk_state = BT_LISTEN;
+unlock:
+ write_unlock(&sco_sk_list.lock);
+
done:
release_sock(sk);
return err;
@@ -923,7 +936,7 @@ int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
if (!status) {
struct sco_conn *conn;
- conn = sco_conn_add(hcon, status);
+ conn = sco_conn_add(hcon);
if (conn)
sco_conn_ready(conn);
} else
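The -EADDRINUSE check moves from sco_sock_bind() to sco_sock_listen(): two sockets may now bind the same source address, and only a second attempt to listen on it fails. A condensed sketch of the new listen path, assuming the __sco_get_sock_listen_by_addr() helper above:

static int example_sco_listen(struct sock *sk, int backlog)
{
	int err = 0;

	write_lock(&sco_sk_list.lock);

	/* only sockets already in BT_LISTEN can conflict */
	if (__sco_get_sock_listen_by_addr(&bt_sk(sk)->src)) {
		err = -EADDRINUSE;
		goto unlock;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;
unlock:
	write_unlock(&sco_sk_list.lock);
	return err;
}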
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index deb119875fd9..6fc7c4708f3e 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -956,7 +956,7 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
HCI_SMP_LTK_SLAVE, 1, authenticated,
enc.ltk, smp->enc_key_size, ediv, ident.rand);
- ident.ediv = cpu_to_le16(ediv);
+ ident.ediv = ediv;
smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident);
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index ba829de84423..929e48aed444 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -170,7 +170,7 @@ static int br_set_mac_address(struct net_device *dev, void *p)
return -EADDRNOTAVAIL;
spin_lock_bh(&br->lock);
- if (compare_ether_addr(dev->dev_addr, addr->sa_data)) {
+ if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
br_fdb_change_mac_address(br, addr->sa_data);
@@ -317,6 +317,9 @@ static const struct net_device_ops br_netdev_ops = {
.ndo_add_slave = br_add_slave,
.ndo_del_slave = br_del_slave,
.ndo_fix_features = br_fix_features,
+ .ndo_fdb_add = br_fdb_add,
+ .ndo_fdb_del = br_fdb_delete,
+ .ndo_fdb_dump = br_fdb_dump,
};
static void br_dev_free(struct net_device *dev)
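This series converts compare_ether_addr() call sites to ether_addr_equal() across the bridge code. The two helpers have opposite polarity, so every test flips: compare_ether_addr() returns 0 on a match, memcmp-style, whereas ether_addr_equal() returns true. Both expect 2-byte-aligned addresses. A sketch:

#include <linux/etherdevice.h>

static bool example_same_addr(const u8 *a, const u8 *b)
{
	/* old: compare_ether_addr(a, b) == 0 meant "equal" */
	/* new: boolean result, so the test reads naturally */
	return ether_addr_equal(a, b);
}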
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 5ba0c844d508..d21f32383517 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -107,8 +107,8 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
struct net_bridge_port *op;
list_for_each_entry(op, &br->port_list, list) {
if (op != p &&
- !compare_ether_addr(op->dev->dev_addr,
- f->addr.addr)) {
+ ether_addr_equal(op->dev->dev_addr,
+ f->addr.addr)) {
f->dst = op;
goto insert;
}
@@ -214,8 +214,8 @@ void br_fdb_delete_by_port(struct net_bridge *br,
struct net_bridge_port *op;
list_for_each_entry(op, &br->port_list, list) {
if (op != p &&
- !compare_ether_addr(op->dev->dev_addr,
- f->addr.addr)) {
+ ether_addr_equal(op->dev->dev_addr,
+ f->addr.addr)) {
f->dst = op;
goto skip_delete;
}
@@ -237,7 +237,7 @@ struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
struct net_bridge_fdb_entry *fdb;
hlist_for_each_entry_rcu(fdb, h, &br->hash[br_mac_hash(addr)], hlist) {
- if (!compare_ether_addr(fdb->addr.addr, addr)) {
+ if (ether_addr_equal(fdb->addr.addr, addr)) {
if (unlikely(has_expired(br, fdb)))
break;
return fdb;
@@ -331,7 +331,7 @@ static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
struct net_bridge_fdb_entry *fdb;
hlist_for_each_entry(fdb, h, head, hlist) {
- if (!compare_ether_addr(fdb->addr.addr, addr))
+ if (ether_addr_equal(fdb->addr.addr, addr))
return fdb;
}
return NULL;
@@ -344,7 +344,7 @@ static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
struct net_bridge_fdb_entry *fdb;
hlist_for_each_entry_rcu(fdb, h, head, hlist) {
- if (!compare_ether_addr(fdb->addr.addr, addr))
+ if (ether_addr_equal(fdb->addr.addr, addr))
return fdb;
}
return NULL;
@@ -487,14 +487,14 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
ndm->ndm_state = fdb_to_nud(fdb);
- NLA_PUT(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr);
-
+ if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr))
+ goto nla_put_failure;
ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
ci.ndm_confirmed = 0;
ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
ci.ndm_refcnt = 0;
- NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
-
+ if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
+ goto nla_put_failure;
return nlmsg_end(skb, nlh);
nla_put_failure:
@@ -535,44 +535,38 @@ errout:
}
/* Dump information about entries, in response to GETNEIGH */
-int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
+int br_fdb_dump(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev,
+ int idx)
{
- struct net *net = sock_net(skb->sk);
- struct net_device *dev;
- int idx = 0;
-
- rcu_read_lock();
- for_each_netdev_rcu(net, dev) {
- struct net_bridge *br = netdev_priv(dev);
- int i;
+ struct net_bridge *br = netdev_priv(dev);
+ int i;
- if (!(dev->priv_flags & IFF_EBRIDGE))
- continue;
+ if (!(dev->priv_flags & IFF_EBRIDGE))
+ goto out;
- for (i = 0; i < BR_HASH_SIZE; i++) {
- struct hlist_node *h;
- struct net_bridge_fdb_entry *f;
-
- hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) {
- if (idx < cb->args[0])
- goto skip;
+ for (i = 0; i < BR_HASH_SIZE; i++) {
+ struct hlist_node *h;
+ struct net_bridge_fdb_entry *f;
- if (fdb_fill_info(skb, br, f,
- NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq,
- RTM_NEWNEIGH,
- NLM_F_MULTI) < 0)
- break;
+ hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) {
+ if (idx < cb->args[0])
+ goto skip;
+
+ if (fdb_fill_info(skb, br, f,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWNEIGH,
+ NLM_F_MULTI) < 0)
+ break;
skip:
- ++idx;
- }
+ ++idx;
}
}
- rcu_read_unlock();
- cb->args[0] = idx;
-
- return skb->len;
+out:
+ return idx;
}
/* Update (create or replace) forwarding database entry */
@@ -614,43 +608,11 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
}
/* Add new permanent fdb entry with RTM_NEWNEIGH */
-int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+int br_fdb_add(struct ndmsg *ndm, struct net_device *dev,
+ unsigned char *addr, u16 nlh_flags)
{
- struct net *net = sock_net(skb->sk);
- struct ndmsg *ndm;
- struct nlattr *tb[NDA_MAX+1];
- struct net_device *dev;
struct net_bridge_port *p;
- const __u8 *addr;
- int err;
-
- ASSERT_RTNL();
- err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
- if (err < 0)
- return err;
-
- ndm = nlmsg_data(nlh);
- if (ndm->ndm_ifindex == 0) {
- pr_info("bridge: RTM_NEWNEIGH with invalid ifindex\n");
- return -EINVAL;
- }
-
- dev = __dev_get_by_index(net, ndm->ndm_ifindex);
- if (dev == NULL) {
- pr_info("bridge: RTM_NEWNEIGH with unknown ifindex\n");
- return -ENODEV;
- }
-
- if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
- pr_info("bridge: RTM_NEWNEIGH with invalid address\n");
- return -EINVAL;
- }
-
- addr = nla_data(tb[NDA_LLADDR]);
- if (!is_valid_ether_addr(addr)) {
- pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n");
- return -EINVAL;
- }
+ int err = 0;
if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
@@ -670,14 +632,14 @@ int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
rcu_read_unlock();
} else {
spin_lock_bh(&p->br->hash_lock);
- err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags);
+ err = fdb_add_entry(p, addr, ndm->ndm_state, nlh_flags);
spin_unlock_bh(&p->br->hash_lock);
}
return err;
}
-static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr)
+static int fdb_delete_by_addr(struct net_bridge_port *p, u8 *addr)
{
struct net_bridge *br = p->br;
struct hlist_head *head = &br->hash[br_mac_hash(addr)];
@@ -692,40 +654,12 @@ static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr)
}
/* Remove neighbor entry with RTM_DELNEIGH */
-int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+int br_fdb_delete(struct ndmsg *ndm, struct net_device *dev,
+ unsigned char *addr)
{
- struct net *net = sock_net(skb->sk);
- struct ndmsg *ndm;
struct net_bridge_port *p;
- struct nlattr *llattr;
- const __u8 *addr;
- struct net_device *dev;
int err;
- ASSERT_RTNL();
- if (nlmsg_len(nlh) < sizeof(*ndm))
- return -EINVAL;
-
- ndm = nlmsg_data(nlh);
- if (ndm->ndm_ifindex == 0) {
- pr_info("bridge: RTM_DELNEIGH with invalid ifindex\n");
- return -EINVAL;
- }
-
- dev = __dev_get_by_index(net, ndm->ndm_ifindex);
- if (dev == NULL) {
- pr_info("bridge: RTM_DELNEIGH with unknown ifindex\n");
- return -ENODEV;
- }
-
- llattr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_LLADDR);
- if (llattr == NULL || nla_len(llattr) != ETH_ALEN) {
- pr_info("bridge: RTM_DELNEIGH with invalid address\n");
- return -EINVAL;
- }
-
- addr = nla_data(llattr);
-
p = br_port_get_rtnl(dev);
if (p == NULL) {
pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 61f65344e711..e9466d412707 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -34,7 +34,7 @@ static inline int should_deliver(const struct net_bridge_port *p,
p->state == BR_STATE_FORWARDING);
}
-static inline unsigned packet_length(const struct sk_buff *skb)
+static inline unsigned int packet_length(const struct sk_buff *skb)
{
return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
}
@@ -47,6 +47,7 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
kfree_skb(skb);
} else {
skb_push(skb, ETH_HLEN);
+ br_drop_fake_rtable(skb);
dev_queue_xmit(skb);
}
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 5a31731be4d0..76f15fda0212 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -216,7 +216,7 @@ forward:
}
/* fall through */
case BR_STATE_LEARNING:
- if (!compare_ether_addr(p->br->dev->dev_addr, dest))
+ if (ether_addr_equal(p->br->dev->dev_addr, dest))
skb->pkt_type = PACKET_HOST;
NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 702a1ae9220b..b66581208cb2 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -36,6 +36,8 @@
#define mlock_dereference(X, br) \
rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
+static void br_multicast_start_querier(struct net_bridge *br);
+
#if IS_ENABLED(CONFIG_IPV6)
static inline int ipv6_is_transient_multicast(const struct in6_addr *addr)
{
@@ -241,7 +243,6 @@ static void br_multicast_group_expired(unsigned long data)
hlist_del_rcu(&mp->hlist[mdb->ver]);
mdb->size--;
- del_timer(&mp->query_timer);
call_rcu_bh(&mp->rcu, br_multicast_free_group);
out:
@@ -271,7 +272,6 @@ static void br_multicast_del_pg(struct net_bridge *br,
rcu_assign_pointer(*pp, p->next);
hlist_del_init(&p->mglist);
del_timer(&p->timer);
- del_timer(&p->query_timer);
call_rcu_bh(&p->rcu, br_multicast_free_pg);
if (!mp->ports && !mp->mglist &&
@@ -460,8 +460,8 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
hopopt[3] = 2; /* Length of RA Option */
hopopt[4] = 0; /* Type = 0x0000 (MLD) */
hopopt[5] = 0;
- hopopt[6] = IPV6_TLV_PAD0; /* Pad0 */
- hopopt[7] = IPV6_TLV_PAD0; /* Pad0 */
+ hopopt[6] = IPV6_TLV_PAD1; /* Pad1 */
+ hopopt[7] = IPV6_TLV_PAD1; /* Pad1 */
skb_put(skb, sizeof(*ip6h) + 8);
@@ -507,74 +507,6 @@ static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
return NULL;
}
-static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp)
-{
- struct net_bridge *br = mp->br;
- struct sk_buff *skb;
-
- skb = br_multicast_alloc_query(br, &mp->addr);
- if (!skb)
- goto timer;
-
- netif_rx(skb);
-
-timer:
- if (++mp->queries_sent < br->multicast_last_member_count)
- mod_timer(&mp->query_timer,
- jiffies + br->multicast_last_member_interval);
-}
-
-static void br_multicast_group_query_expired(unsigned long data)
-{
- struct net_bridge_mdb_entry *mp = (void *)data;
- struct net_bridge *br = mp->br;
-
- spin_lock(&br->multicast_lock);
- if (!netif_running(br->dev) || !mp->mglist ||
- mp->queries_sent >= br->multicast_last_member_count)
- goto out;
-
- br_multicast_send_group_query(mp);
-
-out:
- spin_unlock(&br->multicast_lock);
-}
-
-static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg)
-{
- struct net_bridge_port *port = pg->port;
- struct net_bridge *br = port->br;
- struct sk_buff *skb;
-
- skb = br_multicast_alloc_query(br, &pg->addr);
- if (!skb)
- goto timer;
-
- br_deliver(port, skb);
-
-timer:
- if (++pg->queries_sent < br->multicast_last_member_count)
- mod_timer(&pg->query_timer,
- jiffies + br->multicast_last_member_interval);
-}
-
-static void br_multicast_port_group_query_expired(unsigned long data)
-{
- struct net_bridge_port_group *pg = (void *)data;
- struct net_bridge_port *port = pg->port;
- struct net_bridge *br = port->br;
-
- spin_lock(&br->multicast_lock);
- if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
- pg->queries_sent >= br->multicast_last_member_count)
- goto out;
-
- br_multicast_send_port_group_query(pg);
-
-out:
- spin_unlock(&br->multicast_lock);
-}
-
static struct net_bridge_mdb_entry *br_multicast_get_group(
struct net_bridge *br, struct net_bridge_port *port,
struct br_ip *group, int hash)
@@ -582,8 +514,8 @@ static struct net_bridge_mdb_entry *br_multicast_get_group(
struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
struct hlist_node *p;
- unsigned count = 0;
- unsigned max;
+ unsigned int count = 0;
+ unsigned int max;
int elasticity;
int err;
@@ -690,8 +622,6 @@ rehash:
mp->addr = *group;
setup_timer(&mp->timer, br_multicast_group_expired,
(unsigned long)mp);
- setup_timer(&mp->query_timer, br_multicast_group_query_expired,
- (unsigned long)mp);
hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
mdb->size++;
@@ -746,8 +676,6 @@ static int br_multicast_add_group(struct net_bridge *br,
hlist_add_head(&p->mglist, &port->mglist);
setup_timer(&p->timer, br_multicast_port_group_expired,
(unsigned long)p);
- setup_timer(&p->query_timer, br_multicast_port_group_query_expired,
- (unsigned long)p);
rcu_assign_pointer(*pp, p);
@@ -814,6 +742,20 @@ static void br_multicast_local_router_expired(unsigned long data)
{
}
+static void br_multicast_querier_expired(unsigned long data)
+{
+ struct net_bridge *br = (void *)data;
+
+ spin_lock(&br->multicast_lock);
+ if (!netif_running(br->dev) || br->multicast_disabled)
+ goto out;
+
+ br_multicast_start_querier(br);
+
+out:
+ spin_unlock(&br->multicast_lock);
+}
+
static void __br_multicast_send_query(struct net_bridge *br,
struct net_bridge_port *port,
struct br_ip *ip)
@@ -840,6 +782,7 @@ static void br_multicast_send_query(struct net_bridge *br,
struct br_ip br_group;
if (!netif_running(br->dev) || br->multicast_disabled ||
+ !br->multicast_querier ||
timer_pending(&br->multicast_querier_timer))
return;
@@ -1291,9 +1234,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
time_after(mp->timer.expires, time) :
try_to_del_timer_sync(&mp->timer) >= 0)) {
mod_timer(&mp->timer, time);
-
- mp->queries_sent = 0;
- mod_timer(&mp->query_timer, now);
}
goto out;
@@ -1310,9 +1250,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
time_after(p->timer.expires, time) :
try_to_del_timer_sync(&p->timer) >= 0)) {
mod_timer(&p->timer, time);
-
- p->queries_sent = 0;
- mod_timer(&p->query_timer, now);
}
break;
@@ -1361,8 +1298,8 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
struct sk_buff *skb2 = skb;
const struct iphdr *iph;
struct igmphdr *ih;
- unsigned len;
- unsigned offset;
+ unsigned int len;
+ unsigned int offset;
int err;
/* We treat OOM as packet loss for now. */
@@ -1462,7 +1399,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
u8 icmp6_type;
u8 nexthdr;
__be16 frag_off;
- unsigned len;
+ unsigned int len;
int offset;
int err;
@@ -1628,6 +1565,7 @@ void br_multicast_init(struct net_bridge *br)
br->hash_max = 512;
br->multicast_router = 1;
+ br->multicast_querier = 0;
br->multicast_last_member_count = 2;
br->multicast_startup_query_count = 2;
@@ -1642,7 +1580,7 @@ void br_multicast_init(struct net_bridge *br)
setup_timer(&br->multicast_router_timer,
br_multicast_local_router_expired, 0);
setup_timer(&br->multicast_querier_timer,
- br_multicast_local_router_expired, 0);
+ br_multicast_querier_expired, (unsigned long)br);
setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
(unsigned long)br);
}
@@ -1681,7 +1619,6 @@ void br_multicast_stop(struct net_bridge *br)
hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i],
hlist[ver]) {
del_timer(&mp->timer);
- del_timer(&mp->query_timer);
call_rcu_bh(&mp->rcu, br_multicast_free_group);
}
}
@@ -1770,9 +1707,23 @@ unlock:
return err;
}
-int br_multicast_toggle(struct net_bridge *br, unsigned long val)
+static void br_multicast_start_querier(struct net_bridge *br)
{
struct net_bridge_port *port;
+
+ br_multicast_open(br);
+
+ list_for_each_entry(port, &br->port_list, list) {
+ if (port->state == BR_STATE_DISABLED ||
+ port->state == BR_STATE_BLOCKING)
+ continue;
+
+ __br_multicast_enable_port(port);
+ }
+}
+
+int br_multicast_toggle(struct net_bridge *br, unsigned long val)
+{
int err = 0;
struct net_bridge_mdb_htable *mdb;
@@ -1802,14 +1753,7 @@ rollback:
goto rollback;
}
- br_multicast_open(br);
- list_for_each_entry(port, &br->port_list, list) {
- if (port->state == BR_STATE_DISABLED ||
- port->state == BR_STATE_BLOCKING)
- continue;
-
- __br_multicast_enable_port(port);
- }
+ br_multicast_start_querier(br);
unlock:
spin_unlock_bh(&br->multicast_lock);
@@ -1817,6 +1761,24 @@ unlock:
return err;
}
+int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
+{
+ val = !!val;
+
+ spin_lock_bh(&br->multicast_lock);
+ if (br->multicast_querier == val)
+ goto unlock;
+
+ br->multicast_querier = val;
+ if (val)
+ br_multicast_start_querier(br);
+
+unlock:
+ spin_unlock_bh(&br->multicast_lock);
+
+ return 0;
+}
+
int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
{
int err = -ENOENT;
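The per-group and per-port-group query timers are removed in favor of a single bridge-wide querier, gated by the new multicast_querier flag (off by default) and controlled through the multicast_querier sysfs attribute added further down. A condensed sketch of the toggle, following the functions in this hunk:

static int example_set_querier(struct net_bridge *br, unsigned long val)
{
	val = !!val;			/* normalize to 0/1 */

	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_querier != val) {
		br->multicast_querier = val;
		if (val)		/* (re)start queries on active ports */
			br_multicast_start_querier(br);
	}
	spin_unlock_bh(&br->multicast_lock);
	return 0;
}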
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index dec4f3817133..e41456bd3cc6 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -54,12 +54,14 @@ static int brnf_call_ip6tables __read_mostly = 1;
static int brnf_call_arptables __read_mostly = 1;
static int brnf_filter_vlan_tagged __read_mostly = 0;
static int brnf_filter_pppoe_tagged __read_mostly = 0;
+static int brnf_pass_vlan_indev __read_mostly = 0;
#else
#define brnf_call_iptables 1
#define brnf_call_ip6tables 1
#define brnf_call_arptables 1
#define brnf_filter_vlan_tagged 0
#define brnf_filter_pppoe_tagged 0
+#define brnf_pass_vlan_indev 0
#endif
#define IS_IP(skb) \
@@ -156,7 +158,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
rt->dst.dev = br->dev;
rt->dst.path = &rt->dst;
dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
- rt->dst.flags = DST_NOXFRM | DST_NOPEER;
+ rt->dst.flags = DST_NOXFRM | DST_NOPEER | DST_FAKE_RTABLE;
rt->dst.ops = &fake_dst_ops;
}
@@ -503,6 +505,19 @@ bridged_dnat:
return 0;
}
+static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct net_device *vlan, *br;
+
+ br = bridge_parent(dev);
+ if (brnf_pass_vlan_indev == 0 || !vlan_tx_tag_present(skb))
+ return br;
+
+ vlan = __vlan_find_dev_deep(br, vlan_tx_tag_get(skb) & VLAN_VID_MASK);
+
+ return vlan ? vlan : br;
+}
+
/* Some common code for IPv4/IPv6 */
static struct net_device *setup_pre_routing(struct sk_buff *skb)
{
@@ -515,7 +530,7 @@ static struct net_device *setup_pre_routing(struct sk_buff *skb)
nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
nf_bridge->physindev = skb->dev;
- skb->dev = bridge_parent(skb->dev);
+ skb->dev = brnf_get_logical_dev(skb, skb->dev);
if (skb->protocol == htons(ETH_P_8021Q))
nf_bridge->mask |= BRNF_8021Q;
else if (skb->protocol == htons(ETH_P_PPP_SES))
@@ -543,7 +558,7 @@ static int check_hbh_len(struct sk_buff *skb)
int optlen = nh[off + 1] + 2;
switch (nh[off]) {
- case IPV6_TLV_PAD0:
+ case IPV6_TLV_PAD1:
optlen = 1;
break;
@@ -694,11 +709,7 @@ static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
- struct rtable *rt = skb_rtable(skb);
-
- if (rt && rt == bridge_parent_rtable(in))
- skb_dst_drop(skb);
-
+ br_drop_fake_rtable(skb);
return NF_ACCEPT;
}
@@ -778,7 +789,7 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
else
skb->protocol = htons(ETH_P_IPV6);
- NF_HOOK(pf, NF_INET_FORWARD, skb, bridge_parent(in), parent,
+ NF_HOOK(pf, NF_INET_FORWARD, skb, brnf_get_logical_dev(skb, in), parent,
br_nf_forward_finish);
return NF_STOLEN;
@@ -1006,12 +1017,13 @@ static ctl_table brnf_table[] = {
.mode = 0644,
.proc_handler = brnf_sysctl_call_tables,
},
- { }
-};
-
-static struct ctl_path brnf_path[] = {
- { .procname = "net", },
- { .procname = "bridge", },
+ {
+ .procname = "bridge-nf-pass-vlan-input-dev",
+ .data = &brnf_pass_vlan_indev,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = brnf_sysctl_call_tables,
+ },
{ }
};
#endif
@@ -1030,7 +1042,7 @@ int __init br_netfilter_init(void)
return ret;
}
#ifdef CONFIG_SYSCTL
- brnf_sysctl_header = register_sysctl_paths(brnf_path, brnf_table);
+ brnf_sysctl_header = register_net_sysctl(&init_net, "net/bridge", brnf_table);
if (brnf_sysctl_header == NULL) {
printk(KERN_WARNING
"br_netfilter: can't register to sysctl.\n");
@@ -1047,7 +1059,7 @@ void br_netfilter_fini(void)
{
nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
#ifdef CONFIG_SYSCTL
- unregister_sysctl_table(brnf_sysctl_header);
+ unregister_net_sysctl_table(brnf_sysctl_header);
#endif
dst_entries_destroy(&fake_dst_ops);
}
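register_sysctl_paths() plus a ctl_path array gives way to register_net_sysctl(), which takes the mount point as a plain string and a network-namespace argument (init_net here, since these knobs are global). A sketch of the new registration pair:

static struct ctl_table_header *example_hdr;

static int __init example_init(void)
{
	example_hdr = register_net_sysctl(&init_net, "net/bridge",
					  brnf_table);
	if (!example_hdr)
		return -ENOMEM;
	return 0;
}

static void example_exit(void)
{
	unregister_net_sysctl_table(example_hdr);
}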
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index a1daf8227ed1..2080485515f1 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -60,20 +60,17 @@ static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *por
hdr->ifi_flags = dev_get_flags(dev);
hdr->ifi_change = 0;
- NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
- NLA_PUT_U32(skb, IFLA_MASTER, br->dev->ifindex);
- NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
- NLA_PUT_U8(skb, IFLA_OPERSTATE, operstate);
-
- if (dev->addr_len)
- NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
-
- if (dev->ifindex != dev->iflink)
- NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);
-
- if (event == RTM_NEWLINK)
- NLA_PUT_U8(skb, IFLA_PROTINFO, port->state);
-
+ if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
+ nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
+ nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
+ nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
+ (dev->addr_len &&
+ nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
+ (dev->ifindex != dev->iflink &&
+ nla_put_u32(skb, IFLA_LINK, dev->iflink)) ||
+ (event == RTM_NEWLINK &&
+ nla_put_u8(skb, IFLA_PROTINFO, port->state)))
+ goto nla_put_failure;
return nlmsg_end(skb, nlh);
nla_put_failure:
@@ -91,7 +88,7 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port)
int err = -ENOBUFS;
br_debug(port->br, "port %u(%s) event %d\n",
- (unsigned)port->port_no, port->dev->name, event);
+ (unsigned int)port->port_no, port->dev->name, event);
skb = nlmsg_new(br_nlmsg_size(), GFP_ATOMIC);
if (skb == NULL)
@@ -235,18 +232,6 @@ int __init br_netlink_init(void)
br_rtm_setlink, NULL, NULL);
if (err)
goto err3;
- err = __rtnl_register(PF_BRIDGE, RTM_NEWNEIGH,
- br_fdb_add, NULL, NULL);
- if (err)
- goto err3;
- err = __rtnl_register(PF_BRIDGE, RTM_DELNEIGH,
- br_fdb_delete, NULL, NULL);
- if (err)
- goto err3;
- err = __rtnl_register(PF_BRIDGE, RTM_GETNEIGH,
- NULL, br_fdb_dump, NULL);
- if (err)
- goto err3;
return 0;
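Same NLA_PUT conversion as in br_fdb.c, but here every attribute folds into one short-circuiting condition so a single check covers the whole set; conditional attributes are parenthesized together with their guard. A reduced sketch:

static int example_fill(struct sk_buff *skb, struct net_device *dev)
{
	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    (dev->addr_len &&		/* optional attribute */
	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)))
		return -EMSGSIZE;	/* message tailroom exhausted */
	return 0;
}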
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 0b67a63ad7a8..1a8ad4fb9a6b 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -82,9 +82,7 @@ struct net_bridge_port_group {
struct hlist_node mglist;
struct rcu_head rcu;
struct timer_list timer;
- struct timer_list query_timer;
struct br_ip addr;
- u32 queries_sent;
};
struct net_bridge_mdb_entry
@@ -94,10 +92,8 @@ struct net_bridge_mdb_entry
struct net_bridge_port_group __rcu *ports;
struct rcu_head rcu;
struct timer_list timer;
- struct timer_list query_timer;
struct br_ip addr;
bool mglist;
- u32 queries_sent;
};
struct net_bridge_mdb_htable
@@ -228,6 +224,7 @@ struct net_bridge
unsigned char multicast_router;
u8 multicast_disabled:1;
+ u8 multicast_querier:1;
u32 hash_elasticity;
u32 hash_max;
@@ -363,9 +360,18 @@ extern int br_fdb_insert(struct net_bridge *br,
extern void br_fdb_update(struct net_bridge *br,
struct net_bridge_port *source,
const unsigned char *addr);
-extern int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb);
-extern int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
-extern int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
+
+extern int br_fdb_delete(struct ndmsg *ndm,
+ struct net_device *dev,
+ unsigned char *addr);
+extern int br_fdb_add(struct ndmsg *nlh,
+ struct net_device *dev,
+ unsigned char *addr,
+ u16 nlh_flags);
+extern int br_fdb_dump(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev,
+ int idx);
/* br_forward.c */
extern void br_deliver(const struct net_bridge_port *to,
@@ -421,6 +427,7 @@ extern int br_multicast_set_router(struct net_bridge *br, unsigned long val);
extern int br_multicast_set_port_router(struct net_bridge_port *p,
unsigned long val);
extern int br_multicast_toggle(struct net_bridge *br, unsigned long val);
+extern int br_multicast_set_querier(struct net_bridge *br, unsigned long val);
extern int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val);
static inline bool br_multicast_is_router(struct net_bridge *br)
diff --git a/net/bridge/br_private_stp.h b/net/bridge/br_private_stp.h
index 05ed9bc7e426..0c0fe36e7aa9 100644
--- a/net/bridge/br_private_stp.h
+++ b/net/bridge/br_private_stp.h
@@ -29,10 +29,9 @@
#define BR_MIN_PATH_COST 1
#define BR_MAX_PATH_COST 65535
-struct br_config_bpdu
-{
- unsigned topology_change:1;
- unsigned topology_change_ack:1;
+struct br_config_bpdu {
+ unsigned int topology_change:1;
+ unsigned int topology_change_ack:1;
bridge_id root;
int root_path_cost;
bridge_id bridge_id;
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 8c836d96ba76..af9a12099ba4 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -32,7 +32,7 @@ static const char *const br_port_state_names[] = {
void br_log_state(const struct net_bridge_port *p)
{
br_info(p->br, "port %u(%s) entered %s state\n",
- (unsigned) p->port_no, p->dev->name,
+ (unsigned int) p->port_no, p->dev->name,
br_port_state_names[p->state]);
}
@@ -478,7 +478,7 @@ void br_received_tcn_bpdu(struct net_bridge_port *p)
{
if (br_is_designated_port(p)) {
br_info(p->br, "port %u(%s) received tcn bpdu\n",
- (unsigned) p->port_no, p->dev->name);
+ (unsigned int) p->port_no, p->dev->name);
br_topology_change_detection(p->br);
br_topology_change_acknowledge(p);
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index e16aade51ae0..fd30a6022dea 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -167,7 +167,7 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
if (p->state == BR_STATE_DISABLED)
goto out;
- if (compare_ether_addr(dest, br->group_addr) != 0)
+ if (!ether_addr_equal(dest, br->group_addr))
goto out;
buf = skb_pull(skb, 3);
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index f494496373d6..9d5a414a3943 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -178,7 +178,7 @@ void br_stp_set_enabled(struct net_bridge *br, unsigned long val)
/* called under bridge lock */
void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr)
{
- /* should be aligned on 2 bytes for compare_ether_addr() */
+ /* should be aligned on 2 bytes for ether_addr_equal() */
unsigned short oldaddr_aligned[ETH_ALEN >> 1];
unsigned char *oldaddr = (unsigned char *)oldaddr_aligned;
struct net_bridge_port *p;
@@ -191,12 +191,11 @@ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr)
memcpy(br->dev->dev_addr, addr, ETH_ALEN);
list_for_each_entry(p, &br->port_list, list) {
- if (!compare_ether_addr(p->designated_bridge.addr, oldaddr))
+ if (ether_addr_equal(p->designated_bridge.addr, oldaddr))
memcpy(p->designated_bridge.addr, addr, ETH_ALEN);
- if (!compare_ether_addr(p->designated_root.addr, oldaddr))
+ if (ether_addr_equal(p->designated_root.addr, oldaddr))
memcpy(p->designated_root.addr, addr, ETH_ALEN);
-
}
br_configuration_update(br);
@@ -205,7 +204,7 @@ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr)
br_become_root_bridge(br);
}
-/* should be aligned on 2 bytes for compare_ether_addr() */
+/* should be aligned on 2 bytes for ether_addr_equal() */
static const unsigned short br_mac_zero_aligned[ETH_ALEN >> 1];
/* called under bridge lock */
@@ -227,7 +226,7 @@ bool br_stp_recalculate_bridge_id(struct net_bridge *br)
}
- if (compare_ether_addr(br->bridge_id.addr, addr) == 0)
+ if (ether_addr_equal(br->bridge_id.addr, addr))
return false; /* no change */
br_stp_change_bridge_id(br, addr);
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index 58de2a0f9975..a6747e673426 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -56,7 +56,7 @@ static void br_message_age_timer_expired(unsigned long arg)
return;
br_info(br, "port %u(%s) neighbor %.2x%.2x.%pM lost\n",
- (unsigned) p->port_no, p->dev->name,
+ (unsigned int) p->port_no, p->dev->name,
id->prio[0], id->prio[1], &id->addr);
/*
@@ -84,7 +84,7 @@ static void br_forward_delay_timer_expired(unsigned long arg)
struct net_bridge *br = p->br;
br_debug(br, "port %u(%s) forward delay timer\n",
- (unsigned) p->port_no, p->dev->name);
+ (unsigned int) p->port_no, p->dev->name);
spin_lock(&br->lock);
if (p->state == BR_STATE_LISTENING) {
p->state = BR_STATE_LEARNING;
@@ -131,7 +131,7 @@ static void br_hold_timer_expired(unsigned long arg)
struct net_bridge_port *p = (struct net_bridge_port *) arg;
br_debug(p->br, "port %u(%s) hold timer expired\n",
- (unsigned) p->port_no, p->dev->name);
+ (unsigned int) p->port_no, p->dev->name);
spin_lock(&p->br->lock);
if (p->config_pending)
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index c236c0e43984..c5c059333eab 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -297,7 +297,7 @@ static ssize_t store_group_addr(struct device *d,
const char *buf, size_t len)
{
struct net_bridge *br = to_bridge(d);
- unsigned new_addr[6];
+ unsigned int new_addr[6];
int i;
if (!capable(CAP_NET_ADMIN))
@@ -379,6 +379,23 @@ static ssize_t store_multicast_snooping(struct device *d,
static DEVICE_ATTR(multicast_snooping, S_IRUGO | S_IWUSR,
show_multicast_snooping, store_multicast_snooping);
+static ssize_t show_multicast_querier(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct net_bridge *br = to_bridge(d);
+ return sprintf(buf, "%d\n", br->multicast_querier);
+}
+
+static ssize_t store_multicast_querier(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return store_bridge_parm(d, buf, len, br_multicast_set_querier);
+}
+static DEVICE_ATTR(multicast_querier, S_IRUGO | S_IWUSR,
+ show_multicast_querier, store_multicast_querier);
+
static ssize_t show_hash_elasticity(struct device *d,
struct device_attribute *attr, char *buf)
{
@@ -702,6 +719,7 @@ static struct attribute *bridge_attrs[] = {
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
&dev_attr_multicast_router.attr,
&dev_attr_multicast_snooping.attr,
+ &dev_attr_multicast_querier.attr,
&dev_attr_hash_elasticity.attr,
&dev_attr_hash_max.attr,
&dev_attr_multicast_last_member_count.attr,
diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c
index 5b33a2e634a6..071d87214dde 100644
--- a/net/bridge/netfilter/ebt_stp.c
+++ b/net/bridge/netfilter/ebt_stp.c
@@ -164,8 +164,8 @@ static int ebt_stp_mt_check(const struct xt_mtchk_param *par)
!(info->bitmask & EBT_STP_MASK))
return -EINVAL;
/* Make sure the match only receives stp frames */
- if (compare_ether_addr(e->destmac, bridge_ula) ||
- compare_ether_addr(e->destmsk, msk) || !(e->bitmask & EBT_DESTMAC))
+ if (!ether_addr_equal(e->destmac, bridge_ula) ||
+ !ether_addr_equal(e->destmsk, msk) || !(e->bitmask & EBT_DESTMAC))
return -EINVAL;
return 0;
diff --git a/net/caif/Kconfig b/net/caif/Kconfig
index 936361e5a2b6..d3694953b1d7 100644
--- a/net/caif/Kconfig
+++ b/net/caif/Kconfig
@@ -25,7 +25,7 @@ config CAIF_DEBUG
bool "Enable Debug"
depends on CAIF
default n
- --- help ---
+ ---help---
Enable the inclusion of debug code in the CAIF stack.
Be aware that doing this will impact performance.
If unsure say N.
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 5016fa57b623..fb8944355264 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -19,7 +19,7 @@
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/caif/caif_socket.h>
-#include <linux/atomic.h>
+#include <linux/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/caif/caif_layer.h>
@@ -130,11 +130,10 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
- (unsigned)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
- if (net_ratelimit())
- pr_debug("sending flow OFF (queue len = %d %d)\n",
- atomic_read(&cf_sk->sk.sk_rmem_alloc),
- sk_rcvbuf_lowwater(cf_sk));
+ (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
+ net_dbg_ratelimited("sending flow OFF (queue len = %d %d)\n",
+ atomic_read(&cf_sk->sk.sk_rmem_alloc),
+ sk_rcvbuf_lowwater(cf_sk));
set_rx_flow_off(cf_sk);
caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
}
@@ -144,8 +143,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
return err;
if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
set_rx_flow_off(cf_sk);
- if (net_ratelimit())
- pr_debug("sending flow OFF due to rmem_schedule\n");
+ net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
}
skb->dev = NULL;
@@ -505,6 +503,7 @@ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
memset(skb->cb, 0, sizeof(struct caif_payload_info));
+ cfpkt_set_prio(pkt, cf_sk->sk.sk_priority);
if (cf_sk->layer.dn == NULL) {
kfree_skb(skb);
@@ -1062,6 +1061,18 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
/* Store the protocol */
sk->sk_protocol = (unsigned char) protocol;
+ /* Initialize default priority for well-known cases */
+ switch (protocol) {
+ case CAIFPROTO_AT:
+ sk->sk_priority = TC_PRIO_CONTROL;
+ break;
+ case CAIFPROTO_RFM:
+ sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
+ break;
+ default:
+ sk->sk_priority = TC_PRIO_BESTEFFORT;
+ }
+
/*
* Lock in order to try to stop someone from opening the socket
* too early.
@@ -1081,7 +1092,6 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
set_rx_flow_on(cf_sk);
/* Set default options on configuration */
- cf_sk->sk.sk_priority = CAIF_PRIO_NORMAL;
cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
cf_sk->conn_req.protocol = protocol;
release_sock(&cf_sk->sk);
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 5cf52225692e..047cd0eec022 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -9,6 +9,7 @@
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include <linux/pkt_sched.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfctrl.h>
@@ -189,6 +190,7 @@ void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
cfctrl->serv.dev_info.id = physlinkid;
cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM);
cfpkt_addbdy(pkt, physlinkid);
+ cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
dn->transmit(dn, pkt);
}
@@ -281,6 +283,7 @@ int cfctrl_linkup_request(struct cflayer *layer,
* might arrive with the newly allocated channel ID.
*/
cfpkt_info(pkt)->dev_info->id = param->phyid;
+ cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
ret =
dn->transmit(dn, pkt);
if (ret < 0) {
@@ -314,6 +317,7 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
cfpkt_addbdy(pkt, channelid);
init_info(cfpkt_info(pkt), cfctrl);
+ cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
ret =
dn->transmit(dn, pkt);
#ifndef CAIF_NO_LOOP
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index e335ba859b97..863dedd91bb6 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -381,6 +381,7 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
memcpy(skb2->data, split, len2nd);
skb2->tail += len2nd;
skb2->len += len2nd;
+ skb2->priority = skb->priority;
return skb_to_pkt(skb2);
}
@@ -394,3 +395,9 @@ struct caif_payload_info *cfpkt_info(struct cfpkt *pkt)
return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb;
}
EXPORT_SYMBOL(cfpkt_info);
+
+void cfpkt_set_prio(struct cfpkt *pkt, int prio)
+{
+ pkt_to_skb(pkt)->priority = prio;
+}
+EXPORT_SYMBOL(cfpkt_set_prio);
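cfpkt_set_prio() is a thin exported setter over skb->priority; the CAIF signalling paths below use it to stamp control packets with TC_PRIO_CONTROL so they outrank payload traffic. An illustrative caller:

#include <linux/pkt_sched.h>

static void example_send_ctrl(struct cflayer *dn, struct cfpkt *pkt)
{
	/* mark control traffic before handing it down the stack */
	cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
	dn->transmit(dn, pkt);
}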
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index 4aa33d4496b6..dd485f6128e8 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -11,6 +11,7 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/pkt_sched.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfsrvl.h>
#include <net/caif/cfpkt.h>
@@ -120,6 +121,7 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
info->channel_id = service->layer.id;
info->hdr_len = 1;
info->dev_info = &service->dev_info;
+ cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
return layr->dn->transmit(layr->dn, pkt);
}
case CAIF_MODEMCMD_FLOW_OFF_REQ:
@@ -140,6 +142,7 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
info->channel_id = service->layer.id;
info->hdr_len = 1;
info->dev_info = &service->dev_info;
+ cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
return layr->dn->transmit(layr->dn, pkt);
}
default:
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 20618dd3088b..69771c04ba8f 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -103,6 +103,7 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
skb->protocol = htons(ETH_P_IPV6);
break;
default:
+ kfree_skb(skb);
priv->netdev->stats.rx_errors++;
return -EINVAL;
}
@@ -220,14 +221,16 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (skb->len > priv->netdev->mtu) {
pr_warn("Size of skb exceeded MTU\n");
+ kfree_skb(skb);
dev->stats.tx_errors++;
- return -ENOSPC;
+ return NETDEV_TX_OK;
}
if (!priv->flowenabled) {
pr_debug("dropping packets flow off\n");
+ kfree_skb(skb);
dev->stats.tx_dropped++;
- return NETDEV_TX_BUSY;
+ return NETDEV_TX_OK;
}
if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
@@ -242,7 +245,7 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
result = priv->chnl.dn->transmit(priv->chnl.dn, pkt);
if (result) {
dev->stats.tx_dropped++;
- return result;
+ return NETDEV_TX_OK;
}
/* Update statistics. */
@@ -421,14 +424,14 @@ static int ipcaif_fill_info(struct sk_buff *skb, const struct net_device *dev)
struct chnl_net *priv;
u8 loop;
priv = netdev_priv(dev);
- NLA_PUT_U32(skb, IFLA_CAIF_IPV4_CONNID,
- priv->conn_req.sockaddr.u.dgm.connection_id);
- NLA_PUT_U32(skb, IFLA_CAIF_IPV6_CONNID,
- priv->conn_req.sockaddr.u.dgm.connection_id);
+ if (nla_put_u32(skb, IFLA_CAIF_IPV4_CONNID,
+ priv->conn_req.sockaddr.u.dgm.connection_id) ||
+ nla_put_u32(skb, IFLA_CAIF_IPV6_CONNID,
+ priv->conn_req.sockaddr.u.dgm.connection_id))
+ goto nla_put_failure;
loop = priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP;
- NLA_PUT_U8(skb, IFLA_CAIF_LOOPBACK, loop);
-
-
+ if (nla_put_u8(skb, IFLA_CAIF_LOOPBACK, loop))
+ goto nla_put_failure;
return 0;
nla_put_failure:
return -EMSGSIZE;
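chnl_net_start_xmit() used to return raw error codes, which are not valid NETDEV_TX_* values and can make the core requeue or leak the buffer. The contract is: once the handler decides to drop, it must free the skb itself and still report NETDEV_TX_OK. A sketch under that contract, with illustrative names:

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	if (skb->len > dev->mtu) {
		kfree_skb(skb);		/* we own the skb now */
		dev->stats.tx_errors++;
		return NETDEV_TX_OK;	/* consumed, though dropped */
	}

	/* ... hand the packet to the lower layer ... */
	return NETDEV_TX_OK;
}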
diff --git a/net/can/gw.c b/net/can/gw.c
index 3d79b127881e..b41acf25668f 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -66,7 +66,7 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
MODULE_ALIAS("can-gw");
-HLIST_HEAD(cgw_list);
+static HLIST_HEAD(cgw_list);
static struct notifier_block notifier;
static struct kmem_cache *cgw_cache __read_mostly;
diff --git a/net/ceph/auth_none.c b/net/ceph/auth_none.c
index 214c2bb43d62..925ca583c09c 100644
--- a/net/ceph/auth_none.c
+++ b/net/ceph/auth_none.c
@@ -59,9 +59,7 @@ static int handle_reply(struct ceph_auth_client *ac, int result,
*/
static int ceph_auth_none_create_authorizer(
struct ceph_auth_client *ac, int peer_type,
- struct ceph_authorizer **a,
- void **buf, size_t *len,
- void **reply_buf, size_t *reply_len)
+ struct ceph_auth_handshake *auth)
{
struct ceph_auth_none_info *ai = ac->private;
struct ceph_none_authorizer *au = &ai->au;
@@ -82,11 +80,12 @@ static int ceph_auth_none_create_authorizer(
dout("built authorizer len %d\n", au->buf_len);
}
- *a = (struct ceph_authorizer *)au;
- *buf = au->buf;
- *len = au->buf_len;
- *reply_buf = au->reply_buf;
- *reply_len = sizeof(au->reply_buf);
+ auth->authorizer = (struct ceph_authorizer *) au;
+ auth->authorizer_buf = au->buf;
+ auth->authorizer_buf_len = au->buf_len;
+ auth->authorizer_reply_buf = au->reply_buf;
+ auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
+
return 0;
bad2:
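Both ceph auth implementations stop filling five separate out-parameters and instead populate a single struct ceph_auth_handshake. Reconstructed from the assignments in this hunk and the matching auth_x.c change below (the authoritative definition lives in the ceph auth headers):

struct ceph_auth_handshake {
	struct ceph_authorizer *authorizer;
	void   *authorizer_buf;
	size_t  authorizer_buf_len;
	void   *authorizer_reply_buf;
	size_t  authorizer_reply_buf_len;
};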
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index 1587dc6010c6..a16bf14eb027 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -526,9 +526,7 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
static int ceph_x_create_authorizer(
struct ceph_auth_client *ac, int peer_type,
- struct ceph_authorizer **a,
- void **buf, size_t *len,
- void **reply_buf, size_t *reply_len)
+ struct ceph_auth_handshake *auth)
{
struct ceph_x_authorizer *au;
struct ceph_x_ticket_handler *th;
@@ -548,11 +546,12 @@ static int ceph_x_create_authorizer(
return ret;
}
- *a = (struct ceph_authorizer *)au;
- *buf = au->buf->vec.iov_base;
- *len = au->buf->vec.iov_len;
- *reply_buf = au->reply_buf;
- *reply_len = sizeof(au->reply_buf);
+ auth->authorizer = (struct ceph_authorizer *) au;
+ auth->authorizer_buf = au->buf->vec.iov_base;
+ auth->authorizer_buf_len = au->buf->vec.iov_len;
+ auth->authorizer_reply_buf = au->reply_buf;
+ auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
+
return 0;
}
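Both auth flavors now fill in a single ceph_auth_handshake instead of five separate out-parameters. Judging purely from the field accesses above, the structure is roughly the following (a reconstruction from this diff, not a quote of the header):

struct ceph_auth_handshake {
	struct ceph_authorizer *authorizer;
	void *authorizer_buf;			/* outgoing authorizer blob */
	size_t authorizer_buf_len;
	void *authorizer_reply_buf;		/* server's reply lands here */
	size_t authorizer_reply_buf_len;
};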
diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h
index e02da7a5c5a1..f459e93b774f 100644
--- a/net/ceph/auth_x.h
+++ b/net/ceph/auth_x.h
@@ -13,7 +13,7 @@
*/
struct ceph_x_ticket_handler {
struct rb_node node;
- unsigned service;
+ unsigned int service;
struct ceph_crypto_key session_key;
struct ceph_timespec validity;
@@ -27,7 +27,7 @@ struct ceph_x_ticket_handler {
struct ceph_x_authorizer {
struct ceph_buffer *buf;
- unsigned service;
+ unsigned int service;
u64 nonce;
char reply_buf[128]; /* big enough for encrypted blob */
};
@@ -38,7 +38,7 @@ struct ceph_x_info {
bool starting;
u64 server_challenge;
- unsigned have_keys;
+ unsigned int have_keys;
struct rb_root ticket_handlers;
struct ceph_x_authorizer auth_authorizer;
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index cc913193d992..a776f751edbf 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -441,8 +441,8 @@ EXPORT_SYMBOL(ceph_client_id);
* create a fresh client instance
*/
struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private,
- unsigned supported_features,
- unsigned required_features)
+ unsigned int supported_features,
+ unsigned int required_features)
{
struct ceph_client *client;
struct ceph_entity_addr *myaddr = NULL;
diff --git a/net/ceph/ceph_hash.c b/net/ceph/ceph_hash.c
index 0a1b53bce76d..67bb1f11e613 100644
--- a/net/ceph/ceph_hash.c
+++ b/net/ceph/ceph_hash.c
@@ -20,7 +20,7 @@
c = c - a; c = c - b; c = c ^ (b >> 15); \
} while (0)
-unsigned ceph_str_hash_rjenkins(const char *str, unsigned length)
+unsigned int ceph_str_hash_rjenkins(const char *str, unsigned int length)
{
const unsigned char *k = (const unsigned char *)str;
__u32 a, b, c; /* the internal state */
@@ -81,7 +81,7 @@ unsigned ceph_str_hash_rjenkins(const char *str, unsigned length)
/*
* linux dcache hash
*/
-unsigned ceph_str_hash_linux(const char *str, unsigned length)
+unsigned int ceph_str_hash_linux(const char *str, unsigned int length)
{
unsigned long hash = 0;
unsigned char c;
@@ -94,7 +94,7 @@ unsigned ceph_str_hash_linux(const char *str, unsigned length)
}
-unsigned ceph_str_hash(int type, const char *s, unsigned len)
+unsigned int ceph_str_hash(int type, const char *s, unsigned int len)
{
switch (type) {
case CEPH_STR_HASH_LINUX:
diff --git a/net/ceph/crush/crush.c b/net/ceph/crush/crush.c
index d6ebb13a18a4..089613234f03 100644
--- a/net/ceph/crush/crush.c
+++ b/net/ceph/crush/crush.c
@@ -26,9 +26,9 @@ const char *crush_bucket_alg_name(int alg)
* @b: bucket pointer
* @p: item index in bucket
*/
-int crush_get_bucket_item_weight(struct crush_bucket *b, int p)
+int crush_get_bucket_item_weight(const struct crush_bucket *b, int p)
{
- if (p >= b->size)
+ if ((__u32)p >= b->size)
return 0;
switch (b->alg) {
@@ -37,38 +37,13 @@ int crush_get_bucket_item_weight(struct crush_bucket *b, int p)
case CRUSH_BUCKET_LIST:
return ((struct crush_bucket_list *)b)->item_weights[p];
case CRUSH_BUCKET_TREE:
- if (p & 1)
- return ((struct crush_bucket_tree *)b)->node_weights[p];
- return 0;
+ return ((struct crush_bucket_tree *)b)->node_weights[crush_calc_tree_node(p)];
case CRUSH_BUCKET_STRAW:
return ((struct crush_bucket_straw *)b)->item_weights[p];
}
return 0;
}
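Tree buckets keep their weights on the interior nodes of a binary tree, so an item index has to be translated before node_weights can be indexed; the open-coded "p & 1" test being removed got this wrong for every even item. Assuming crush_calc_tree_node matches its usual definition in the CRUSH headers, the mapping is:

/* Item i lives at tree node ((i + 1) << 1) - 1, i.e. items 0, 1, 2, ...
 * occupy the odd node slots 1, 3, 5, ... of the node_weights array. */
static inline int crush_calc_tree_node(int i)
{
	return ((i + 1) << 1) - 1;
}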
-/**
- * crush_calc_parents - Calculate parent vectors for the given crush map.
- * @map: crush_map pointer
- */
-void crush_calc_parents(struct crush_map *map)
-{
- int i, b, c;
-
- for (b = 0; b < map->max_buckets; b++) {
- if (map->buckets[b] == NULL)
- continue;
- for (i = 0; i < map->buckets[b]->size; i++) {
- c = map->buckets[b]->items[i];
- BUG_ON(c >= map->max_devices ||
- c < -map->max_buckets);
- if (c >= 0)
- map->device_parents[c] = map->buckets[b]->id;
- else
- map->bucket_parents[-1-c] = map->buckets[b]->id;
- }
- }
-}
-
void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b)
{
kfree(b->h.perm);
@@ -87,6 +62,8 @@ void crush_destroy_bucket_list(struct crush_bucket_list *b)
void crush_destroy_bucket_tree(struct crush_bucket_tree *b)
{
+ kfree(b->h.perm);
+ kfree(b->h.items);
kfree(b->node_weights);
kfree(b);
}
@@ -124,10 +101,9 @@ void crush_destroy_bucket(struct crush_bucket *b)
*/
void crush_destroy(struct crush_map *map)
{
- int b;
-
/* buckets */
if (map->buckets) {
+ __s32 b;
for (b = 0; b < map->max_buckets; b++) {
if (map->buckets[b] == NULL)
continue;
@@ -138,13 +114,12 @@ void crush_destroy(struct crush_map *map)
/* rules */
if (map->rules) {
+ __u32 b;
for (b = 0; b < map->max_rules; b++)
kfree(map->rules[b]);
kfree(map->rules);
}
- kfree(map->bucket_parents);
- kfree(map->device_parents);
kfree(map);
}
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index b79747c4b645..d7edc24333b8 100644
--- a/net/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -20,6 +20,7 @@
#include <linux/crush/crush.h>
#include <linux/crush/hash.h>
+#include <linux/crush/mapper.h>
/*
* Implement the core CRUSH mapping algorithm.
@@ -32,9 +33,9 @@
* @type: storage ruleset type (user defined)
* @size: output set size
*/
-int crush_find_rule(struct crush_map *map, int ruleset, int type, int size)
+int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size)
{
- int i;
+ __u32 i;
for (i = 0; i < map->max_rules; i++) {
if (map->rules[i] &&
@@ -68,11 +69,11 @@ int crush_find_rule(struct crush_map *map, int ruleset, int type, int size)
static int bucket_perm_choose(struct crush_bucket *bucket,
int x, int r)
{
- unsigned pr = r % bucket->size;
- unsigned i, s;
+ unsigned int pr = r % bucket->size;
+ unsigned int i, s;
/* start a new permutation if @x has changed */
- if (bucket->perm_x != x || bucket->perm_n == 0) {
+ if (bucket->perm_x != (__u32)x || bucket->perm_n == 0) {
dprintk("bucket %d new x=%d\n", bucket->id, x);
bucket->perm_x = x;
@@ -100,13 +101,13 @@ static int bucket_perm_choose(struct crush_bucket *bucket,
for (i = 0; i < bucket->perm_n; i++)
dprintk(" perm_choose have %d: %d\n", i, bucket->perm[i]);
while (bucket->perm_n <= pr) {
- unsigned p = bucket->perm_n;
+ unsigned int p = bucket->perm_n;
/* no point in swapping the final entry */
if (p < bucket->size - 1) {
i = crush_hash32_3(bucket->hash, x, bucket->id, p) %
(bucket->size - p);
if (i) {
- unsigned t = bucket->perm[p + i];
+ unsigned int t = bucket->perm[p + i];
bucket->perm[p + i] = bucket->perm[p];
bucket->perm[p] = t;
}
@@ -152,8 +153,8 @@ static int bucket_list_choose(struct crush_bucket_list *bucket,
return bucket->h.items[i];
}
- BUG_ON(1);
- return 0;
+ dprintk("bad list sums for bucket %d\n", bucket->h.id);
+ return bucket->h.items[0];
}
@@ -219,7 +220,7 @@ static int bucket_tree_choose(struct crush_bucket_tree *bucket,
static int bucket_straw_choose(struct crush_bucket_straw *bucket,
int x, int r)
{
- int i;
+ __u32 i;
int high = 0;
__u64 high_draw = 0;
__u64 draw;
@@ -239,6 +240,7 @@ static int bucket_straw_choose(struct crush_bucket_straw *bucket,
static int crush_bucket_choose(struct crush_bucket *in, int x, int r)
{
dprintk(" crush_bucket_choose %d x=%d r=%d\n", in->id, x, r);
+ BUG_ON(in->size == 0);
switch (in->alg) {
case CRUSH_BUCKET_UNIFORM:
return bucket_uniform_choose((struct crush_bucket_uniform *)in,
@@ -253,7 +255,7 @@ static int crush_bucket_choose(struct crush_bucket *in, int x, int r)
return bucket_straw_choose((struct crush_bucket_straw *)in,
x, r);
default:
- BUG_ON(1);
+ dprintk("unknown bucket %d alg %d\n", in->id, in->alg);
return in->items[0];
}
}
@@ -262,7 +264,7 @@ static int crush_bucket_choose(struct crush_bucket *in, int x, int r)
* true if device is marked "out" (failed, fully offloaded)
* of the cluster
*/
-static int is_out(struct crush_map *map, __u32 *weight, int item, int x)
+static int is_out(const struct crush_map *map, const __u32 *weight, int item, int x)
{
if (weight[item] >= 0x10000)
return 0;
@@ -287,16 +289,16 @@ static int is_out(struct crush_map *map, __u32 *weight, int item, int x)
* @recurse_to_leaf: true if we want one device under each item of given type
* @out2: second output vector for leaf items (if @recurse_to_leaf)
*/
-static int crush_choose(struct crush_map *map,
+static int crush_choose(const struct crush_map *map,
struct crush_bucket *bucket,
- __u32 *weight,
+ const __u32 *weight,
int x, int numrep, int type,
int *out, int outpos,
int firstn, int recurse_to_leaf,
int *out2)
{
int rep;
- int ftotal, flocal;
+ unsigned int ftotal, flocal;
int retry_descent, retry_bucket, skip_rep;
struct crush_bucket *in = bucket;
int r;
@@ -304,7 +306,7 @@ static int crush_choose(struct crush_map *map,
int item = 0;
int itemtype;
int collide, reject;
- const int orig_tries = 5; /* attempts before we fall back to search */
+ const unsigned int orig_tries = 5; /* attempts before we fall back to search */
dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d\n", recurse_to_leaf ? "_LEAF" : "",
bucket->id, x, outpos, numrep);
@@ -325,7 +327,7 @@ static int crush_choose(struct crush_map *map,
r = rep;
if (in->alg == CRUSH_BUCKET_UNIFORM) {
/* be careful */
- if (firstn || numrep >= in->size)
+ if (firstn || (__u32)numrep >= in->size)
/* r' = r + f_total */
r += ftotal;
else if (in->size % numrep == 0)
@@ -354,7 +356,11 @@ static int crush_choose(struct crush_map *map,
item = bucket_perm_choose(in, x, r);
else
item = crush_bucket_choose(in, x, r);
- BUG_ON(item >= map->max_devices);
+ if (item >= map->max_devices) {
+ dprintk(" bad item %d\n", item);
+ skip_rep = 1;
+ break;
+ }
/* desired type? */
if (item < 0)
@@ -365,8 +371,12 @@ static int crush_choose(struct crush_map *map,
/* keep going? */
if (itemtype != type) {
- BUG_ON(item >= 0 ||
- (-1-item) >= map->max_buckets);
+ if (item >= 0 ||
+ (-1-item) >= map->max_buckets) {
+ dprintk(" bad item type %d\n", type);
+ skip_rep = 1;
+ break;
+ }
in = map->buckets[-1-item];
retry_bucket = 1;
continue;
@@ -415,7 +425,7 @@ reject:
if (collide && flocal < 3)
/* retry locally a few times */
retry_bucket = 1;
- else if (flocal < in->size + orig_tries)
+ else if (flocal <= in->size + orig_tries)
/* exhaustive bucket search */
retry_bucket = 1;
else if (ftotal < 20)
@@ -425,7 +435,7 @@ reject:
/* else give up */
skip_rep = 1;
dprintk(" reject %d collide %d "
- "ftotal %d flocal %d\n",
+ "ftotal %u flocal %u\n",
reject, collide, ftotal,
flocal);
}
@@ -454,15 +464,12 @@ reject:
* @x: hash input
* @result: pointer to result vector
* @result_max: maximum result size
- * @force: force initial replica choice; -1 for none
*/
-int crush_do_rule(struct crush_map *map,
+int crush_do_rule(const struct crush_map *map,
int ruleno, int x, int *result, int result_max,
- int force, __u32 *weight)
+ const __u32 *weight)
{
int result_len;
- int force_context[CRUSH_MAX_DEPTH];
- int force_pos = -1;
int a[CRUSH_MAX_SET];
int b[CRUSH_MAX_SET];
int c[CRUSH_MAX_SET];
@@ -473,66 +480,44 @@ int crush_do_rule(struct crush_map *map,
int osize;
int *tmp;
struct crush_rule *rule;
- int step;
+ __u32 step;
int i, j;
int numrep;
int firstn;
- BUG_ON(ruleno >= map->max_rules);
+ if ((__u32)ruleno >= map->max_rules) {
+ dprintk(" bad ruleno %d\n", ruleno);
+ return 0;
+ }
rule = map->rules[ruleno];
result_len = 0;
w = a;
o = b;
- /*
- * determine hierarchical context of force, if any. note
- * that this may or may not correspond to the specific types
- * referenced by the crush rule.
- */
- if (force >= 0 &&
- force < map->max_devices &&
- map->device_parents[force] != 0 &&
- !is_out(map, weight, force, x)) {
- while (1) {
- force_context[++force_pos] = force;
- if (force >= 0)
- force = map->device_parents[force];
- else
- force = map->bucket_parents[-1-force];
- if (force == 0)
- break;
- }
- }
-
for (step = 0; step < rule->len; step++) {
+ struct crush_rule_step *curstep = &rule->steps[step];
+
firstn = 0;
- switch (rule->steps[step].op) {
+ switch (curstep->op) {
case CRUSH_RULE_TAKE:
- w[0] = rule->steps[step].arg1;
-
- /* find position in force_context/hierarchy */
- while (force_pos >= 0 &&
- force_context[force_pos] != w[0])
- force_pos--;
- /* and move past it */
- if (force_pos >= 0)
- force_pos--;
-
+ w[0] = curstep->arg1;
wsize = 1;
break;
case CRUSH_RULE_CHOOSE_LEAF_FIRSTN:
case CRUSH_RULE_CHOOSE_FIRSTN:
firstn = 1;
+ /* fall through */
case CRUSH_RULE_CHOOSE_LEAF_INDEP:
case CRUSH_RULE_CHOOSE_INDEP:
- BUG_ON(wsize == 0);
+ if (wsize == 0)
+ break;
recurse_to_leaf =
- rule->steps[step].op ==
+ curstep->op ==
CRUSH_RULE_CHOOSE_LEAF_FIRSTN ||
- rule->steps[step].op ==
+ curstep->op ==
CRUSH_RULE_CHOOSE_LEAF_INDEP;
/* reset output */
@@ -544,32 +529,18 @@ int crush_do_rule(struct crush_map *map,
* basically, numrep <= 0 means relative to
* the provided result_max
*/
- numrep = rule->steps[step].arg1;
+ numrep = curstep->arg1;
if (numrep <= 0) {
numrep += result_max;
if (numrep <= 0)
continue;
}
j = 0;
- if (osize == 0 && force_pos >= 0) {
- /* skip any intermediate types */
- while (force_pos &&
- force_context[force_pos] < 0 &&
- rule->steps[step].arg2 !=
- map->buckets[-1 -
- force_context[force_pos]]->type)
- force_pos--;
- o[osize] = force_context[force_pos];
- if (recurse_to_leaf)
- c[osize] = force_context[0];
- j++;
- force_pos--;
- }
osize += crush_choose(map,
map->buckets[-1-w[i]],
weight,
x, numrep,
- rule->steps[step].arg2,
+ curstep->arg2,
o+osize, j,
firstn,
recurse_to_leaf, c+osize);
@@ -596,7 +567,9 @@ int crush_do_rule(struct crush_map *map,
break;
default:
- BUG_ON(1);
+ dprintk(" unknown op %d at step %d\n",
+ curstep->op, step);
+ break;
}
}
return result_len;
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
index 27d4ea315d12..54b531a01121 100644
--- a/net/ceph/debugfs.c
+++ b/net/ceph/debugfs.c
@@ -94,9 +94,9 @@ static int monc_show(struct seq_file *s, void *p)
mutex_lock(&monc->mutex);
if (monc->have_mdsmap)
- seq_printf(s, "have mdsmap %u\n", (unsigned)monc->have_mdsmap);
+ seq_printf(s, "have mdsmap %u\n", (unsigned int)monc->have_mdsmap);
if (monc->have_osdmap)
- seq_printf(s, "have osdmap %u\n", (unsigned)monc->have_osdmap);
+ seq_printf(s, "have osdmap %u\n", (unsigned int)monc->have_osdmap);
if (monc->want_next_osdmap)
seq_printf(s, "want next osdmap\n");
@@ -146,7 +146,7 @@ static int osdc_show(struct seq_file *s, void *pp)
if (req->r_reassert_version.epoch)
seq_printf(s, "\t%u'%llu",
- (unsigned)le32_to_cpu(req->r_reassert_version.epoch),
+ (unsigned int)le32_to_cpu(req->r_reassert_version.epoch),
le64_to_cpu(req->r_reassert_version.version));
else
seq_printf(s, "\t");
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index f0993af2ae4d..524f4e4f598b 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -653,54 +653,57 @@ static void prepare_write_keepalive(struct ceph_connection *con)
* Connection negotiation.
*/
-static int prepare_connect_authorizer(struct ceph_connection *con)
+static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con,
+ int *auth_proto)
{
- void *auth_buf;
- int auth_len = 0;
- int auth_protocol = 0;
+ struct ceph_auth_handshake *auth;
+
+ if (!con->ops->get_authorizer) {
+ con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
+ con->out_connect.authorizer_len = 0;
+
+ return NULL;
+ }
+
+ /* Can't hold the mutex while getting authorizer */
mutex_unlock(&con->mutex);
- if (con->ops->get_authorizer)
- con->ops->get_authorizer(con, &auth_buf, &auth_len,
- &auth_protocol, &con->auth_reply_buf,
- &con->auth_reply_buf_len,
- con->auth_retry);
+
+ auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);
+
mutex_lock(&con->mutex);
- if (test_bit(CLOSED, &con->state) ||
- test_bit(OPENING, &con->state))
- return -EAGAIN;
+ if (IS_ERR(auth))
+ return auth;
+ if (test_bit(CLOSED, &con->state) || test_bit(OPENING, &con->state))
+ return ERR_PTR(-EAGAIN);
- con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol);
- con->out_connect.authorizer_len = cpu_to_le32(auth_len);
+ con->auth_reply_buf = auth->authorizer_reply_buf;
+ con->auth_reply_buf_len = auth->authorizer_reply_buf_len;
- if (auth_len)
- ceph_con_out_kvec_add(con, auth_len, auth_buf);
- return 0;
+ return auth;
}
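get_connect_authorizer folds three outcomes into one pointer: NULL when no authorizer is needed, a live handshake, or an errno encoded with ERR_PTR. Callers unpack it with the standard kernel idiom, as prepare_write_connect does below (a sketch of the convention, not a new code path):

	struct ceph_auth_handshake *auth;
	int auth_proto = CEPH_AUTH_UNKNOWN;

	auth = get_connect_authorizer(con, &auth_proto);
	if (IS_ERR(auth))
		return PTR_ERR(auth);		/* the pointer carries -errno */
	con->out_connect.authorizer_len = auth ?
		cpu_to_le32(auth->authorizer_buf_len) : 0;	/* NULL == none needed */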
/*
* We connected to a peer and are saying hello.
*/
-static void prepare_write_banner(struct ceph_messenger *msgr,
- struct ceph_connection *con)
+static void prepare_write_banner(struct ceph_connection *con)
{
- ceph_con_out_kvec_reset(con);
ceph_con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
- ceph_con_out_kvec_add(con, sizeof (msgr->my_enc_addr),
- &msgr->my_enc_addr);
+ ceph_con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
+ &con->msgr->my_enc_addr);
con->out_more = 0;
set_bit(WRITE_PENDING, &con->state);
}
-static int prepare_write_connect(struct ceph_messenger *msgr,
- struct ceph_connection *con,
- int include_banner)
+static int prepare_write_connect(struct ceph_connection *con)
{
- unsigned global_seq = get_global_seq(con->msgr, 0);
+ unsigned int global_seq = get_global_seq(con->msgr, 0);
int proto;
+ int auth_proto;
+ struct ceph_auth_handshake *auth;
switch (con->peer_name.type) {
case CEPH_ENTITY_TYPE_MON:
@@ -719,23 +722,32 @@ static int prepare_write_connect(struct ceph_messenger *msgr,
dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
con->connect_seq, global_seq, proto);
- con->out_connect.features = cpu_to_le64(msgr->supported_features);
+ con->out_connect.features = cpu_to_le64(con->msgr->supported_features);
con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
con->out_connect.global_seq = cpu_to_le32(global_seq);
con->out_connect.protocol_version = cpu_to_le32(proto);
con->out_connect.flags = 0;
- if (include_banner)
- prepare_write_banner(msgr, con);
- else
- ceph_con_out_kvec_reset(con);
- ceph_con_out_kvec_add(con, sizeof (con->out_connect), &con->out_connect);
+ auth_proto = CEPH_AUTH_UNKNOWN;
+ auth = get_connect_authorizer(con, &auth_proto);
+ if (IS_ERR(auth))
+ return PTR_ERR(auth);
+
+ con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
+ con->out_connect.authorizer_len = auth ?
+ cpu_to_le32(auth->authorizer_buf_len) : 0;
+
+ ceph_con_out_kvec_add(con, sizeof (con->out_connect),
+ &con->out_connect);
+ if (auth && auth->authorizer_buf_len)
+ ceph_con_out_kvec_add(con, auth->authorizer_buf_len,
+ auth->authorizer_buf);
con->out_more = 0;
set_bit(WRITE_PENDING, &con->state);
- return prepare_connect_authorizer(con);
+ return 0;
}
/*
@@ -816,7 +828,7 @@ static void iter_bio_next(struct bio **bio_iter, int *seg)
static int write_partial_msg_pages(struct ceph_connection *con)
{
struct ceph_msg *msg = con->out_msg;
- unsigned data_len = le32_to_cpu(msg->hdr.data_len);
+ unsigned int data_len = le32_to_cpu(msg->hdr.data_len);
size_t len;
bool do_datacrc = !con->msgr->nocrc;
int ret;
@@ -992,11 +1004,10 @@ static int prepare_read_message(struct ceph_connection *con)
static int read_partial(struct ceph_connection *con,
- int *to, int size, void *object)
+ int end, int size, void *object)
{
- *to += size;
- while (con->in_base_pos < *to) {
- int left = *to - con->in_base_pos;
+ while (con->in_base_pos < end) {
+ int left = end - con->in_base_pos;
int have = size - left;
int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
if (ret <= 0)
@@ -1012,37 +1023,52 @@ static int read_partial(struct ceph_connection *con,
*/
static int read_partial_banner(struct ceph_connection *con)
{
- int ret, to = 0;
+ int size;
+ int end;
+ int ret;
dout("read_partial_banner %p at %d\n", con, con->in_base_pos);
/* peer's banner */
- ret = read_partial(con, &to, strlen(CEPH_BANNER), con->in_banner);
+ size = strlen(CEPH_BANNER);
+ end = size;
+ ret = read_partial(con, end, size, con->in_banner);
if (ret <= 0)
goto out;
- ret = read_partial(con, &to, sizeof(con->actual_peer_addr),
- &con->actual_peer_addr);
+
+ size = sizeof (con->actual_peer_addr);
+ end += size;
+ ret = read_partial(con, end, size, &con->actual_peer_addr);
if (ret <= 0)
goto out;
- ret = read_partial(con, &to, sizeof(con->peer_addr_for_me),
- &con->peer_addr_for_me);
+
+ size = sizeof (con->peer_addr_for_me);
+ end += size;
+ ret = read_partial(con, end, size, &con->peer_addr_for_me);
if (ret <= 0)
goto out;
+
out:
return ret;
}
static int read_partial_connect(struct ceph_connection *con)
{
- int ret, to = 0;
+ int size;
+ int end;
+ int ret;
dout("read_partial_connect %p at %d\n", con, con->in_base_pos);
- ret = read_partial(con, &to, sizeof(con->in_reply), &con->in_reply);
+ size = sizeof (con->in_reply);
+ end = size;
+ ret = read_partial(con, end, size, &con->in_reply);
if (ret <= 0)
goto out;
- ret = read_partial(con, &to, le32_to_cpu(con->in_reply.authorizer_len),
- con->auth_reply_buf);
+
+ size = le32_to_cpu(con->in_reply.authorizer_len);
+ end += size;
+ ret = read_partial(con, end, size, con->auth_reply_buf);
if (ret <= 0)
goto out;
@@ -1377,7 +1403,8 @@ static int process_connect(struct ceph_connection *con)
return -1;
}
con->auth_retry = 1;
- ret = prepare_write_connect(con->msgr, con, 0);
+ ceph_con_out_kvec_reset(con);
+ ret = prepare_write_connect(con);
if (ret < 0)
return ret;
prepare_read_connect(con);
@@ -1397,7 +1424,10 @@ static int process_connect(struct ceph_connection *con)
ENTITY_NAME(con->peer_name),
ceph_pr_addr(&con->peer_addr.in_addr));
reset_connection(con);
- prepare_write_connect(con->msgr, con, 0);
+ ceph_con_out_kvec_reset(con);
+ ret = prepare_write_connect(con);
+ if (ret < 0)
+ return ret;
prepare_read_connect(con);
/* Tell ceph about it. */
@@ -1420,7 +1450,10 @@ static int process_connect(struct ceph_connection *con)
le32_to_cpu(con->out_connect.connect_seq),
le32_to_cpu(con->in_connect.connect_seq));
con->connect_seq = le32_to_cpu(con->in_connect.connect_seq);
- prepare_write_connect(con->msgr, con, 0);
+ ceph_con_out_kvec_reset(con);
+ ret = prepare_write_connect(con);
+ if (ret < 0)
+ return ret;
prepare_read_connect(con);
break;
@@ -1434,7 +1467,10 @@ static int process_connect(struct ceph_connection *con)
le32_to_cpu(con->in_connect.global_seq));
get_global_seq(con->msgr,
le32_to_cpu(con->in_connect.global_seq));
- prepare_write_connect(con->msgr, con, 0);
+ ceph_con_out_kvec_reset(con);
+ ret = prepare_write_connect(con);
+ if (ret < 0)
+ return ret;
prepare_read_connect(con);
break;
@@ -1491,10 +1527,10 @@ static int process_connect(struct ceph_connection *con)
*/
static int read_partial_ack(struct ceph_connection *con)
{
- int to = 0;
+ int size = sizeof (con->in_temp_ack);
+ int end = size;
- return read_partial(con, &to, sizeof(con->in_temp_ack),
- &con->in_temp_ack);
+ return read_partial(con, end, size, &con->in_temp_ack);
}
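The read_partial rewrite replaces a caller-owned cursor with two values per object: size (bytes in this object) and end (the object's cumulative end offset in the handshake stream). The invariant is that the object occupies stream offsets [end - size, end) while con->in_base_pos tracks global progress, so chaining reads looks like this (condensed from read_partial_connect above):

	int size, end, ret;

	size = sizeof(con->in_reply);
	end = size;				/* first object ends at its own size */
	ret = read_partial(con, end, size, &con->in_reply);
	if (ret <= 0)
		return ret;

	size = le32_to_cpu(con->in_reply.authorizer_len);
	end += size;				/* accumulate across objects */
	ret = read_partial(con, end, size, con->auth_reply_buf);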
@@ -1554,7 +1590,7 @@ static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
static int read_partial_message_pages(struct ceph_connection *con,
struct page **pages,
- unsigned data_len, bool do_datacrc)
+ unsigned int data_len, bool do_datacrc)
{
void *p;
int ret;
@@ -1587,7 +1623,7 @@ static int read_partial_message_pages(struct ceph_connection *con,
#ifdef CONFIG_BLOCK
static int read_partial_message_bio(struct ceph_connection *con,
struct bio **bio_iter, int *bio_seg,
- unsigned data_len, bool do_datacrc)
+ unsigned int data_len, bool do_datacrc)
{
struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg);
void *p;
@@ -1627,9 +1663,10 @@ static int read_partial_message_bio(struct ceph_connection *con,
static int read_partial_message(struct ceph_connection *con)
{
struct ceph_msg *m = con->in_msg;
+ int size;
+ int end;
int ret;
- int to, left;
- unsigned front_len, middle_len, data_len;
+ unsigned int front_len, middle_len, data_len;
bool do_datacrc = !con->msgr->nocrc;
int skip;
u64 seq;
@@ -1638,15 +1675,11 @@ static int read_partial_message(struct ceph_connection *con)
dout("read_partial_message con %p msg %p\n", con, m);
/* header */
- while (con->in_base_pos < sizeof(con->in_hdr)) {
- left = sizeof(con->in_hdr) - con->in_base_pos;
- ret = ceph_tcp_recvmsg(con->sock,
- (char *)&con->in_hdr + con->in_base_pos,
- left);
- if (ret <= 0)
- return ret;
- con->in_base_pos += ret;
- }
+ size = sizeof (con->in_hdr);
+ end = size;
+ ret = read_partial(con, end, size, &con->in_hdr);
+ if (ret <= 0)
+ return ret;
crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc));
if (cpu_to_le32(crc) != con->in_hdr.crc) {
@@ -1759,16 +1792,12 @@ static int read_partial_message(struct ceph_connection *con)
}
/* footer */
- to = sizeof(m->hdr) + sizeof(m->footer);
- while (con->in_base_pos < to) {
- left = to - con->in_base_pos;
- ret = ceph_tcp_recvmsg(con->sock, (char *)&m->footer +
- (con->in_base_pos - sizeof(m->hdr)),
- left);
- if (ret <= 0)
- return ret;
- con->in_base_pos += ret;
- }
+ size = sizeof (m->footer);
+ end += size;
+ ret = read_partial(con, end, size, &m->footer);
+ if (ret <= 0)
+ return ret;
+
dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
m, front_len, m->footer.front_crc, middle_len,
m->footer.middle_crc, data_len, m->footer.data_crc);
@@ -1835,7 +1864,6 @@ static void process_message(struct ceph_connection *con)
*/
static int try_write(struct ceph_connection *con)
{
- struct ceph_messenger *msgr = con->msgr;
int ret = 1;
dout("try_write start %p state %lu nref %d\n", con, con->state,
@@ -1846,7 +1874,11 @@ more:
/* open the socket first? */
if (con->sock == NULL) {
- prepare_write_connect(msgr, con, 1);
+ ceph_con_out_kvec_reset(con);
+ prepare_write_banner(con);
+ ret = prepare_write_connect(con);
+ if (ret < 0)
+ goto out;
prepare_read_banner(con);
set_bit(CONNECTING, &con->state);
clear_bit(NEGOTIATING, &con->state);
@@ -2345,9 +2377,9 @@ void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
{
mutex_lock(&con->mutex);
if (con->in_msg && con->in_msg == msg) {
- unsigned front_len = le32_to_cpu(con->in_hdr.front_len);
- unsigned middle_len = le32_to_cpu(con->in_hdr.middle_len);
- unsigned data_len = le32_to_cpu(con->in_hdr.data_len);
+ unsigned int front_len = le32_to_cpu(con->in_hdr.front_len);
+ unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len);
+ unsigned int data_len = le32_to_cpu(con->in_hdr.data_len);
/* skip rest of message */
dout("con_revoke_pages %p msg %p revoked\n", con, msg);
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 1845cde26227..10d6008d31f2 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -168,7 +168,7 @@ static bool __sub_expired(struct ceph_mon_client *monc)
*/
static void __schedule_delayed(struct ceph_mon_client *monc)
{
- unsigned delay;
+ unsigned int delay;
if (monc->cur_mon < 0 || __sub_expired(monc))
delay = 10 * HZ;
@@ -184,7 +184,7 @@ static void __schedule_delayed(struct ceph_mon_client *monc)
static void __send_subscribe(struct ceph_mon_client *monc)
{
dout("__send_subscribe sub_sent=%u exp=%u want_osd=%d\n",
- (unsigned)monc->sub_sent, __sub_expired(monc),
+ (unsigned int)monc->sub_sent, __sub_expired(monc),
monc->want_next_osdmap);
if ((__sub_expired(monc) && !monc->sub_sent) ||
monc->want_next_osdmap == 1) {
@@ -201,7 +201,7 @@ static void __send_subscribe(struct ceph_mon_client *monc)
if (monc->want_next_osdmap) {
dout("__send_subscribe to 'osdmap' %u\n",
- (unsigned)monc->have_osdmap);
+ (unsigned int)monc->have_osdmap);
ceph_encode_string(&p, end, "osdmap", 6);
i = p;
i->have = cpu_to_le64(monc->have_osdmap);
@@ -211,7 +211,7 @@ static void __send_subscribe(struct ceph_mon_client *monc)
}
if (monc->want_mdsmap) {
dout("__send_subscribe to 'mdsmap' %u+\n",
- (unsigned)monc->have_mdsmap);
+ (unsigned int)monc->have_mdsmap);
ceph_encode_string(&p, end, "mdsmap", 6);
i = p;
i->have = cpu_to_le64(monc->have_mdsmap);
@@ -236,7 +236,7 @@ static void __send_subscribe(struct ceph_mon_client *monc)
static void handle_subscribe_ack(struct ceph_mon_client *monc,
struct ceph_msg *msg)
{
- unsigned seconds;
+ unsigned int seconds;
struct ceph_mon_subscribe_ack *h = msg->front.iov_base;
if (msg->front.iov_len < sizeof(*h))
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 5e254055c910..1ffebed5ce0f 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -278,7 +278,7 @@ static void osd_req_encode_op(struct ceph_osd_request *req,
{
dst->op = cpu_to_le16(src->op);
- switch (dst->op) {
+ switch (src->op) {
case CEPH_OSD_OP_READ:
case CEPH_OSD_OP_WRITE:
dst->extent.offset =
@@ -664,11 +664,11 @@ static void put_osd(struct ceph_osd *osd)
{
dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
atomic_read(&osd->o_ref) - 1);
- if (atomic_dec_and_test(&osd->o_ref)) {
+ if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) {
struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;
- if (osd->o_authorizer)
- ac->ops->destroy_authorizer(ac, osd->o_authorizer);
+ if (ac->ops && ac->ops->destroy_authorizer)
+ ac->ops->destroy_authorizer(ac, osd->o_auth.authorizer);
kfree(osd);
}
}
@@ -841,6 +841,12 @@ static void register_request(struct ceph_osd_client *osdc,
static void __unregister_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req)
{
+ if (RB_EMPTY_NODE(&req->r_node)) {
+ dout("__unregister_request %p tid %lld not registered\n",
+ req, req->r_tid);
+ return;
+ }
+
dout("__unregister_request %p tid %lld\n", req, req->r_tid);
rb_erase(&req->r_node, &osdc->requests);
osdc->num_requests--;
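The RB_EMPTY_NODE guard makes __unregister_request safe to call twice, but it only works if the erase path also clears the node: rb_erase by itself leaves parent pointers that still look linked. A hedged sketch of the complete pattern this depends on:

	/* at allocation time */
	RB_CLEAR_NODE(&req->r_node);		/* node reads as "not in a tree" */

	/* idempotent unregister */
	if (RB_EMPTY_NODE(&req->r_node))
		return;				/* never registered, or already removed */
	rb_erase(&req->r_node, &osdc->requests);
	RB_CLEAR_NODE(&req->r_node);		/* re-arm the guard for a second call */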
@@ -1214,7 +1220,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
}
if (!req->r_got_reply) {
- unsigned bytes;
+ unsigned int bytes;
req->r_result = le32_to_cpu(rhead->result);
bytes = le32_to_cpu(msg->hdr.data_len);
@@ -2108,37 +2114,32 @@ static void put_osd_con(struct ceph_connection *con)
/*
* authentication
*/
-static int get_authorizer(struct ceph_connection *con,
- void **buf, int *len, int *proto,
- void **reply_buf, int *reply_len, int force_new)
+/*
+ * Note: returned pointer is the address of a structure that's
+ * managed separately. Caller must *not* attempt to free it.
+ */
+static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
+ int *proto, int force_new)
{
struct ceph_osd *o = con->private;
struct ceph_osd_client *osdc = o->o_osdc;
struct ceph_auth_client *ac = osdc->client->monc.auth;
- int ret = 0;
+ struct ceph_auth_handshake *auth = &o->o_auth;
- if (force_new && o->o_authorizer) {
- ac->ops->destroy_authorizer(ac, o->o_authorizer);
- o->o_authorizer = NULL;
- }
- if (o->o_authorizer == NULL) {
- ret = ac->ops->create_authorizer(
- ac, CEPH_ENTITY_TYPE_OSD,
- &o->o_authorizer,
- &o->o_authorizer_buf,
- &o->o_authorizer_buf_len,
- &o->o_authorizer_reply_buf,
- &o->o_authorizer_reply_buf_len);
+ if (force_new && auth->authorizer) {
+ if (ac->ops && ac->ops->destroy_authorizer)
+ ac->ops->destroy_authorizer(ac, auth->authorizer);
+ auth->authorizer = NULL;
+ }
+ if (!auth->authorizer && ac->ops && ac->ops->create_authorizer) {
+ int ret = ac->ops->create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
+ auth);
if (ret)
- return ret;
+ return ERR_PTR(ret);
}
-
*proto = ac->protocol;
- *buf = o->o_authorizer_buf;
- *len = o->o_authorizer_buf_len;
- *reply_buf = o->o_authorizer_reply_buf;
- *reply_len = o->o_authorizer_reply_buf_len;
- return 0;
+
+ return auth;
}
@@ -2148,7 +2149,11 @@ static int verify_authorizer_reply(struct ceph_connection *con, int len)
struct ceph_osd_client *osdc = o->o_osdc;
struct ceph_auth_client *ac = osdc->client->monc.auth;
- return ac->ops->verify_authorizer_reply(ac, o->o_authorizer, len);
+ /*
+ * XXX If ac->ops or ac->ops->verify_authorizer_reply is null,
+ * XXX which do we do: succeed or fail?
+ */
+ return ac->ops->verify_authorizer_reply(ac, o->o_auth.authorizer, len);
}
static int invalidate_authorizer(struct ceph_connection *con)
@@ -2157,7 +2162,7 @@ static int invalidate_authorizer(struct ceph_connection *con)
struct ceph_osd_client *osdc = o->o_osdc;
struct ceph_auth_client *ac = osdc->client->monc.auth;
- if (ac->ops->invalidate_authorizer)
+ if (ac->ops && ac->ops->invalidate_authorizer)
ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
return ceph_monc_validate_auth(&osdc->client->monc);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 29ad46ec9dcf..81e3b84a77ef 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -38,7 +38,7 @@ done:
/* maps */
-static int calc_bits_of(unsigned t)
+static int calc_bits_of(unsigned int t)
{
int b = 0;
while (t) {
@@ -154,20 +154,13 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
magic = ceph_decode_32(p);
if (magic != CRUSH_MAGIC) {
pr_err("crush_decode magic %x != current %x\n",
- (unsigned)magic, (unsigned)CRUSH_MAGIC);
+ (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
goto bad;
}
c->max_buckets = ceph_decode_32(p);
c->max_rules = ceph_decode_32(p);
c->max_devices = ceph_decode_32(p);
- c->device_parents = kcalloc(c->max_devices, sizeof(u32), GFP_NOFS);
- if (c->device_parents == NULL)
- goto badmem;
- c->bucket_parents = kcalloc(c->max_buckets, sizeof(u32), GFP_NOFS);
- if (c->bucket_parents == NULL)
- goto badmem;
-
c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
if (c->buckets == NULL)
goto badmem;
@@ -460,7 +453,7 @@ static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
{
- unsigned n, m;
+ unsigned int n, m;
ceph_decode_copy(p, &pi->v, sizeof(pi->v));
calc_pg_masks(pi);
@@ -890,8 +883,12 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
pglen = ceph_decode_32(p);
if (pglen) {
- /* insert */
ceph_decode_need(p, end, pglen*sizeof(u32), bad);
+
+ /* removing existing (if any) */
+ (void) __remove_pg_mapping(&map->pg_temp, pgid);
+
+ /* insert */
pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
if (!pg) {
err = -ENOMEM;
@@ -970,7 +967,7 @@ void ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
objsetno = stripeno / su_per_object;
*ono = objsetno * sc + stripepos;
- dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned)*ono);
+ dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono);
/* *oxoff = *off % layout->fl_stripe_unit; # offset in su */
t = off;
@@ -998,12 +995,11 @@ int ceph_calc_object_layout(struct ceph_object_layout *ol,
struct ceph_file_layout *fl,
struct ceph_osdmap *osdmap)
{
- unsigned num, num_mask;
+ unsigned int num, num_mask;
struct ceph_pg pgid;
- s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred);
int poolid = le32_to_cpu(fl->fl_pg_pool);
struct ceph_pg_pool_info *pool;
- unsigned ps;
+ unsigned int ps;
BUG_ON(!osdmap);
@@ -1011,23 +1007,13 @@ int ceph_calc_object_layout(struct ceph_object_layout *ol,
if (!pool)
return -EIO;
ps = ceph_str_hash(pool->v.object_hash, oid, strlen(oid));
- if (preferred >= 0) {
- ps += preferred;
- num = le32_to_cpu(pool->v.lpg_num);
- num_mask = pool->lpg_num_mask;
- } else {
- num = le32_to_cpu(pool->v.pg_num);
- num_mask = pool->pg_num_mask;
- }
+ num = le32_to_cpu(pool->v.pg_num);
+ num_mask = pool->pg_num_mask;
pgid.ps = cpu_to_le16(ps);
- pgid.preferred = cpu_to_le16(preferred);
+ pgid.preferred = cpu_to_le16(-1);
pgid.pool = fl->fl_pg_pool;
- if (preferred >= 0)
- dout("calc_object_layout '%s' pgid %d.%xp%d\n", oid, poolid, ps,
- (int)preferred);
- else
- dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps);
+ dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps);
ol->ol_pgid = pgid;
ol->ol_stripe_unit = fl->fl_object_stripe_unit;
@@ -1045,24 +1031,18 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
struct ceph_pg_mapping *pg;
struct ceph_pg_pool_info *pool;
int ruleno;
- unsigned poolid, ps, pps, t;
- int preferred;
+ unsigned int poolid, ps, pps, t;
+ int r;
poolid = le32_to_cpu(pgid.pool);
ps = le16_to_cpu(pgid.ps);
- preferred = (s16)le16_to_cpu(pgid.preferred);
pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
if (!pool)
return NULL;
/* pg_temp? */
- if (preferred >= 0)
- t = ceph_stable_mod(ps, le32_to_cpu(pool->v.lpg_num),
- pool->lpgp_num_mask);
- else
- t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num),
- pool->pgp_num_mask);
+ t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num),
+ pool->pgp_num_mask);
pgid.ps = cpu_to_le16(t);
pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
if (pg) {
@@ -1080,23 +1060,20 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
return NULL;
}
- /* don't forcefeed bad device ids to crush */
- if (preferred >= osdmap->max_osd ||
- preferred >= osdmap->crush->max_devices)
- preferred = -1;
-
- if (preferred >= 0)
- pps = ceph_stable_mod(ps,
- le32_to_cpu(pool->v.lpgp_num),
- pool->lpgp_num_mask);
- else
- pps = ceph_stable_mod(ps,
- le32_to_cpu(pool->v.pgp_num),
- pool->pgp_num_mask);
+ pps = ceph_stable_mod(ps,
+ le32_to_cpu(pool->v.pgp_num),
+ pool->pgp_num_mask);
pps += poolid;
- *num = crush_do_rule(osdmap->crush, ruleno, pps, osds,
- min_t(int, pool->v.size, *num),
- preferred, osdmap->osd_weight);
+ r = crush_do_rule(osdmap->crush, ruleno, pps, osds,
+ min_t(int, pool->v.size, *num),
+ osdmap->osd_weight);
+ if (r < 0) {
+ pr_err("error %d from crush rule: pool %d ruleset %d type %d"
+ " size %d\n", r, poolid, pool->v.crush_ruleset,
+ pool->v.type, pool->v.size);
+ return NULL;
+ }
+ *num = r;
return osds;
}
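Both the pg_temp lookup and the pps placement above funnel through ceph_stable_mod(x, b, bmask), where bmask is the next-power-of-two mask for b; it is "stable" in that growing b toward the mask relocates as few inputs as possible. Its definition in the ceph client headers is, to the best of my recollection:

static inline int ceph_stable_mod(int x, int b, int bmask)
{
	if ((x & bmask) < b)
		return x & bmask;		/* result already below b */
	else
		return x & (bmask >> 1);	/* fold into the lower half */
}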
diff --git a/net/compat.c b/net/compat.c
index e055708b8ec9..1b96281892de 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -328,14 +328,6 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
__scm_destroy(scm);
}
-/*
- * A struct sock_filter is architecture independent.
- */
-struct compat_sock_fprog {
- u16 len;
- compat_uptr_t filter; /* struct sock_filter * */
-};
-
static int do_set_attach_filter(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
@@ -741,13 +733,13 @@ static unsigned char nas[21] = {
};
#undef AL
-asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags)
+asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
{
return sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
}
asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
- unsigned vlen, unsigned int flags)
+ unsigned int vlen, unsigned int flags)
{
return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
flags | MSG_CMSG_COMPAT);
@@ -758,20 +750,20 @@ asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, uns
return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
}
-asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned flags)
+asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned int flags)
{
return sys_recv(fd, buf, len, flags | MSG_CMSG_COMPAT);
}
asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len,
- unsigned flags, struct sockaddr __user *addr,
+ unsigned int flags, struct sockaddr __user *addr,
int __user *addrlen)
{
return sys_recvfrom(fd, buf, len, flags | MSG_CMSG_COMPAT, addr, addrlen);
}
asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
- unsigned vlen, unsigned int flags,
+ unsigned int vlen, unsigned int flags,
struct compat_timespec __user *timeout)
{
int datagrams;
diff --git a/net/core/datagram.c b/net/core/datagram.c
index e4fbfd6e2bd4..ae6acf6a3dea 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -65,7 +65,7 @@ static inline int connection_based(struct sock *sk)
return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}
-static int receiver_wake_function(wait_queue_t *wait, unsigned mode, int sync,
+static int receiver_wake_function(wait_queue_t *wait, unsigned int mode, int sync,
void *key)
{
unsigned long bits = (unsigned long)key;
@@ -158,7 +158,7 @@ out_noerr:
* quite explicitly by POSIX 1003.1g, don't change them without having
* the standard around please.
*/
-struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
+struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
int *peeked, int *off, int *err)
{
struct sk_buff *skb;
@@ -216,7 +216,7 @@ no_packet:
}
EXPORT_SYMBOL(__skb_recv_datagram);
-struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
+struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
int noblock, int *err)
{
int peeked, off = 0;
diff --git a/net/core/dev.c b/net/core/dev.c
index c25d453b2803..cd0981977f5c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -208,7 +208,8 @@ static inline void dev_base_seq_inc(struct net *net)
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
- unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
+ unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
+
return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}
@@ -299,10 +300,9 @@ static const unsigned short netdev_lock_type[] =
ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
- ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
- ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
- ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
- ARPHRD_VOID, ARPHRD_NONE};
+ ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
+ ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
+ ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
static const char *const netdev_lock_name[] =
{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
@@ -317,10 +317,9 @@ static const char *const netdev_lock_name[] =
"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
- "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
- "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
- "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
- "_xmit_VOID", "_xmit_NONE"};
+ "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
+ "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
+ "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
@@ -1409,14 +1408,34 @@ EXPORT_SYMBOL(register_netdevice_notifier);
* register_netdevice_notifier(). The notifier is unlinked into the
* kernel structures and may then be reused. A negative errno code
* is returned on a failure.
+ *
+ * After unregistering, unregister and down device events are synthesized
+ * for all devices on the device list to the removed notifier, removing
+ * the need for special-case cleanup code.
*/
int unregister_netdevice_notifier(struct notifier_block *nb)
{
+ struct net_device *dev;
+ struct net *net;
int err;
rtnl_lock();
err = raw_notifier_chain_unregister(&netdev_chain, nb);
+ if (err)
+ goto unlock;
+
+ for_each_net(net) {
+ for_each_netdev(net, dev) {
+ if (dev->flags & IFF_UP) {
+ nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
+ nb->notifier_call(nb, NETDEV_DOWN, dev);
+ }
+ nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
+ nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
+ }
+ }
+unlock:
rtnl_unlock();
return err;
}
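With these synthesized events, a subsystem no longer needs a special teardown walk over the device list: whatever its notifier does for NETDEV_UNREGISTER now also runs, per device, when the notifier itself is removed. A hedged sketch of a consumer (my_cleanup is a placeholder; in kernels of this era the void pointer is the net_device itself):

static int my_netdev_event(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UNREGISTER)
		my_cleanup(dev);	/* also invoked, synthesized, at notifier removal */
	return NOTIFY_DONE;
}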
@@ -1597,10 +1616,14 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
return NET_RX_DROP;
}
skb->skb_iif = 0;
- skb_set_dev(skb, dev);
+ skb->dev = dev;
+ skb_dst_drop(skb);
skb->tstamp.tv64 = 0;
skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, dev);
+ skb->mark = 0;
+ secpath_reset(skb);
+ nf_reset(skb);
return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
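Folding the old skb_set_dev logic into dev_forward_skb makes the cross-namespace scrub unconditional; each reset removes one class of state that must not leak from the sender's namespace into the receiver's (annotations are editorial):

	skb_dst_drop(skb);		/* cached route belongs to the old namespace */
	skb->tstamp.tv64 = 0;		/* timestamp is re-taken on the RX path */
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	skb->mark = 0;			/* fwmark is a per-namespace policy input */
	secpath_reset(skb);		/* forget IPsec transforms applied so far */
	nf_reset(skb);			/* drop the conntrack reference */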
@@ -1652,10 +1675,9 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
if (skb_network_header(skb2) < skb2->data ||
skb2->network_header > skb2->tail) {
- if (net_ratelimit())
- pr_crit("protocol %04x is buggy, dev %s\n",
- ntohs(skb2->protocol),
- dev->name);
+ net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
+ ntohs(skb2->protocol),
+ dev->name);
skb_reset_network_header(skb2);
}
@@ -1849,36 +1871,6 @@ void netif_device_attach(struct net_device *dev)
}
EXPORT_SYMBOL(netif_device_attach);
-/**
- * skb_dev_set -- assign a new device to a buffer
- * @skb: buffer for the new device
- * @dev: network device
- *
- * If an skb is owned by a device already, we have to reset
- * all data private to the namespace a device belongs to
- * before assigning it a new device.
- */
-#ifdef CONFIG_NET_NS
-void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
-{
- skb_dst_drop(skb);
- if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
- secpath_reset(skb);
- nf_reset(skb);
- skb_init_secmark(skb);
- skb->mark = 0;
- skb->priority = 0;
- skb->nf_trace = 0;
- skb->ipvs_property = 0;
-#ifdef CONFIG_NET_SCHED
- skb->tc_index = 0;
-#endif
- }
- skb->dev = dev;
-}
-EXPORT_SYMBOL(skb_set_dev);
-#endif /* CONFIG_NET_NS */
-
static void skb_warn_bad_offload(const struct sk_buff *skb)
{
static const netdev_features_t null_features = 0;
@@ -2322,11 +2314,9 @@ EXPORT_SYMBOL(__skb_tx_hash);
static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
if (unlikely(queue_index >= dev->real_num_tx_queues)) {
- if (net_ratelimit()) {
- pr_warn("%s selects TX queue %d, but real number of TX queues is %d\n",
- dev->name, queue_index,
- dev->real_num_tx_queues);
- }
+ net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
+ dev->name, queue_index,
+ dev->real_num_tx_queues);
return 0;
}
return queue_index;
@@ -2568,17 +2558,15 @@ int dev_queue_xmit(struct sk_buff *skb)
}
}
HARD_TX_UNLOCK(dev, txq);
- if (net_ratelimit())
- pr_crit("Virtual device %s asks to queue packet!\n",
- dev->name);
+ net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
+ dev->name);
} else {
/* Recursion is detected! It is possible,
* unfortunately
*/
recursion_alert:
- if (net_ratelimit())
- pr_crit("Dead loop on virtual device %s, fix it urgently!\n",
- dev->name);
+ net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
+ dev->name);
}
}
@@ -3059,9 +3047,8 @@ static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
struct Qdisc *q;
if (unlikely(MAX_RED_LOOP < ttl++)) {
- if (net_ratelimit())
- pr_warn("Redir loop detected Dropping packet (%d->%d)\n",
- skb->skb_iif, dev->ifindex);
+ net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
+ skb->skb_iif, dev->ifindex);
return TC_ACT_SHOT;
}
@@ -3521,10 +3508,16 @@ gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
break;
case GRO_DROP:
- case GRO_MERGED_FREE:
kfree_skb(skb);
break;
+ case GRO_MERGED_FREE:
+ if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+ kmem_cache_free(skbuff_head_cache, skb);
+ else
+ __kfree_skb(skb);
+ break;
+
case GRO_HELD:
case GRO_MERGED:
break;
@@ -3609,7 +3602,7 @@ gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
}
EXPORT_SYMBOL(napi_frags_finish);
-struct sk_buff *napi_frags_skb(struct napi_struct *napi)
+static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
struct sk_buff *skb = napi->skb;
struct ethhdr *eth;
@@ -3644,7 +3637,6 @@ struct sk_buff *napi_frags_skb(struct napi_struct *napi)
out:
return skb;
}
-EXPORT_SYMBOL(napi_frags_skb);
gro_result_t napi_gro_frags(struct napi_struct *napi)
{
@@ -4598,9 +4590,9 @@ void dev_set_rx_mode(struct net_device *dev)
*
* Get the combination of flag bits exported through APIs to userspace.
*/
-unsigned dev_get_flags(const struct net_device *dev)
+unsigned int dev_get_flags(const struct net_device *dev)
{
- unsigned flags;
+ unsigned int flags;
flags = (dev->flags & ~(IFF_PROMISC |
IFF_ALLMULTI |
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 626698f0db8b..c4cc2bc49f06 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -21,12 +21,35 @@
* General list handling functions
*/
+static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,
+ unsigned char *addr, int addr_len,
+ unsigned char addr_type, bool global)
+{
+ struct netdev_hw_addr *ha;
+ int alloc_size;
+
+ alloc_size = sizeof(*ha);
+ if (alloc_size < L1_CACHE_BYTES)
+ alloc_size = L1_CACHE_BYTES;
+ ha = kmalloc(alloc_size, GFP_ATOMIC);
+ if (!ha)
+ return -ENOMEM;
+ memcpy(ha->addr, addr, addr_len);
+ ha->type = addr_type;
+ ha->refcount = 1;
+ ha->global_use = global;
+ ha->synced = false;
+ list_add_tail_rcu(&ha->list, &list->list);
+ list->count++;
+
+ return 0;
+}
+
static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
unsigned char *addr, int addr_len,
unsigned char addr_type, bool global)
{
struct netdev_hw_addr *ha;
- int alloc_size;
if (addr_len > MAX_ADDR_LEN)
return -EINVAL;
@@ -46,21 +69,7 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
}
}
-
- alloc_size = sizeof(*ha);
- if (alloc_size < L1_CACHE_BYTES)
- alloc_size = L1_CACHE_BYTES;
- ha = kmalloc(alloc_size, GFP_ATOMIC);
- if (!ha)
- return -ENOMEM;
- memcpy(ha->addr, addr, addr_len);
- ha->type = addr_type;
- ha->refcount = 1;
- ha->global_use = global;
- ha->synced = false;
- list_add_tail_rcu(&ha->list, &list->list);
- list->count++;
- return 0;
+ return __hw_addr_create_ex(list, addr, addr_len, addr_type, global);
}
static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
@@ -377,6 +386,34 @@ EXPORT_SYMBOL(dev_addr_del_multiple);
*/
/**
+ * dev_uc_add_excl - Add a global secondary unicast address
+ * @dev: device
+ * @addr: address to add
+ */
+int dev_uc_add_excl(struct net_device *dev, unsigned char *addr)
+{
+ struct netdev_hw_addr *ha;
+ int err;
+
+ netif_addr_lock_bh(dev);
+ list_for_each_entry(ha, &dev->uc.list, list) {
+ if (!memcmp(ha->addr, addr, dev->addr_len) &&
+ ha->type == NETDEV_HW_ADDR_T_UNICAST) {
+ err = -EEXIST;
+ goto out;
+ }
+ }
+ err = __hw_addr_create_ex(&dev->uc, addr, dev->addr_len,
+ NETDEV_HW_ADDR_T_UNICAST, true);
+ if (!err)
+ __dev_set_rx_mode(dev);
+out:
+ netif_addr_unlock_bh(dev);
+ return err;
+}
+EXPORT_SYMBOL(dev_uc_add_excl);
+
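Unlike dev_uc_add, which reference-counts duplicates, the _excl variants report -EEXIST, so a caller can use the lower device's address list itself as an ownership check. A hedged usage sketch (lowerdev is illustrative):

	err = dev_uc_add_excl(lowerdev, dev->dev_addr);
	if (err)
		return err;	/* -EEXIST: another upper device already claimed this MAC */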
+/**
* dev_uc_add - Add a secondary unicast address
* @dev: device
* @addr: address to add
@@ -501,6 +538,34 @@ EXPORT_SYMBOL(dev_uc_init);
* Multicast list handling functions
*/
+/**
+ * dev_mc_add_excl - Add a global secondary multicast address
+ * @dev: device
+ * @addr: address to add
+ */
+int dev_mc_add_excl(struct net_device *dev, unsigned char *addr)
+{
+ struct netdev_hw_addr *ha;
+ int err;
+
+ netif_addr_lock_bh(dev);
+ list_for_each_entry(ha, &dev->mc.list, list) {
+ if (!memcmp(ha->addr, addr, dev->addr_len) &&
+ ha->type == NETDEV_HW_ADDR_T_MULTICAST) {
+ err = -EEXIST;
+ goto out;
+ }
+ }
+ err = __hw_addr_create_ex(&dev->mc, addr, dev->addr_len,
+ NETDEV_HW_ADDR_T_MULTICAST, true);
+ if (!err)
+ __dev_set_rx_mode(dev);
+out:
+ netif_addr_unlock_bh(dev);
+ return err;
+}
+EXPORT_SYMBOL(dev_mc_add_excl);
+
static int __dev_mc_add(struct net_device *dev, unsigned char *addr,
bool global)
{
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 7f36b38e060f..ea5fb9fcc3f5 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -4,6 +4,8 @@
* Copyright (C) 2009 Neil Horman <nhorman@tuxdriver.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
@@ -22,6 +24,7 @@
#include <linux/timer.h>
#include <linux/bitops.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <net/genetlink.h>
#include <net/netevent.h>
@@ -42,13 +45,14 @@ static void send_dm_alert(struct work_struct *unused);
* netlink alerts
*/
static int trace_state = TRACE_OFF;
-static DEFINE_SPINLOCK(trace_state_lock);
+static DEFINE_MUTEX(trace_state_mutex);
struct per_cpu_dm_data {
struct work_struct dm_alert_work;
- struct sk_buff *skb;
+ struct sk_buff __rcu *skb;
atomic_t dm_hit_count;
struct timer_list send_timer;
+ int cpu;
};
struct dm_hw_stat_delta {
@@ -79,29 +83,53 @@ static void reset_per_cpu_data(struct per_cpu_dm_data *data)
size_t al;
struct net_dm_alert_msg *msg;
struct nlattr *nla;
+ struct sk_buff *skb;
+ struct sk_buff *oskb = rcu_dereference_protected(data->skb, 1);
al = sizeof(struct net_dm_alert_msg);
al += dm_hit_limit * sizeof(struct net_dm_drop_point);
al += sizeof(struct nlattr);
- data->skb = genlmsg_new(al, GFP_KERNEL);
- genlmsg_put(data->skb, 0, 0, &net_drop_monitor_family,
- 0, NET_DM_CMD_ALERT);
- nla = nla_reserve(data->skb, NLA_UNSPEC, sizeof(struct net_dm_alert_msg));
- msg = nla_data(nla);
- memset(msg, 0, al);
- atomic_set(&data->dm_hit_count, dm_hit_limit);
+ skb = genlmsg_new(al, GFP_KERNEL);
+
+ if (skb) {
+ genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
+ 0, NET_DM_CMD_ALERT);
+ nla = nla_reserve(skb, NLA_UNSPEC,
+ sizeof(struct net_dm_alert_msg));
+ msg = nla_data(nla);
+ memset(msg, 0, al);
+ } else
+ schedule_work_on(data->cpu, &data->dm_alert_work);
+
+ /*
+ * Don't need to lock this, since we are guaranteed to only
+ * run this on a single cpu at a time.
+ * Note also that we only update data->skb if the old and new skb
+ * pointers don't match. This ensures that we don't continually call
+ * synchronize_rcu() if we repeatedly fail to alloc a new netlink message.
+ */
+ if (skb != oskb) {
+ rcu_assign_pointer(data->skb, skb);
+
+ synchronize_rcu();
+
+ atomic_set(&data->dm_hit_count, dm_hit_limit);
+ }
+
}
static void send_dm_alert(struct work_struct *unused)
{
struct sk_buff *skb;
- struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+ struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+
+ WARN_ON_ONCE(data->cpu != smp_processor_id());
/*
* Grab the skb we're about to send
*/
- skb = data->skb;
+ skb = rcu_dereference_protected(data->skb, 1);
/*
* Replace it with a new one
@@ -111,8 +139,10 @@ static void send_dm_alert(struct work_struct *unused)
/*
* Ship it!
*/
- genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
+ if (skb)
+ genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
+ put_cpu_var(dm_cpu_data);
}
/*
@@ -123,9 +153,11 @@ static void send_dm_alert(struct work_struct *unused)
*/
static void sched_send_work(unsigned long unused)
{
- struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+ struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+
+ schedule_work_on(smp_processor_id(), &data->dm_alert_work);
- schedule_work(&data->dm_alert_work);
+ put_cpu_var(dm_cpu_data);
}
static void trace_drop_common(struct sk_buff *skb, void *location)
@@ -134,9 +166,16 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
struct nlmsghdr *nlh;
struct nlattr *nla;
int i;
- struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+ struct sk_buff *dskb;
+ struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+ rcu_read_lock();
+ dskb = rcu_dereference(data->skb);
+
+ if (!dskb)
+ goto out;
+
if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) {
/*
* we're already at zero, discard this hit
@@ -144,12 +183,13 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
goto out;
}
- nlh = (struct nlmsghdr *)data->skb->data;
+ nlh = (struct nlmsghdr *)dskb->data;
nla = genlmsg_data(nlmsg_data(nlh));
msg = nla_data(nla);
for (i = 0; i < msg->entries; i++) {
if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
msg->points[i].count++;
+ atomic_inc(&data->dm_hit_count);
goto out;
}
}
@@ -157,7 +197,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
/*
* We need to create a new entry
*/
- __nla_reserve_nohdr(data->skb, sizeof(struct net_dm_drop_point));
+ __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
msg->points[msg->entries].count = 1;
@@ -169,6 +209,8 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
}
out:
+ rcu_read_unlock();
+ put_cpu_var(dm_cpu_data);
return;
}
@@ -213,7 +255,7 @@ static int set_all_monitor_traces(int state)
struct dm_hw_stat_delta *new_stat = NULL;
struct dm_hw_stat_delta *temp;
- spin_lock(&trace_state_lock);
+ mutex_lock(&trace_state_mutex);
if (state == trace_state) {
rc = -EAGAIN;
@@ -222,9 +264,15 @@ static int set_all_monitor_traces(int state)
switch (state) {
case TRACE_ON:
+ if (!try_module_get(THIS_MODULE)) {
+ rc = -ENODEV;
+ break;
+ }
+
rc |= register_trace_kfree_skb(trace_kfree_skb_hit, NULL);
rc |= register_trace_napi_poll(trace_napi_poll_hit, NULL);
break;
+
case TRACE_OFF:
rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit, NULL);
rc |= unregister_trace_napi_poll(trace_napi_poll_hit, NULL);
@@ -240,6 +288,9 @@ static int set_all_monitor_traces(int state)
kfree_rcu(new_stat, rcu);
}
}
+
+ module_put(THIS_MODULE);
+
break;
default:
rc = 1;
@@ -252,7 +303,7 @@ static int set_all_monitor_traces(int state)
rc = -EINPROGRESS;
out_unlock:
- spin_unlock(&trace_state_lock);
+ mutex_unlock(&trace_state_mutex);
return rc;
}
@@ -295,12 +346,12 @@ static int dropmon_net_event(struct notifier_block *ev_block,
new_stat->dev = dev;
new_stat->last_rx = jiffies;
- spin_lock(&trace_state_lock);
+ mutex_lock(&trace_state_mutex);
list_add_rcu(&new_stat->list, &hw_stats_list);
- spin_unlock(&trace_state_lock);
+ mutex_unlock(&trace_state_mutex);
break;
case NETDEV_UNREGISTER:
- spin_lock(&trace_state_lock);
+ mutex_lock(&trace_state_mutex);
list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
if (new_stat->dev == dev) {
new_stat->dev = NULL;
@@ -311,7 +362,7 @@ static int dropmon_net_event(struct notifier_block *ev_block,
}
}
}
- spin_unlock(&trace_state_lock);
+ mutex_unlock(&trace_state_mutex);
break;
}
out:
@@ -342,10 +393,10 @@ static int __init init_net_drop_monitor(void)
struct per_cpu_dm_data *data;
int cpu, rc;
- printk(KERN_INFO "Initializing network drop monitor service\n");
+ pr_info("Initializing network drop monitor service\n");
if (sizeof(void *) > 8) {
- printk(KERN_ERR "Unable to store program counters on this arch, Drop monitor failed\n");
+ pr_err("Unable to store program counters on this arch, Drop monitor failed\n");
return -ENOSPC;
}
@@ -353,27 +404,29 @@ static int __init init_net_drop_monitor(void)
dropmon_ops,
ARRAY_SIZE(dropmon_ops));
if (rc) {
- printk(KERN_ERR "Could not create drop monitor netlink family\n");
+ pr_err("Could not create drop monitor netlink family\n");
return rc;
}
rc = register_netdevice_notifier(&dropmon_net_notifier);
if (rc < 0) {
- printk(KERN_CRIT "Failed to register netdevice notifier\n");
+ pr_crit("Failed to register netdevice notifier\n");
goto out_unreg;
}
rc = 0;
- for_each_present_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
data = &per_cpu(dm_cpu_data, cpu);
- reset_per_cpu_data(data);
+ data->cpu = cpu;
INIT_WORK(&data->dm_alert_work, send_dm_alert);
init_timer(&data->send_timer);
data->send_timer.data = cpu;
data->send_timer.function = sched_send_work;
+ reset_per_cpu_data(data);
}
+
goto out;
out_unreg:
@@ -382,4 +435,37 @@ out:
return rc;
}
-late_initcall(init_net_drop_monitor);
+static void exit_net_drop_monitor(void)
+{
+ struct per_cpu_dm_data *data;
+ int cpu;
+
+ BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier));
+
+ /*
+ * Because of the module_get/put we do in the trace state change path,
+ * we are guaranteed not to have any current users when we get here;
+ * all we need to do is make sure that we don't have any running timers
+ * or pending schedule calls.
+ */
+
+ for_each_possible_cpu(cpu) {
+ data = &per_cpu(dm_cpu_data, cpu);
+ del_timer_sync(&data->send_timer);
+ cancel_work_sync(&data->dm_alert_work);
+ /*
+ * At this point, we should have exclusive access
+ * to this struct and can free the skb inside it
+ */
+ kfree_skb(data->skb);
+ }
+
+ BUG_ON(genl_unregister_family(&net_drop_monitor_family));
+}
+
+module_init(init_net_drop_monitor);
+module_exit(exit_net_drop_monitor);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
+MODULE_ALIAS_GENL_FAMILY("NET_DM");
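
The per-CPU rework above leans on the get_cpu_var()/put_cpu_var() pair, which pins the caller to one CPU for the duration of the access. A minimal sketch of that pattern, with illustrative names only:

	#include <linux/percpu.h>

	struct example_pcpu_state {
		int hits;
	};
	static DEFINE_PER_CPU(struct example_pcpu_state, example_state);

	static void example_touch(void)
	{
		/* get_cpu_var() disables preemption and returns this CPU's
		 * instance, so the update below cannot race with migration
		 * to another CPU */
		struct example_pcpu_state *s = &get_cpu_var(example_state);

		s->hits++;
		put_cpu_var(example_state);	/* re-enables preemption */
	}
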
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 6d6d7d25caaa..9c2afb480270 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -17,6 +17,8 @@
#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
@@ -36,6 +38,17 @@ u32 ethtool_op_get_link(struct net_device *dev)
}
EXPORT_SYMBOL(ethtool_op_get_link);
+int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+{
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ info->phc_index = -1;
+ return 0;
+}
+EXPORT_SYMBOL(ethtool_op_get_ts_info);
+
/* Handlers for each ethtool command */
#define ETHTOOL_DEV_FEATURE_WORDS ((NETDEV_FEATURE_COUNT + 31) / 32)
@@ -738,18 +751,17 @@ static int ethtool_get_link(struct net_device *dev, char __user *useraddr)
return 0;
}
-static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
+static int ethtool_get_any_eeprom(struct net_device *dev, void __user *useraddr,
+ int (*getter)(struct net_device *,
+ struct ethtool_eeprom *, u8 *),
+ u32 total_len)
{
struct ethtool_eeprom eeprom;
- const struct ethtool_ops *ops = dev->ethtool_ops;
void __user *userbuf = useraddr + sizeof(eeprom);
u32 bytes_remaining;
u8 *data;
int ret = 0;
- if (!ops->get_eeprom || !ops->get_eeprom_len)
- return -EOPNOTSUPP;
-
if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
return -EFAULT;
@@ -758,7 +770,7 @@ static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
return -EINVAL;
/* Check for exceeding total eeprom len */
- if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
+ if (eeprom.offset + eeprom.len > total_len)
return -EINVAL;
data = kmalloc(PAGE_SIZE, GFP_USER);
@@ -769,7 +781,7 @@ static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
while (bytes_remaining > 0) {
eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE);
- ret = ops->get_eeprom(dev, &eeprom, data);
+ ret = getter(dev, &eeprom, data);
if (ret)
break;
if (copy_to_user(userbuf, data, eeprom.len)) {
@@ -790,6 +802,17 @@ static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
return ret;
}
+static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+
+ if (!ops->get_eeprom || !ops->get_eeprom_len)
+ return -EOPNOTSUPP;
+
+ return ethtool_get_any_eeprom(dev, useraddr, ops->get_eeprom,
+ ops->get_eeprom_len(dev));
+}
+
static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
{
struct ethtool_eeprom eeprom;
@@ -1278,6 +1301,81 @@ out:
return ret;
}
+static int ethtool_get_ts_info(struct net_device *dev, void __user *useraddr)
+{
+ int err = 0;
+ struct ethtool_ts_info info;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct phy_device *phydev = dev->phydev;
+
+ memset(&info, 0, sizeof(info));
+ info.cmd = ETHTOOL_GET_TS_INFO;
+
+ if (phydev && phydev->drv && phydev->drv->ts_info) {
+
+ err = phydev->drv->ts_info(phydev, &info);
+
+ } else if (dev->ethtool_ops && dev->ethtool_ops->get_ts_info) {
+
+ err = ops->get_ts_info(dev, &info);
+
+ } else {
+ info.so_timestamping =
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ info.phc_index = -1;
+ }
+
+ if (err)
+ return err;
+
+ if (copy_to_user(useraddr, &info, sizeof(info)))
+ err = -EFAULT;
+
+ return err;
+}
+
+static int ethtool_get_module_info(struct net_device *dev,
+ void __user *useraddr)
+{
+ int ret;
+ struct ethtool_modinfo modinfo;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+
+ if (!ops->get_module_info)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&modinfo, useraddr, sizeof(modinfo)))
+ return -EFAULT;
+
+ ret = ops->get_module_info(dev, &modinfo);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(useraddr, &modinfo, sizeof(modinfo)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int ethtool_get_module_eeprom(struct net_device *dev,
+ void __user *useraddr)
+{
+ int ret;
+ struct ethtool_modinfo modinfo;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+
+ if (!ops->get_module_info || !ops->get_module_eeprom)
+ return -EOPNOTSUPP;
+
+ ret = ops->get_module_info(dev, &modinfo);
+ if (ret)
+ return ret;
+
+ return ethtool_get_any_eeprom(dev, useraddr, ops->get_module_eeprom,
+ modinfo.eeprom_len);
+}
+
/* The main entry point in this file. Called from net/core/dev.c */
int dev_ethtool(struct net *net, struct ifreq *ifr)
@@ -1295,11 +1393,13 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
return -EFAULT;
if (!dev->ethtool_ops) {
- /* ETHTOOL_GDRVINFO does not require any driver support.
- * It is also unprivileged and does not change anything,
- * so we can take a shortcut to it. */
+ /* A few commands do not require any driver support,
+ * are unprivileged, and do not change anything, so we
+ * can take a shortcut to them. */
if (ethcmd == ETHTOOL_GDRVINFO)
return ethtool_get_drvinfo(dev, useraddr);
+ else if (ethcmd == ETHTOOL_GET_TS_INFO)
+ return ethtool_get_ts_info(dev, useraddr);
else
return -EOPNOTSUPP;
}
@@ -1330,6 +1430,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GRXCLSRULE:
case ETHTOOL_GRXCLSRLALL:
case ETHTOOL_GFEATURES:
+ case ETHTOOL_GET_TS_INFO:
break;
default:
if (!capable(CAP_NET_ADMIN))
@@ -1496,6 +1597,15 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GET_DUMP_DATA:
rc = ethtool_get_dump_data(dev, useraddr);
break;
+ case ETHTOOL_GET_TS_INFO:
+ rc = ethtool_get_ts_info(dev, useraddr);
+ break;
+ case ETHTOOL_GMODULEINFO:
+ rc = ethtool_get_module_info(dev, useraddr);
+ break;
+ case ETHTOOL_GMODULEEEPROM:
+ rc = ethtool_get_module_eeprom(dev, useraddr);
+ break;
default:
rc = -EOPNOTSUPP;
}
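
Drivers whose hardware lacks a PHC can satisfy the new ETHTOOL_GET_TS_INFO query by reusing the ethtool_op_get_ts_info() helper exported above. A hypothetical driver would wire it up like this (ops table trimmed to the relevant fields):

	#include <linux/ethtool.h>

	static const struct ethtool_ops example_ethtool_ops = {
		.get_link	= ethtool_op_get_link,
		/* reports software-only timestamping, phc_index = -1 */
		.get_ts_info	= ethtool_op_get_ts_info,
	};
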
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index c02e63c908da..72cceb79d0d4 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -542,7 +542,8 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
frh = nlmsg_data(nlh);
frh->family = ops->family;
frh->table = rule->table;
- NLA_PUT_U32(skb, FRA_TABLE, rule->table);
+ if (nla_put_u32(skb, FRA_TABLE, rule->table))
+ goto nla_put_failure;
frh->res1 = 0;
frh->res2 = 0;
frh->action = rule->action;
@@ -553,31 +554,28 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
frh->flags |= FIB_RULE_UNRESOLVED;
if (rule->iifname[0]) {
- NLA_PUT_STRING(skb, FRA_IIFNAME, rule->iifname);
-
+ if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
+ goto nla_put_failure;
if (rule->iifindex == -1)
frh->flags |= FIB_RULE_IIF_DETACHED;
}
if (rule->oifname[0]) {
- NLA_PUT_STRING(skb, FRA_OIFNAME, rule->oifname);
-
+ if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
+ goto nla_put_failure;
if (rule->oifindex == -1)
frh->flags |= FIB_RULE_OIF_DETACHED;
}
- if (rule->pref)
- NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref);
-
- if (rule->mark)
- NLA_PUT_U32(skb, FRA_FWMARK, rule->mark);
-
- if (rule->mark_mask || rule->mark)
- NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask);
-
- if (rule->target)
- NLA_PUT_U32(skb, FRA_GOTO, rule->target);
-
+ if ((rule->pref &&
+ nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
+ (rule->mark &&
+ nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
+ ((rule->mark_mask || rule->mark) &&
+ nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
+ (rule->target &&
+ nla_put_u32(skb, FRA_GOTO, rule->target)))
+ goto nla_put_failure;
if (ops->fill(rule, skb, frh) < 0)
goto nla_put_failure;
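
The conversion pattern applied here recurs throughout this series: the old NLA_PUT* macros hid a goto nla_put_failure inside themselves, whereas the nla_put_* functions make the error path explicit at each call site. A minimal sketch of the new style, with an illustrative attribute:

	#include <linux/fib_rules.h>
	#include <net/netlink.h>

	static int example_fill(struct sk_buff *skb, u32 pref)
	{
		/* nla_put_u32() returns non-zero when the skb runs out
		 * of tailroom; the jump target is visible to the reader */
		if (nla_put_u32(skb, FRA_PRIORITY, pref))
			goto nla_put_failure;
		return 0;

	nla_put_failure:
		return -EMSGSIZE;
	}
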
diff --git a/net/core/filter.c b/net/core/filter.c
index 6f755cca4520..a3eddb515d1b 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -38,6 +38,7 @@
#include <linux/filter.h>
#include <linux/reciprocal_div.h>
#include <linux/ratelimit.h>
+#include <linux/seccomp.h>
/* No hurry in this branch
*
@@ -317,6 +318,9 @@ load_b:
case BPF_S_ANC_CPU:
A = raw_smp_processor_id();
continue;
+ case BPF_S_ANC_ALU_XOR_X:
+ A ^= X;
+ continue;
case BPF_S_ANC_NLATTR: {
struct nlattr *nla;
@@ -352,6 +356,11 @@ load_b:
A = 0;
continue;
}
+#ifdef CONFIG_SECCOMP_FILTER
+ case BPF_S_ANC_SECCOMP_LD_W:
+ A = seccomp_bpf_load(fentry->k);
+ continue;
+#endif
default:
WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n",
fentry->code, fentry->jt,
@@ -528,7 +537,7 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
* Compare this with conditional jumps below,
* where offsets are limited. --ANK (981016)
*/
- if (ftest->k >= (unsigned)(flen-pc-1))
+ if (ftest->k >= (unsigned int)(flen-pc-1))
return -EINVAL;
break;
case BPF_S_JMP_JEQ_K:
@@ -561,6 +570,7 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
ANCILLARY(HATYPE);
ANCILLARY(RXHASH);
ANCILLARY(CPU);
+ ANCILLARY(ALU_XOR_X);
}
}
ftest->code = code;
@@ -589,6 +599,67 @@ void sk_filter_release_rcu(struct rcu_head *rcu)
}
EXPORT_SYMBOL(sk_filter_release_rcu);
+static int __sk_prepare_filter(struct sk_filter *fp)
+{
+ int err;
+
+ fp->bpf_func = sk_run_filter;
+
+ err = sk_chk_filter(fp->insns, fp->len);
+ if (err)
+ return err;
+
+ bpf_jit_compile(fp);
+ return 0;
+}
+
+/**
+ * sk_unattached_filter_create - create an unattached filter
+ * @pfp: location to place the created filter
+ * @fprog: the filter program
+ *
+ * Create a filter independent of any socket. We first run some
+ * sanity checks on it to make sure it does not explode on us later.
+ * If an error occurs or there is insufficient memory for the filter,
+ * a negative errno code is returned. On success the return is zero.
+ */
+int sk_unattached_filter_create(struct sk_filter **pfp,
+ struct sock_fprog *fprog)
+{
+ struct sk_filter *fp;
+ unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
+ int err;
+
+ /* Make sure the new filter is present and its length is sane. */
+ if (fprog->filter == NULL)
+ return -EINVAL;
+
+ fp = kmalloc(fsize + sizeof(*fp), GFP_KERNEL);
+ if (!fp)
+ return -ENOMEM;
+ memcpy(fp->insns, fprog->filter, fsize);
+
+ atomic_set(&fp->refcnt, 1);
+ fp->len = fprog->len;
+
+ err = __sk_prepare_filter(fp);
+ if (err)
+ goto free_mem;
+
+ *pfp = fp;
+ return 0;
+free_mem:
+ kfree(fp);
+ return err;
+}
+EXPORT_SYMBOL_GPL(sk_unattached_filter_create);
+
+void sk_unattached_filter_destroy(struct sk_filter *fp)
+{
+ sk_filter_release(fp);
+}
+EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
+
/**
* sk_attach_filter - attach a socket filter
* @fprog: the filter program
@@ -619,16 +690,13 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
atomic_set(&fp->refcnt, 1);
fp->len = fprog->len;
- fp->bpf_func = sk_run_filter;
- err = sk_chk_filter(fp->insns, fp->len);
+ err = __sk_prepare_filter(fp);
if (err) {
sk_filter_uncharge(sk, fp);
return err;
}
- bpf_jit_compile(fp);
-
old_fp = rcu_dereference_protected(sk->sk_filter,
sock_owned_by_user(sk));
rcu_assign_pointer(sk->sk_filter, fp);
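
The new unattached-filter API decouples classic BPF from sockets: callers hand sk_unattached_filter_create() a sock_fprog, run the result with SK_RUN_FILTER(), and drop it with sk_unattached_filter_destroy(). A hedged usage sketch with a trivial accept-all program (names illustrative, error handling trimmed):

	#include <linux/filter.h>
	#include <linux/kernel.h>
	#include <linux/skbuff.h>

	static struct sock_filter example_prog[] = {
		/* unconditionally accept up to 0xffff bytes */
		{ BPF_RET | BPF_K, 0, 0, 0xffff },
	};

	static int example_run(struct sk_buff *skb)
	{
		struct sock_fprog fprog = {
			.len	= ARRAY_SIZE(example_prog),
			.filter	= example_prog,
		};
		struct sk_filter *fp;
		int err, res;

		err = sk_unattached_filter_create(&fp, &fprog);
		if (err)
			return err;

		res = SK_RUN_FILTER(fp, skb);	/* bytes to keep, 0 = drop */
		sk_unattached_filter_destroy(fp);
		return res;
	}
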
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 0452eb27a272..ddedf211e588 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -27,7 +27,8 @@
static inline int
gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size)
{
- NLA_PUT(d->skb, type, size, buf);
+ if (nla_put(d->skb, type, size, buf))
+ goto nla_put_failure;
return 0;
nla_put_failure:
diff --git a/net/core/kmap_skb.h b/net/core/kmap_skb.h
deleted file mode 100644
index 52d0a4459041..000000000000
--- a/net/core/kmap_skb.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#include <linux/highmem.h>
-
-static inline void *kmap_skb_frag(const skb_frag_t *frag)
-{
-#ifdef CONFIG_HIGHMEM
- BUG_ON(in_irq());
-
- local_bh_disable();
-#endif
- return kmap_atomic(skb_frag_page(frag));
-}
-
-static inline void kunmap_skb_frag(void *vaddr)
-{
- kunmap_atomic(vaddr);
-#ifdef CONFIG_HIGHMEM
- local_bh_enable();
-#endif
-}
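
With the one-argument kmap_atomic() no longer needing a KM_* slot or the highmem/bh dance, these wrappers lose their reason to exist and callers map fragments directly. A sketch of the replacement pattern (helper name illustrative):

	#include <linux/highmem.h>
	#include <linux/skbuff.h>
	#include <linux/string.h>

	static void example_copy_frag(void *dst, const skb_frag_t *frag)
	{
		void *vaddr = kmap_atomic(skb_frag_page(frag));

		memcpy(dst, vaddr + frag->page_offset, skb_frag_size(frag));
		kunmap_atomic(vaddr);
	}
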
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 0a68045782d1..eb09f8bbbf07 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -15,6 +15,8 @@
* Harald Welte Add neighbour cache statistics like rtstat
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -712,14 +714,13 @@ void neigh_destroy(struct neighbour *neigh)
NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
if (!neigh->dead) {
- printk(KERN_WARNING
- "Destroying alive neighbour %p\n", neigh);
+ pr_warn("Destroying alive neighbour %p\n", neigh);
dump_stack();
return;
}
if (neigh_del_timer(neigh))
- printk(KERN_WARNING "Impossible event.\n");
+ pr_warn("Impossible event\n");
skb_queue_purge(&neigh->arp_queue);
neigh->arp_queue_len_bytes = 0;
@@ -890,7 +891,7 @@ static void neigh_timer_handler(unsigned long arg)
{
unsigned long now, next;
struct neighbour *neigh = (struct neighbour *)arg;
- unsigned state;
+ unsigned int state;
int notify = 0;
write_lock(&neigh->lock);
@@ -1500,7 +1501,7 @@ static void neigh_parms_destroy(struct neigh_parms *parms)
static struct lock_class_key neigh_table_proxy_queue_class;
-void neigh_table_init_no_netlink(struct neigh_table *tbl)
+static void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
unsigned long now = jiffies;
unsigned long phsize;
@@ -1538,7 +1539,6 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
tbl->last_flush = now;
tbl->last_rand = now + tbl->parms.reachable_time * 20;
}
-EXPORT_SYMBOL(neigh_table_init_no_netlink);
void neigh_table_init(struct neigh_table *tbl)
{
@@ -1555,8 +1555,8 @@ void neigh_table_init(struct neigh_table *tbl)
write_unlock(&neigh_tbl_lock);
if (unlikely(tmp)) {
- printk(KERN_ERR "NEIGH: Registering multiple tables for "
- "family %d\n", tbl->family);
+ pr_err("Registering multiple tables for family %d\n",
+ tbl->family);
dump_stack();
}
}
@@ -1572,7 +1572,7 @@ int neigh_table_clear(struct neigh_table *tbl)
pneigh_queue_purge(&tbl->proxy_queue);
neigh_ifdown(tbl, NULL);
if (atomic_read(&tbl->entries))
- printk(KERN_CRIT "neighbour leakage\n");
+ pr_crit("neighbour leakage\n");
write_lock(&neigh_tbl_lock);
for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
if (*tp == tbl) {
@@ -1768,29 +1768,29 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
if (nest == NULL)
return -ENOBUFS;
- if (parms->dev)
- NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
-
- NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
- NLA_PUT_U32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes);
- /* approximative value for deprecated QUEUE_LEN (in packets) */
- NLA_PUT_U32(skb, NDTPA_QUEUE_LEN,
- DIV_ROUND_UP(parms->queue_len_bytes,
- SKB_TRUESIZE(ETH_FRAME_LEN)));
- NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
- NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
- NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
- NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
- NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
- NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
- parms->base_reachable_time);
- NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
- NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
- NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
- NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
- NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
- NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
-
+ if ((parms->dev &&
+ nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
+ nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
+ nla_put_u32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes) ||
+ /* approximate value for deprecated QUEUE_LEN (in packets) */
+ nla_put_u32(skb, NDTPA_QUEUE_LEN,
+ DIV_ROUND_UP(parms->queue_len_bytes,
+ SKB_TRUESIZE(ETH_FRAME_LEN))) ||
+ nla_put_u32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen) ||
+ nla_put_u32(skb, NDTPA_APP_PROBES, parms->app_probes) ||
+ nla_put_u32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes) ||
+ nla_put_u32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes) ||
+ nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) ||
+ nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
+ parms->base_reachable_time) ||
+ nla_put_msecs(skb, NDTPA_GC_STALETIME, parms->gc_staletime) ||
+ nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
+ parms->delay_probe_time) ||
+ nla_put_msecs(skb, NDTPA_RETRANS_TIME, parms->retrans_time) ||
+ nla_put_msecs(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay) ||
+ nla_put_msecs(skb, NDTPA_PROXY_DELAY, parms->proxy_delay) ||
+ nla_put_msecs(skb, NDTPA_LOCKTIME, parms->locktime))
+ goto nla_put_failure;
return nla_nest_end(skb, nest);
nla_put_failure:
@@ -1815,12 +1815,12 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
ndtmsg->ndtm_pad1 = 0;
ndtmsg->ndtm_pad2 = 0;
- NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
- NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
- NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
- NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
- NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
-
+ if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
+ nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval) ||
+ nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
+ nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
+ nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
+ goto nla_put_failure;
{
unsigned long now = jiffies;
unsigned int flush_delta = now - tbl->last_flush;
@@ -1841,7 +1841,8 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
rcu_read_unlock_bh();
- NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
+ if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
+ goto nla_put_failure;
}
{
@@ -1866,7 +1867,8 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
ndst.ndts_forced_gc_runs += st->forced_gc_runs;
}
- NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
+ if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst))
+ goto nla_put_failure;
}
BUG_ON(tbl->parms.dev);
@@ -2137,7 +2139,8 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
ndm->ndm_type = neigh->type;
ndm->ndm_ifindex = neigh->dev->ifindex;
- NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);
+ if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
+ goto nla_put_failure;
read_lock_bh(&neigh->lock);
ndm->ndm_state = neigh->nud_state;
@@ -2157,8 +2160,9 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
read_unlock_bh(&neigh->lock);
- NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
- NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
+ if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
+ nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
+ goto nla_put_failure;
return nlmsg_end(skb, nlh);
@@ -2187,7 +2191,8 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
ndm->ndm_ifindex = pn->dev->ifindex;
ndm->ndm_state = NUD_NONE;
- NLA_PUT(skb, NDA_DST, tbl->key_len, pn->key);
+ if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
+ goto nla_put_failure;
return nlmsg_end(skb, nlh);
@@ -2795,7 +2800,6 @@ enum {
static struct neigh_sysctl_table {
struct ctl_table_header *sysctl_header;
struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
- char *dev_name;
} neigh_sysctl_template __read_mostly = {
.neigh_vars = {
[NEIGH_VAR_MCAST_PROBE] = {
@@ -2921,19 +2925,7 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
{
struct neigh_sysctl_table *t;
const char *dev_name_source = NULL;
-
-#define NEIGH_CTL_PATH_ROOT 0
-#define NEIGH_CTL_PATH_PROTO 1
-#define NEIGH_CTL_PATH_NEIGH 2
-#define NEIGH_CTL_PATH_DEV 3
-
- struct ctl_path neigh_path[] = {
- { .procname = "net", },
- { .procname = "proto", },
- { .procname = "neigh", },
- { .procname = "default", },
- { },
- };
+ char neigh_path[sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ];
t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
if (!t)
@@ -2961,7 +2953,7 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
} else {
- dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
+ dev_name_source = "default";
t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
@@ -2984,23 +2976,16 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev;
}
- t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
- if (!t->dev_name)
- goto free;
-
- neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
- neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
-
+ snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
+ p_name, dev_name_source);
t->sysctl_header =
- register_net_sysctl_table(neigh_parms_net(p), neigh_path, t->neigh_vars);
+ register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
if (!t->sysctl_header)
- goto free_procname;
+ goto free;
p->sysctl_table = t;
return 0;
-free_procname:
- kfree(t->dev_name);
free:
kfree(t);
err:
@@ -3013,8 +2998,7 @@ void neigh_sysctl_unregister(struct neigh_parms *p)
if (p->sysctl_table) {
struct neigh_sysctl_table *t = p->sysctl_table;
p->sysctl_table = NULL;
- unregister_sysctl_table(t->sysctl_header);
- kfree(t->dev_name);
+ unregister_net_sysctl_table(t->sysctl_header);
kfree(t);
}
}
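
The sysctl rework above drops the ctl_path array (and the kstrdup'd procname lifetime it forced) in favour of one formatted path string handed to register_net_sysctl(). A minimal sketch of the new registration style, with an illustrative table:

	#include <linux/sysctl.h>
	#include <net/net_namespace.h>

	static int example_value;

	static struct ctl_table example_vars[] = {
		{
			.procname	= "example",
			.data		= &example_value,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ }
	};

	static int example_register(struct net *net)
	{
		/* the path is just a string now; no ctl_path array and
		 * no separately allocated procname to free on teardown */
		struct ctl_table_header *hdr;

		hdr = register_net_sysctl(net, "net/core/example", example_vars);
		return hdr ? 0 : -ENOMEM;
	}
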
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 495586232aa1..fdf9e61d0651 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -74,15 +74,14 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
int (*set)(struct net_device *, unsigned long))
{
struct net_device *net = to_net_dev(dev);
- char *endp;
unsigned long new;
int ret = -EINVAL;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- new = simple_strtoul(buf, &endp, 0);
- if (endp == buf)
+ ret = kstrtoul(buf, 0, &new);
+ if (ret)
goto err;
if (!rtnl_trylock())
@@ -232,7 +231,7 @@ NETDEVICE_SHOW(flags, fmt_hex);
static int change_flags(struct net_device *net, unsigned long new_flags)
{
- return dev_change_flags(net, (unsigned) new_flags);
+ return dev_change_flags(net, (unsigned int) new_flags);
}
static ssize_t store_flags(struct device *dev, struct device_attribute *attr,
@@ -582,7 +581,7 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
return err;
}
- map = kzalloc(max_t(unsigned,
+ map = kzalloc(max_t(unsigned int,
RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
GFP_KERNEL);
if (!map) {
@@ -903,7 +902,7 @@ static ssize_t bql_set_hold_time(struct netdev_queue *queue,
const char *buf, size_t len)
{
struct dql *dql = &queue->dql;
- unsigned value;
+ unsigned int value;
int err;
err = kstrtouint(buf, 10, &value);
@@ -1107,7 +1106,7 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
return err;
}
- new_dev_maps = kzalloc(max_t(unsigned,
+ new_dev_maps = kzalloc(max_t(unsigned int,
XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL);
if (!new_dev_maps) {
free_cpumask_var(mask);
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 0e950fda9a0a..dddbacb8f28c 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
@@ -83,21 +85,29 @@ assign:
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
- int err;
+ int err = -ENOMEM;
+ void *data = NULL;
+
if (ops->id && ops->size) {
- void *data = kzalloc(ops->size, GFP_KERNEL);
+ data = kzalloc(ops->size, GFP_KERNEL);
if (!data)
- return -ENOMEM;
+ goto out;
err = net_assign_generic(net, *ops->id, data);
- if (err) {
- kfree(data);
- return err;
- }
+ if (err)
+ goto cleanup;
}
+ err = 0;
if (ops->init)
- return ops->init(net);
- return 0;
+ err = ops->init(net);
+ if (!err)
+ return 0;
+
+cleanup:
+ kfree(data);
+
+out:
+ return err;
}
static void ops_free(const struct pernet_operations *ops, struct net *net)
@@ -204,8 +214,8 @@ static void net_free(struct net *net)
{
#ifdef NETNS_REFCNT_DEBUG
if (unlikely(atomic_read(&net->use_count) != 0)) {
- printk(KERN_EMERG "network namespace not free! Usage: %d\n",
- atomic_read(&net->use_count));
+ pr_emerg("network namespace not free! Usage: %d\n",
+ atomic_read(&net->use_count));
return;
}
#endif
@@ -448,12 +458,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
static int __register_pernet_operations(struct list_head *list,
struct pernet_operations *ops)
{
- int err = 0;
- err = ops_init(ops, &init_net);
- if (err)
- ops_free(ops, &init_net);
- return err;
-
+ return ops_init(ops, &init_net);
}
static void __unregister_pernet_operations(struct pernet_operations *ops)
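
Restructured this way, ops_init() owns the entire failure path: every exit funnels through one cleanup ladder instead of freeing at each call site, which is what lets __register_pernet_operations() above collapse to a single call. The shape of the pattern, with hypothetical types and helpers:

	#include <linux/slab.h>

	struct example_ctx;
	struct example_ops {
		size_t size;
		int (*init)(struct example_ctx *ctx);
	};
	int example_assign(struct example_ctx *ctx, void *data);

	static int example_init(struct example_ops *ops, struct example_ctx *ctx)
	{
		int err = -ENOMEM;
		void *data = NULL;

		if (ops->size) {
			data = kzalloc(ops->size, GFP_KERNEL);
			if (!data)
				goto out;

			err = example_assign(ctx, data);
			if (err)
				goto cleanup;
		}

		err = 0;
		if (ops->init)
			err = ops->init(ctx);
		if (!err)
			return 0;

	cleanup:
		kfree(data);	/* kfree(NULL) is a no-op, so one ladder serves all */
	out:
		return err;
	}
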
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index ba6900f73900..5b8aa2fae48b 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -9,6 +9,8 @@
* Authors: Neil Horman <nhorman@tuxdriver.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -23,21 +25,6 @@
#include <net/sock.h>
#include <net/netprio_cgroup.h>
-static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp);
-static void cgrp_destroy(struct cgroup *cgrp);
-static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp);
-
-struct cgroup_subsys net_prio_subsys = {
- .name = "net_prio",
- .create = cgrp_create,
- .destroy = cgrp_destroy,
- .populate = cgrp_populate,
-#ifdef CONFIG_NETPRIO_CGROUP
- .subsys_id = net_prio_subsys_id,
-#endif
- .module = THIS_MODULE
-};
-
#define PRIOIDX_SZ 128
static unsigned long prioidx_map[PRIOIDX_SZ];
@@ -88,7 +75,7 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
old_priomap = rtnl_dereference(dev->priomap);
if (!new_priomap) {
- printk(KERN_WARNING "Unable to alloc new priomap!\n");
+ pr_warn("Unable to alloc new priomap!\n");
return;
}
@@ -136,7 +123,7 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
ret = get_prioidx(&cs->prioidx);
if (ret != 0) {
- printk(KERN_WARNING "No space in priority index array\n");
+ pr_warn("No space in priority index array\n");
kfree(cs);
return ERR_PTR(ret);
}
@@ -257,12 +244,19 @@ static struct cftype ss_files[] = {
.read_map = read_priomap,
.write_string = write_priomap,
},
+ { } /* terminate */
};
-static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
-{
- return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
-}
+struct cgroup_subsys net_prio_subsys = {
+ .name = "net_prio",
+ .create = cgrp_create,
+ .destroy = cgrp_destroy,
+#ifdef CONFIG_NETPRIO_CGROUP
+ .subsys_id = net_prio_subsys_id,
+#endif
+ .base_cftypes = ss_files,
+ .module = THIS_MODULE
+};
static int netprio_device_event(struct notifier_block *unused,
unsigned long event, void *ptr)
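
This hunk rides the cgroup core's migration from per-subsystem ->populate() callbacks to declarative base_cftypes arrays: the subsystem hands core a NULL-terminated cftype table and the files are created for it. A sketch of the shape, with hypothetical handlers:

	#include <linux/cgroup.h>

	static struct cgroup_subsys_state *example_create(struct cgroup *cgrp);
	static void example_destroy(struct cgroup *cgrp);
	static u64 example_read_u64(struct cgroup *cgrp, struct cftype *cft);
	static int example_write_u64(struct cgroup *cgrp, struct cftype *cft,
				     u64 val);

	static struct cftype example_files[] = {
		{
			.name		= "example.value",
			.read_u64	= example_read_u64,
			.write_u64	= example_write_u64,
		},
		{ }	/* terminate */
	};

	struct cgroup_subsys example_subsys = {
		.name		= "example",
		.create		= example_create,
		.destroy	= example_destroy,
		.base_cftypes	= example_files,	/* replaces ->populate() */
	};
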
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 4d8ce93cd503..cce9e53528b1 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -320,7 +320,7 @@ struct pktgen_dev {
(see RFC 3260, sec. 4) */
/* MPLS */
- unsigned nr_labels; /* Depth of stack, 0 = no MPLS */
+ unsigned int nr_labels; /* Depth of stack, 0 = no MPLS */
__be32 labels[MAX_MPLS_LABELS];
/* VLAN/SVLAN (802.1Q/Q-in-Q) */
@@ -373,10 +373,10 @@ struct pktgen_dev {
*/
char odevname[32];
struct flow_state *flows;
- unsigned cflows; /* Concurrent flows (config) */
- unsigned lflow; /* Flow length (config) */
- unsigned nflows; /* accumulated flows (stats) */
- unsigned curfl; /* current sequenced flow (state)*/
+ unsigned int cflows; /* Concurrent flows (config) */
+ unsigned int lflow; /* Flow length (config) */
+ unsigned int nflows; /* accumulated flows (stats) */
+ unsigned int curfl; /* current sequenced flow (state)*/
u16 queue_map_min;
u16 queue_map_max;
@@ -592,7 +592,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
pkt_dev->src_mac_count, pkt_dev->dst_mac_count);
if (pkt_dev->nr_labels) {
- unsigned i;
+ unsigned int i;
seq_printf(seq, " mpls: ");
for (i = 0; i < pkt_dev->nr_labels; i++)
seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]),
@@ -812,7 +812,7 @@ done_str:
static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev)
{
- unsigned n = 0;
+ unsigned int n = 0;
char c;
ssize_t i = 0;
int len;
@@ -891,8 +891,8 @@ static ssize_t pktgen_if_write(struct file *file,
if (copy_from_user(tb, user_buffer, copy))
return -EFAULT;
tb[copy] = 0;
- printk(KERN_DEBUG "pktgen: %s,%lu buffer -:%s:-\n", name,
- (unsigned long)count, tb);
+ pr_debug("%s,%lu buffer -:%s:-\n",
+ name, (unsigned long)count, tb);
}
if (!strcmp(name, "min_pkt_size")) {
@@ -1261,8 +1261,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->cur_daddr = pkt_dev->daddr_min;
}
if (debug)
- printk(KERN_DEBUG "pktgen: dst_min set to: %s\n",
- pkt_dev->dst_min);
+ pr_debug("dst_min set to: %s\n", pkt_dev->dst_min);
i += len;
sprintf(pg_result, "OK: dst_min=%s", pkt_dev->dst_min);
return count;
@@ -1284,8 +1283,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->cur_daddr = pkt_dev->daddr_max;
}
if (debug)
- printk(KERN_DEBUG "pktgen: dst_max set to: %s\n",
- pkt_dev->dst_max);
+ pr_debug("dst_max set to: %s\n", pkt_dev->dst_max);
i += len;
sprintf(pg_result, "OK: dst_max=%s", pkt_dev->dst_max);
return count;
@@ -1307,7 +1305,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->cur_in6_daddr = pkt_dev->in6_daddr;
if (debug)
- printk(KERN_DEBUG "pktgen: dst6 set to: %s\n", buf);
+ pr_debug("dst6 set to: %s\n", buf);
i += len;
sprintf(pg_result, "OK: dst6=%s", buf);
@@ -1329,7 +1327,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->cur_in6_daddr = pkt_dev->min_in6_daddr;
if (debug)
- printk(KERN_DEBUG "pktgen: dst6_min set to: %s\n", buf);
+ pr_debug("dst6_min set to: %s\n", buf);
i += len;
sprintf(pg_result, "OK: dst6_min=%s", buf);
@@ -1350,7 +1348,7 @@ static ssize_t pktgen_if_write(struct file *file,
snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->max_in6_daddr);
if (debug)
- printk(KERN_DEBUG "pktgen: dst6_max set to: %s\n", buf);
+ pr_debug("dst6_max set to: %s\n", buf);
i += len;
sprintf(pg_result, "OK: dst6_max=%s", buf);
@@ -1373,7 +1371,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->cur_in6_saddr = pkt_dev->in6_saddr;
if (debug)
- printk(KERN_DEBUG "pktgen: src6 set to: %s\n", buf);
+ pr_debug("src6 set to: %s\n", buf);
i += len;
sprintf(pg_result, "OK: src6=%s", buf);
@@ -1394,8 +1392,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->cur_saddr = pkt_dev->saddr_min;
}
if (debug)
- printk(KERN_DEBUG "pktgen: src_min set to: %s\n",
- pkt_dev->src_min);
+ pr_debug("src_min set to: %s\n", pkt_dev->src_min);
i += len;
sprintf(pg_result, "OK: src_min=%s", pkt_dev->src_min);
return count;
@@ -1415,8 +1412,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->cur_saddr = pkt_dev->saddr_max;
}
if (debug)
- printk(KERN_DEBUG "pktgen: src_max set to: %s\n",
- pkt_dev->src_max);
+ pr_debug("src_max set to: %s\n", pkt_dev->src_max);
i += len;
sprintf(pg_result, "OK: src_max=%s", pkt_dev->src_max);
return count;
@@ -1510,7 +1506,7 @@ static ssize_t pktgen_if_write(struct file *file,
}
if (!strcmp(name, "mpls")) {
- unsigned n, cnt;
+ unsigned int n, cnt;
len = get_labels(&user_buffer[i], pkt_dev);
if (len < 0)
@@ -1527,7 +1523,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->svlan_id = 0xffff;
if (debug)
- printk(KERN_DEBUG "pktgen: VLAN/SVLAN auto turned off\n");
+ pr_debug("VLAN/SVLAN auto turned off\n");
}
return count;
}
@@ -1542,10 +1538,10 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->vlan_id = value; /* turn on VLAN */
if (debug)
- printk(KERN_DEBUG "pktgen: VLAN turned on\n");
+ pr_debug("VLAN turned on\n");
if (debug && pkt_dev->nr_labels)
- printk(KERN_DEBUG "pktgen: MPLS auto turned off\n");
+ pr_debug("MPLS auto turned off\n");
pkt_dev->nr_labels = 0; /* turn off MPLS */
sprintf(pg_result, "OK: vlan_id=%u", pkt_dev->vlan_id);
@@ -1554,7 +1550,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->svlan_id = 0xffff;
if (debug)
- printk(KERN_DEBUG "pktgen: VLAN/SVLAN turned off\n");
+ pr_debug("VLAN/SVLAN turned off\n");
}
return count;
}
@@ -1599,10 +1595,10 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->svlan_id = value; /* turn on SVLAN */
if (debug)
- printk(KERN_DEBUG "pktgen: SVLAN turned on\n");
+ pr_debug("SVLAN turned on\n");
if (debug && pkt_dev->nr_labels)
- printk(KERN_DEBUG "pktgen: MPLS auto turned off\n");
+ pr_debug("MPLS auto turned off\n");
pkt_dev->nr_labels = 0; /* turn off MPLS */
sprintf(pg_result, "OK: svlan_id=%u", pkt_dev->svlan_id);
@@ -1611,7 +1607,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->svlan_id = 0xffff;
if (debug)
- printk(KERN_DEBUG "pktgen: VLAN/SVLAN turned off\n");
+ pr_debug("VLAN/SVLAN turned off\n");
}
return count;
}
@@ -1779,8 +1775,7 @@ static ssize_t pktgen_thread_write(struct file *file,
i += len;
if (debug)
- printk(KERN_DEBUG "pktgen: t=%s, count=%lu\n",
- name, (unsigned long)count);
+ pr_debug("t=%s, count=%lu\n", name, (unsigned long)count);
if (!t) {
pr_err("ERROR: No thread\n");
@@ -1931,7 +1926,7 @@ static int pktgen_device_event(struct notifier_block *unused,
{
struct net_device *dev = ptr;
- if (!net_eq(dev_net(dev), &init_net))
+ if (!net_eq(dev_net(dev), &init_net) || pktgen_exiting)
return NOTIFY_DONE;
/* It is OK that we do not hold the group lock right now,
@@ -2324,7 +2319,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
}
if (pkt_dev->flags & F_MPLS_RND) {
- unsigned i;
+ unsigned int i;
for (i = 0; i < pkt_dev->nr_labels; i++)
if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM)
pkt_dev->labels[i] = MPLS_STACK_BOTTOM |
@@ -2550,7 +2545,7 @@ err:
static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev)
{
- unsigned i;
+ unsigned int i;
for (i = 0; i < pkt_dev->nr_labels; i++)
*mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM;
@@ -2934,8 +2929,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
if (datalen < sizeof(struct pktgen_hdr)) {
datalen = sizeof(struct pktgen_hdr);
- if (net_ratelimit())
- pr_info("increased datalen to %d\n", datalen);
+ net_info_ratelimited("increased datalen to %d\n", datalen);
}
udph->source = htons(pkt_dev->cur_udp_src);
@@ -3365,8 +3359,8 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
pkt_dev->errors++;
break;
default: /* Drivers are not supposed to return other values! */
- if (net_ratelimit())
- pr_info("%s xmit error: %d\n", pkt_dev->odevname, ret);
+ net_info_ratelimited("%s xmit error: %d\n",
+ pkt_dev->odevname, ret);
pkt_dev->errors++;
/* fallthru */
case NETDEV_TX_LOCKED:
@@ -3755,12 +3749,18 @@ static void __exit pg_cleanup(void)
{
struct pktgen_thread *t;
struct list_head *q, *n;
+ LIST_HEAD(list);
/* Stop all interfaces & threads */
pktgen_exiting = true;
- list_for_each_safe(q, n, &pktgen_threads) {
+ mutex_lock(&pktgen_thread_lock);
+ list_splice_init(&pktgen_threads, &list);
+ mutex_unlock(&pktgen_thread_lock);
+
+ list_for_each_safe(q, n, &list) {
t = list_entry(q, struct pktgen_thread, th_list);
+ list_del(&t->th_list);
kthread_stop(t->tsk);
kfree(t);
}
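
The pg_cleanup() change fixes a teardown race with a standard trick: detach the whole thread list under the mutex via list_splice_init(), then walk the private copy lock-free so that kthread_stop() may sleep without pktgen_thread_lock held. The pattern in isolation, names illustrative:

	#include <linux/kthread.h>
	#include <linux/list.h>
	#include <linux/mutex.h>
	#include <linux/slab.h>

	struct example_thread {
		struct list_head node;
		struct task_struct *tsk;
	};

	static void example_stop_all(struct mutex *lock, struct list_head *threads)
	{
		struct example_thread *t, *n;
		LIST_HEAD(list);

		/* steal the whole list; later readers see an empty head */
		mutex_lock(lock);
		list_splice_init(threads, &list);
		mutex_unlock(lock);

		list_for_each_entry_safe(t, n, &list, node) {
			list_del(&t->node);
			kthread_stop(t->tsk);	/* may sleep: no lock held */
			kfree(t);
		}
	}
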
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 90430b776ece..21318d15bbc3 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -35,7 +35,9 @@
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
+#include <linux/if_bridge.h>
#include <linux/pci.h>
+#include <linux/etherdevice.h>
#include <asm/uaccess.h>
@@ -552,7 +554,7 @@ void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data
}
EXPORT_SYMBOL(__rta_fill);
-int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group, int echo)
+int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
{
struct sock *rtnl = net->rtnl;
int err = 0;
@@ -607,7 +609,8 @@ int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
for (i = 0; i < RTAX_MAX; i++) {
if (metrics[i]) {
valid++;
- NLA_PUT_U32(skb, i+1, metrics[i]);
+ if (nla_put_u32(skb, i+1, metrics[i]))
+ goto nla_put_failure;
}
}
@@ -782,6 +785,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ nla_total_size(4) /* IFLA_MTU */
+ nla_total_size(4) /* IFLA_LINK */
+ nla_total_size(4) /* IFLA_MASTER */
+ + nla_total_size(4) /* IFLA_PROMISCUITY */
+ nla_total_size(1) /* IFLA_OPERSTATE */
+ nla_total_size(1) /* IFLA_LINKMODE */
+ nla_total_size(ext_filter_mask
@@ -807,7 +811,8 @@ static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
vf_port = nla_nest_start(skb, IFLA_VF_PORT);
if (!vf_port)
goto nla_put_failure;
- NLA_PUT_U32(skb, IFLA_PORT_VF, vf);
+ if (nla_put_u32(skb, IFLA_PORT_VF, vf))
+ goto nla_put_failure;
err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
if (err == -EMSGSIZE)
goto nla_put_failure;
@@ -891,25 +896,23 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
ifm->ifi_flags = dev_get_flags(dev);
ifm->ifi_change = change;
- NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
- NLA_PUT_U32(skb, IFLA_TXQLEN, dev->tx_queue_len);
- NLA_PUT_U8(skb, IFLA_OPERSTATE,
- netif_running(dev) ? dev->operstate : IF_OPER_DOWN);
- NLA_PUT_U8(skb, IFLA_LINKMODE, dev->link_mode);
- NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
- NLA_PUT_U32(skb, IFLA_GROUP, dev->group);
-
- if (dev->ifindex != dev->iflink)
- NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);
-
- if (dev->master)
- NLA_PUT_U32(skb, IFLA_MASTER, dev->master->ifindex);
-
- if (dev->qdisc)
- NLA_PUT_STRING(skb, IFLA_QDISC, dev->qdisc->ops->id);
-
- if (dev->ifalias)
- NLA_PUT_STRING(skb, IFLA_IFALIAS, dev->ifalias);
+ if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
+ nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
+ nla_put_u8(skb, IFLA_OPERSTATE,
+ netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
+ nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
+ nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
+ nla_put_u32(skb, IFLA_GROUP, dev->group) ||
+ nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
+ (dev->ifindex != dev->iflink &&
+ nla_put_u32(skb, IFLA_LINK, dev->iflink)) ||
+ (dev->master &&
+ nla_put_u32(skb, IFLA_MASTER, dev->master->ifindex)) ||
+ (dev->qdisc &&
+ nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
+ (dev->ifalias &&
+ nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)))
+ goto nla_put_failure;
if (1) {
struct rtnl_link_ifmap map = {
@@ -920,12 +923,14 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
.dma = dev->dma,
.port = dev->if_port,
};
- NLA_PUT(skb, IFLA_MAP, sizeof(map), &map);
+ if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
+ goto nla_put_failure;
}
if (dev->addr_len) {
- NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
- NLA_PUT(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast);
+ if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
+ nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
+ goto nla_put_failure;
}
attr = nla_reserve(skb, IFLA_STATS,
@@ -942,8 +947,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
goto nla_put_failure;
copy_rtnl_link_stats64(nla_data(attr), stats);
- if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF))
- NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent));
+ if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) &&
+ nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)))
+ goto nla_put_failure;
if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent
&& (ext_filter_mask & RTEXT_FILTER_VF)) {
@@ -986,12 +992,13 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
nla_nest_cancel(skb, vfinfo);
goto nla_put_failure;
}
- NLA_PUT(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac);
- NLA_PUT(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan);
- NLA_PUT(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
- &vf_tx_rate);
- NLA_PUT(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
- &vf_spoofchk);
+ if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
+ nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
+ nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
+ &vf_tx_rate) ||
+ nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
+ &vf_spoofchk))
+ goto nla_put_failure;
nla_nest_end(skb, vf);
}
nla_nest_end(skb, vfinfo);
@@ -1113,6 +1120,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_PORT_SELF] = { .type = NLA_NESTED },
[IFLA_AF_SPEC] = { .type = NLA_NESTED },
[IFLA_EXT_MASK] = { .type = NLA_U32 },
+ [IFLA_PROMISCUITY] = { .type = NLA_U32 },
};
EXPORT_SYMBOL(ifla_policy);
@@ -1516,11 +1524,9 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
err = 0;
errout:
- if (err < 0 && modified && net_ratelimit())
- printk(KERN_WARNING "A link change request failed with "
- "some changes committed already. Interface %s may "
- "have been left with an inconsistent configuration, "
- "please check.\n", dev->name);
+ if (err < 0 && modified)
+ net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
+ dev->name);
if (send_addr_notify)
call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
@@ -1634,14 +1640,14 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
int err;
struct net_device *dev;
unsigned int num_queues = 1;
- unsigned int real_num_queues = 1;
if (ops->get_tx_queues) {
- err = ops->get_tx_queues(src_net, tb, &num_queues,
- &real_num_queues);
- if (err)
+ err = ops->get_tx_queues(src_net, tb);
+ if (err < 0)
goto err;
+ num_queues = err;
}
+
err = -ENOMEM;
dev = alloc_netdev_mq(ops->priv_size, ifname, ops->setup, num_queues);
if (!dev)
@@ -1947,7 +1953,7 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
return skb->len;
}
-void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
+void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change)
{
struct net *net = dev_net(dev);
struct sk_buff *skb;
@@ -1972,6 +1978,267 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}
+static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
+ struct net_device *dev,
+ u8 *addr, u32 pid, u32 seq,
+ int type, unsigned int flags)
+{
+ struct nlmsghdr *nlh;
+ struct ndmsg *ndm;
+
+ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), NLM_F_MULTI);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ ndm = nlmsg_data(nlh);
+ ndm->ndm_family = AF_BRIDGE;
+ ndm->ndm_pad1 = 0;
+ ndm->ndm_pad2 = 0;
+ ndm->ndm_flags = flags;
+ ndm->ndm_type = 0;
+ ndm->ndm_ifindex = dev->ifindex;
+ ndm->ndm_state = NUD_PERMANENT;
+
+ if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
+ goto nla_put_failure;
+
+ return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
+static inline size_t rtnl_fdb_nlmsg_size(void)
+{
+ return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN);
+}
+
+static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, int type)
+{
+ struct net *net = dev_net(dev);
+ struct sk_buff *skb;
+ int err = -ENOBUFS;
+
+ skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
+ if (!skb)
+ goto errout;
+
+ err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF);
+ if (err < 0) {
+ kfree_skb(skb);
+ goto errout;
+ }
+
+ rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
+ return;
+errout:
+ rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
+}
+
+static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+{
+ struct net *net = sock_net(skb->sk);
+ struct net_device *master = NULL;
+ struct ndmsg *ndm;
+ struct nlattr *tb[NDA_MAX+1];
+ struct net_device *dev;
+ u8 *addr;
+ int err;
+
+ err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
+ if (err < 0)
+ return err;
+
+ ndm = nlmsg_data(nlh);
+ if (ndm->ndm_ifindex == 0) {
+ pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ifindex\n");
+ return -EINVAL;
+ }
+
+ dev = __dev_get_by_index(net, ndm->ndm_ifindex);
+ if (dev == NULL) {
+ pr_info("PF_BRIDGE: RTM_NEWNEIGH with unknown ifindex\n");
+ return -ENODEV;
+ }
+
+ if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
+ pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid address\n");
+ return -EINVAL;
+ }
+
+ addr = nla_data(tb[NDA_LLADDR]);
+ if (!is_valid_ether_addr(addr)) {
+ pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ether address\n");
+ return -EINVAL;
+ }
+
+ err = -EOPNOTSUPP;
+
+ /* Support fdb on the master device, the net/bridge default case */
+ if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
+ (dev->priv_flags & IFF_BRIDGE_PORT)) {
+ master = dev->master;
+ err = master->netdev_ops->ndo_fdb_add(ndm, dev, addr,
+ nlh->nlmsg_flags);
+ if (err)
+ goto out;
+ else
+ ndm->ndm_flags &= ~NTF_MASTER;
+ }
+
+ /* Embedded bridge, macvlan, and any other device support */
+ if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_add) {
+ err = dev->netdev_ops->ndo_fdb_add(ndm, dev, addr,
+ nlh->nlmsg_flags);
+
+ if (!err) {
+ rtnl_fdb_notify(dev, addr, RTM_NEWNEIGH);
+ ndm->ndm_flags &= ~NTF_SELF;
+ }
+ }
+out:
+ return err;
+}
+
+static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+{
+ struct net *net = sock_net(skb->sk);
+ struct ndmsg *ndm;
+ struct nlattr *llattr;
+ struct net_device *dev;
+ int err = -EINVAL;
+ __u8 *addr;
+
+ if (nlmsg_len(nlh) < sizeof(*ndm))
+ return -EINVAL;
+
+ ndm = nlmsg_data(nlh);
+ if (ndm->ndm_ifindex == 0) {
+ pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid ifindex\n");
+ return -EINVAL;
+ }
+
+ dev = __dev_get_by_index(net, ndm->ndm_ifindex);
+ if (dev == NULL) {
+ pr_info("PF_BRIDGE: RTM_DELNEIGH with unknown ifindex\n");
+ return -ENODEV;
+ }
+
+ llattr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_LLADDR);
+ if (llattr == NULL || nla_len(llattr) != ETH_ALEN) {
+ pr_info("PF_BRIGDE: RTM_DELNEIGH with invalid address\n");
+ return -EINVAL;
+ }
+
+ addr = nla_data(llattr);
+ err = -EOPNOTSUPP;
+
+ /* Support fdb on the master device, the net/bridge default case */
+ if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
+ (dev->priv_flags & IFF_BRIDGE_PORT)) {
+ struct net_device *master = dev->master;
+
+ if (master->netdev_ops->ndo_fdb_del)
+ err = master->netdev_ops->ndo_fdb_del(ndm, dev, addr);
+
+ if (err)
+ goto out;
+ else
+ ndm->ndm_flags &= ~NTF_MASTER;
+ }
+
+ /* Embedded bridge, macvlan, and any other device support */
+ if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_del) {
+ err = dev->netdev_ops->ndo_fdb_del(ndm, dev, addr);
+
+ if (!err) {
+ rtnl_fdb_notify(dev, addr, RTM_DELNEIGH);
+ ndm->ndm_flags &= ~NTF_SELF;
+ }
+ }
+out:
+ return err;
+}
+
+static int nlmsg_populate_fdb(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev,
+ int *idx,
+ struct netdev_hw_addr_list *list)
+{
+ struct netdev_hw_addr *ha;
+ int err;
+ u32 pid, seq;
+
+ pid = NETLINK_CB(cb->skb).pid;
+ seq = cb->nlh->nlmsg_seq;
+
+ list_for_each_entry(ha, &list->list, list) {
+ if (*idx < cb->args[0])
+ goto skip;
+
+ err = nlmsg_populate_fdb_fill(skb, dev, ha->addr,
+ pid, seq, 0, NTF_SELF);
+ if (err < 0)
+ return err;
+skip:
+ *idx += 1;
+ }
+ return 0;
+}
+
+/**
+ * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table
+ * @skb: socket buffer the FDB entries are filled into
+ * @cb: netlink callback carrying the dump position
+ * @dev: netdevice whose unicast and multicast lists are dumped
+ * @idx: running entry index, used to resume interrupted dumps
+ *
+ * Default netdevice operation to dump the existing unicast address list.
+ * Returns the updated index.
+ */
+int ndo_dflt_fdb_dump(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev,
+ int idx)
+{
+ int err;
+
+ netif_addr_lock_bh(dev);
+ err = nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->uc);
+ if (err)
+ goto out;
+ nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->mc);
+out:
+ netif_addr_unlock_bh(dev);
+ return idx;
+}
+EXPORT_SYMBOL(ndo_dflt_fdb_dump);
+
+static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ int idx = 0;
+ struct net *net = sock_net(skb->sk);
+ struct net_device *dev;
+
+ rcu_read_lock();
+ for_each_netdev_rcu(net, dev) {
+ if (dev->priv_flags & IFF_BRIDGE_PORT) {
+ struct net_device *master = dev->master;
+ const struct net_device_ops *ops = master->netdev_ops;
+
+ if (ops->ndo_fdb_dump)
+ idx = ops->ndo_fdb_dump(skb, cb, dev, idx);
+ }
+
+ if (dev->netdev_ops->ndo_fdb_dump)
+ idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, idx);
+ }
+ rcu_read_unlock();
+
+ cb->args[0] = idx;
+ return skb->len;
+}
+
/* Protected by RTNL semaphore. */
static struct rtattr **rta_buf;
static int rtattr_max;
@@ -2042,7 +2309,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len);
while (RTA_OK(attr, attrlen)) {
- unsigned flavor = attr->rta_type;
+ unsigned int flavor = attr->rta_type;
if (flavor) {
if (flavor > rta_max[sz_idx])
return -EINVAL;
@@ -2144,5 +2411,9 @@ void __init rtnetlink_init(void)
rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, NULL);
rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, NULL);
+
+ rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, NULL);
+ rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, NULL);
+ rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, NULL);
}
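
A device that keeps its forwarding entries in the standard dev->uc/dev->mc lists can join the new PF_BRIDGE dump path by pointing ndo_fdb_dump at the exported default. A hypothetical driver ops table:

	#include <linux/netdevice.h>

	static int example_fdb_dump(struct sk_buff *skb,
				    struct netlink_callback *cb,
				    struct net_device *dev, int idx)
	{
		/* ndo_dflt_fdb_dump() walks dev->uc and dev->mc under the
		 * address lock and returns the updated entry index */
		return ndo_dflt_fdb_dump(skb, cb, dev, idx);
	}

	static const struct net_device_ops example_netdev_ops = {
		.ndo_fdb_dump	= example_fdb_dump,
	};
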
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index baf8d281152c..016694d62484 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -36,6 +36,8 @@
* The functions in this file will not compile correctly with gcc 2.4.x
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -67,10 +69,9 @@
#include <asm/uaccess.h>
#include <trace/events/skb.h>
+#include <linux/highmem.h>
-#include "kmap_skb.h"
-
-static struct kmem_cache *skbuff_head_cache __read_mostly;
+struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;
static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
@@ -119,11 +120,10 @@ static const struct pipe_buf_operations sock_pipe_buf_ops = {
*/
static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
- printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
- "data:%p tail:%#lx end:%#lx dev:%s\n",
- here, skb->len, sz, skb->head, skb->data,
- (unsigned long)skb->tail, (unsigned long)skb->end,
- skb->dev ? skb->dev->name : "<NULL>");
+ pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
+ __func__, here, skb->len, sz, skb->head, skb->data,
+ (unsigned long)skb->tail, (unsigned long)skb->end,
+ skb->dev ? skb->dev->name : "<NULL>");
BUG();
}
@@ -138,11 +138,10 @@ static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
- printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
- "data:%p tail:%#lx end:%#lx dev:%s\n",
- here, skb->len, sz, skb->head, skb->data,
- (unsigned long)skb->tail, (unsigned long)skb->end,
- skb->dev ? skb->dev->name : "<NULL>");
+ pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
+ __func__, here, skb->len, sz, skb->head, skb->data,
+ (unsigned long)skb->tail, (unsigned long)skb->end,
+ skb->dev ? skb->dev->name : "<NULL>");
BUG();
}
@@ -246,6 +245,7 @@ EXPORT_SYMBOL(__alloc_skb);
/**
* build_skb - build a network buffer
* @data: data buffer provided by caller
+ * @frag_size: size of fragment, or 0 if head was kmalloced
*
* Allocate a new &sk_buff. Caller provides space holding head and
* skb_shared_info. @data must have been allocated by kmalloc()
@@ -259,20 +259,21 @@ EXPORT_SYMBOL(__alloc_skb);
* before giving packet to stack.
* RX rings only contains data buffers, not full skbs.
*/
-struct sk_buff *build_skb(void *data)
+struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
struct skb_shared_info *shinfo;
struct sk_buff *skb;
- unsigned int size;
+ unsigned int size = frag_size ? : ksize(data);
skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
if (!skb)
return NULL;
- size = ksize(data) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
memset(skb, 0, offsetof(struct sk_buff, tail));
skb->truesize = SKB_TRUESIZE(size);
+ skb->head_frag = frag_size != 0;
atomic_set(&skb->users, 1);
skb->head = data;
skb->data = data;
@@ -292,6 +293,46 @@ struct sk_buff *build_skb(void *data)
}
EXPORT_SYMBOL(build_skb);
+struct netdev_alloc_cache {
+ struct page *page;
+ unsigned int offset;
+};
+static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
+
+/**
+ * netdev_alloc_frag - allocate a page fragment
+ * @fragsz: fragment size
+ *
+ * Allocates a frag from a page for receive buffer.
+ * Uses GFP_ATOMIC allocations.
+ */
+void *netdev_alloc_frag(unsigned int fragsz)
+{
+ struct netdev_alloc_cache *nc;
+ void *data = NULL;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ nc = &__get_cpu_var(netdev_alloc_cache);
+ if (unlikely(!nc->page)) {
+refill:
+ nc->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+ nc->offset = 0;
+ }
+ if (likely(nc->page)) {
+ if (nc->offset + fragsz > PAGE_SIZE) {
+ put_page(nc->page);
+ goto refill;
+ }
+ data = page_address(nc->page) + nc->offset;
+ nc->offset += fragsz;
+ get_page(nc->page);
+ }
+ local_irq_restore(flags);
+ return data;
+}
+EXPORT_SYMBOL(netdev_alloc_frag);
+
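
netdev_alloc_frag() and the frag_size-aware build_skb() are meant to pair up in receive paths; a hedged sketch of the intended usage, mirroring what __netdev_alloc_skb() does just below (helper name illustrative):

	static struct sk_buff *example_rx_alloc(unsigned int len)
	{
		unsigned int fragsz = SKB_DATA_ALIGN(len) +
				      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		void *data = netdev_alloc_frag(fragsz);
		struct sk_buff *skb;

		if (!data)
			return NULL;

		skb = build_skb(data, fragsz);
		if (!skb)	/* drop the frag's page reference by hand */
			put_page(virt_to_head_page(data));
		return skb;
	}
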
/**
* __netdev_alloc_skb - allocate an skbuff for rx on a specific device
* @dev: network device to receive on
@@ -306,11 +347,23 @@ EXPORT_SYMBOL(build_skb);
* %NULL is returned if there is no free memory.
*/
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
- unsigned int length, gfp_t gfp_mask)
+ unsigned int length, gfp_t gfp_mask)
{
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
+ unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ if (fragsz <= PAGE_SIZE && !(gfp_mask & __GFP_WAIT)) {
+ void *data = netdev_alloc_frag(fragsz);
- skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
+ if (likely(data)) {
+ skb = build_skb(data, fragsz);
+ if (unlikely(!skb))
+ put_page(virt_to_head_page(data));
+ }
+ } else {
+ skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
+ }
if (likely(skb)) {
skb_reserve(skb, NET_SKB_PAD);
skb->dev = dev;
@@ -329,28 +382,6 @@ void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
}
EXPORT_SYMBOL(skb_add_rx_frag);
-/**
- * dev_alloc_skb - allocate an skbuff for receiving
- * @length: length to allocate
- *
- * Allocate a new &sk_buff and assign it a usage count of one. The
- * buffer has unspecified headroom built in. Users should allocate
- * the headroom they think they need without accounting for the
- * built in space. The built in space is used for optimisations.
- *
- * %NULL is returned if there is no free memory. Although this function
- * allocates memory it can be called from an interrupt.
- */
-struct sk_buff *dev_alloc_skb(unsigned int length)
-{
- /*
- * There is more code here than it seems:
- * __dev_alloc_skb is an inline
- */
- return __dev_alloc_skb(length, GFP_ATOMIC);
-}
-EXPORT_SYMBOL(dev_alloc_skb);
-
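dev_alloc_skb() does not disappear with this hunk; it survives as a trivial wrapper in skbuff.h, approximately:

	/* legacy helper around netdev_alloc_skb() */
	static inline struct sk_buff *dev_alloc_skb(unsigned int length)
	{
		return netdev_alloc_skb(NULL, length);
	}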
static void skb_drop_list(struct sk_buff **listp)
{
struct sk_buff *list = *listp;
@@ -377,6 +408,14 @@ static void skb_clone_fraglist(struct sk_buff *skb)
skb_get(list);
}
+static void skb_free_head(struct sk_buff *skb)
+{
+ if (skb->head_frag)
+ put_page(virt_to_head_page(skb->head));
+ else
+ kfree(skb->head);
+}
+
static void skb_release_data(struct sk_buff *skb)
{
if (!skb->cloned ||
@@ -403,7 +442,7 @@ static void skb_release_data(struct sk_buff *skb)
if (skb_has_frag_list(skb))
skb_drop_fraglist(skb);
- kfree(skb->head);
+ skb_free_head(skb);
}
}
@@ -645,6 +684,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
C(tail);
C(end);
C(head);
+ C(head_frag);
C(data);
C(truesize);
atomic_set(&n->users, 1);
@@ -707,10 +747,10 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
}
return -ENOMEM;
}
- vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
+ vaddr = kmap_atomic(skb_frag_page(f));
memcpy(page_address(page),
vaddr + f->page_offset, skb_frag_size(f));
- kunmap_skb_frag(vaddr);
+ kunmap_atomic(vaddr);
page->private = (unsigned long)head;
head = page;
}
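The kmap_skb_frag()/kunmap_skb_frag() pair being replaced here and below was a thin veneer over kmap_atomic(); it looked approximately like (sketch of the old net/core/kmap_skb.h):

	static inline void *kmap_skb_frag(const skb_frag_t *frag)
	{
	#ifdef CONFIG_HIGHMEM
		BUG_ON(in_irq());
		local_bh_disable();
	#endif
		return kmap_atomic(skb_frag_page(frag));
	}

With stack-based kmap_atomic() there is no per-type slot left to protect, so the direct calls suffice and the wrapper can go.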
@@ -819,7 +859,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
int headerlen = skb_headroom(skb);
- unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len;
+ unsigned int size = skb_end_offset(skb) + skb->data_len;
struct sk_buff *n = alloc_skb(size, gfp_mask);
if (!n)
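skb_end_offset() is a new skbuff.h helper hiding the NET_SKBUFF_DATA_USES_OFFSET split; approximately:

	#ifdef NET_SKBUFF_DATA_USES_OFFSET
	static inline unsigned int skb_end_offset(const struct sk_buff *skb)
	{
		return skb->end;		/* end already kept as an offset */
	}
	#else
	static inline unsigned int skb_end_offset(const struct sk_buff *skb)
	{
		return skb->end - skb->head;	/* end kept as a pointer */
	}
	#endif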
@@ -920,9 +960,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
{
int i;
u8 *data;
- int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail;
+ int size = nhead + skb_end_offset(skb) + ntail;
long off;
- bool fastpath;
BUG_ON(nhead < 0);
@@ -931,30 +970,11 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
size = SKB_DATA_ALIGN(size);
- /* Check if we can avoid taking references on fragments if we own
- * the last reference on skb->head. (see skb_release_data())
- */
- if (!skb->cloned)
- fastpath = true;
- else {
- int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
- fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
- }
-
- if (fastpath &&
- size + sizeof(struct skb_shared_info) <= ksize(skb->head)) {
- memmove(skb->head + size, skb_shinfo(skb),
- offsetof(struct skb_shared_info,
- frags[skb_shinfo(skb)->nr_frags]));
- memmove(skb->head + nhead, skb->head,
- skb_tail_pointer(skb) - skb->head);
- off = nhead;
- goto adjust_others;
- }
-
- data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
+ data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+ gfp_mask);
if (!data)
goto nodata;
+ size = SKB_WITH_OVERHEAD(ksize(data));
/* Copy only real data... and, alas, header. This should be
* optimized for the cases when header is void.
@@ -965,9 +985,12 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
skb_shinfo(skb),
offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
- if (fastpath) {
- kfree(skb->head);
- } else {
+ /*
+ * If shinfo is shared we must drop the old head gracefully, but if it
+ * is not shared we can simply free the old head and leave the existing
+ * refcounts alone, since all we did was relocate the values.
+ */
+ if (skb_cloned(skb)) {
/* copy this zero copy skb frags */
if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
if (skb_copy_ubufs(skb, gfp_mask))
@@ -980,11 +1003,13 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
skb_clone_fraglist(skb);
skb_release_data(skb);
+ } else {
+ skb_free_head(skb);
}
off = (data + nhead) - skb->head;
skb->head = data;
-adjust_others:
+ skb->head_frag = 0;
skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
skb->end = size;
@@ -1273,7 +1298,7 @@ drop_pages:
return -ENOMEM;
nfrag->next = frag->next;
- kfree_skb(frag);
+ consume_skb(frag);
frag = nfrag;
*fragp = frag;
}
@@ -1485,21 +1510,22 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
+ skb_frag_t *f = &skb_shinfo(skb)->frags[i];
WARN_ON(start > offset + len);
- end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ end = start + skb_frag_size(f);
if ((copy = end - offset) > 0) {
u8 *vaddr;
if (copy > len)
copy = len;
- vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
+ vaddr = kmap_atomic(skb_frag_page(f));
memcpy(to,
- vaddr + skb_shinfo(skb)->frags[i].page_offset+
- offset - start, copy);
- kunmap_skb_frag(vaddr);
+ vaddr + f->page_offset + offset - start,
+ copy);
+ kunmap_atomic(vaddr);
if ((len -= copy) == 0)
return 0;
@@ -1545,9 +1571,9 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
put_page(spd->pages[i]);
}
-static inline struct page *linear_to_page(struct page *page, unsigned int *len,
- unsigned int *offset,
- struct sk_buff *skb, struct sock *sk)
+static struct page *linear_to_page(struct page *page, unsigned int *len,
+ unsigned int *offset,
+ struct sk_buff *skb, struct sock *sk)
{
struct page *p = sk->sk_sndmsg_page;
unsigned int off;
@@ -1563,6 +1589,9 @@ new_page:
} else {
unsigned int mlen;
+ /* If we are the only user of the page, we can reset offset */
+ if (page_count(p) == 1)
+ sk->sk_sndmsg_off = 0;
off = sk->sk_sndmsg_off;
mlen = PAGE_SIZE - off;
if (mlen < 64 && mlen < *len) {
@@ -1576,36 +1605,48 @@ new_page:
memcpy(page_address(p) + off, page_address(page) + *offset, *len);
sk->sk_sndmsg_off += *len;
*offset = off;
- get_page(p);
return p;
}
+static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
+ struct page *page,
+ unsigned int offset)
+{
+ return spd->nr_pages &&
+ spd->pages[spd->nr_pages - 1] == page &&
+ (spd->partial[spd->nr_pages - 1].offset +
+ spd->partial[spd->nr_pages - 1].len == offset);
+}
+
/*
* Fill page/offset/length into spd, if it can hold more pages.
*/
-static inline int spd_fill_page(struct splice_pipe_desc *spd,
- struct pipe_inode_info *pipe, struct page *page,
- unsigned int *len, unsigned int offset,
- struct sk_buff *skb, int linear,
- struct sock *sk)
+static bool spd_fill_page(struct splice_pipe_desc *spd,
+ struct pipe_inode_info *pipe, struct page *page,
+ unsigned int *len, unsigned int offset,
+ struct sk_buff *skb, bool linear,
+ struct sock *sk)
{
- if (unlikely(spd->nr_pages == pipe->buffers))
- return 1;
+ if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
+ return true;
if (linear) {
page = linear_to_page(page, len, &offset, skb, sk);
if (!page)
- return 1;
- } else
- get_page(page);
-
+ return true;
+ }
+ if (spd_can_coalesce(spd, page, offset)) {
+ spd->partial[spd->nr_pages - 1].len += *len;
+ return false;
+ }
+ get_page(page);
spd->pages[spd->nr_pages] = page;
spd->partial[spd->nr_pages].len = *len;
spd->partial[spd->nr_pages].offset = offset;
spd->nr_pages++;
- return 0;
+ return false;
}
static inline void __segment_seek(struct page **page, unsigned int *poff,
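A worked example of the new coalescing (illustrative values): with the last queued entry {page P, offset 0, len 256}, a following chunk at (P, offset 256) satisfies spd_can_coalesce(), so spd_fill_page() extends the existing entry rather than consuming another slot and page reference:

	spd->partial[spd->nr_pages - 1].len += *len;	/* 256 -> 256 + *len */

This matters because spd is now capped at MAX_SKB_FRAGS entries instead of being grown to the pipe size.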
@@ -1622,20 +1663,20 @@ static inline void __segment_seek(struct page **page, unsigned int *poff,
*plen -= off;
}
-static inline int __splice_segment(struct page *page, unsigned int poff,
- unsigned int plen, unsigned int *off,
- unsigned int *len, struct sk_buff *skb,
- struct splice_pipe_desc *spd, int linear,
- struct sock *sk,
- struct pipe_inode_info *pipe)
+static bool __splice_segment(struct page *page, unsigned int poff,
+ unsigned int plen, unsigned int *off,
+ unsigned int *len, struct sk_buff *skb,
+ struct splice_pipe_desc *spd, bool linear,
+ struct sock *sk,
+ struct pipe_inode_info *pipe)
{
if (!*len)
- return 1;
+ return true;
/* skip this segment if already processed */
if (*off >= plen) {
*off -= plen;
- return 0;
+ return false;
}
/* ignore any bits we already processed */
@@ -1651,34 +1692,38 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
- return 1;
+ return true;
__segment_seek(&page, &poff, &plen, flen);
*len -= flen;
} while (*len && plen);
- return 0;
+ return false;
}
/*
- * Map linear and fragment data from the skb to spd. It reports failure if the
+ * Map linear and fragment data from the skb to spd. It reports true if the
* pipe is full or if we already spliced the requested length.
*/
-static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
- unsigned int *offset, unsigned int *len,
- struct splice_pipe_desc *spd, struct sock *sk)
+static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
+ unsigned int *offset, unsigned int *len,
+ struct splice_pipe_desc *spd, struct sock *sk)
{
int seg;
- /*
- * map the linear part
+ /* map the linear part:
+ * If skb->head_frag is set, this 'linear' part is backed by a
+ * fragment, and if the head is not shared with any clones then
+ * we can avoid a copy since we own the head portion of this page.
*/
if (__splice_segment(virt_to_page(skb->data),
(unsigned long) skb->data & (PAGE_SIZE - 1),
skb_headlen(skb),
- offset, len, skb, spd, 1, sk, pipe))
- return 1;
+ offset, len, skb, spd,
+ skb_head_is_locked(skb),
+ sk, pipe))
+ return true;
/*
* then map the fragments
@@ -1688,11 +1733,11 @@ static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
if (__splice_segment(skb_frag_page(f),
f->page_offset, skb_frag_size(f),
- offset, len, skb, spd, 0, sk, pipe))
- return 1;
+ offset, len, skb, spd, false, sk, pipe))
+ return true;
}
- return 0;
+ return false;
}
/*
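skb_head_is_locked() is a new skbuff.h predicate; the head may only be spliced by reference when it is frag-backed and unshared, so it reads approximately:

	static inline bool skb_head_is_locked(const struct sk_buff *skb)
	{
		return !skb->head_frag || skb_cloned(skb);
	}

When it returns true, __splice_segment() keeps treating the head as linear data and copies it into a private page via linear_to_page(), as before.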
@@ -1705,8 +1750,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
struct pipe_inode_info *pipe, unsigned int tlen,
unsigned int flags)
{
- struct partial_page partial[PIPE_DEF_BUFFERS];
- struct page *pages[PIPE_DEF_BUFFERS];
+ struct partial_page partial[MAX_SKB_FRAGS];
+ struct page *pages[MAX_SKB_FRAGS];
struct splice_pipe_desc spd = {
.pages = pages,
.partial = partial,
@@ -1718,9 +1763,6 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
struct sock *sk = skb->sk;
int ret = 0;
- if (splice_grow_spd(pipe, &spd))
- return -ENOMEM;
-
/*
* __skb_splice_bits() only fails if the output has no room left,
* so no point in going over the frag_list for the error case.
@@ -1756,7 +1798,6 @@ done:
lock_sock(sk);
}
- splice_shrink_spd(pipe, &spd);
return ret;
}
@@ -1804,10 +1845,10 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
if (copy > len)
copy = len;
- vaddr = kmap_skb_frag(frag);
+ vaddr = kmap_atomic(skb_frag_page(frag));
memcpy(vaddr + frag->page_offset + offset - start,
from, copy);
- kunmap_skb_frag(vaddr);
+ kunmap_atomic(vaddr);
if ((len -= copy) == 0)
return 0;
@@ -1867,21 +1908,21 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
WARN_ON(start > offset + len);
- end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ end = start + skb_frag_size(frag);
if ((copy = end - offset) > 0) {
__wsum csum2;
u8 *vaddr;
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (copy > len)
copy = len;
- vaddr = kmap_skb_frag(frag);
+ vaddr = kmap_atomic(skb_frag_page(frag));
csum2 = csum_partial(vaddr + frag->page_offset +
offset - start, copy, 0);
- kunmap_skb_frag(vaddr);
+ kunmap_atomic(vaddr);
csum = csum_block_add(csum, csum2, pos);
if (!(len -= copy))
return csum;
@@ -1953,12 +1994,12 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
if (copy > len)
copy = len;
- vaddr = kmap_skb_frag(frag);
+ vaddr = kmap_atomic(skb_frag_page(frag));
csum2 = csum_partial_copy_nocheck(vaddr +
frag->page_offset +
offset - start, to,
copy, 0);
- kunmap_skb_frag(vaddr);
+ kunmap_atomic(vaddr);
csum = csum_block_add(csum, csum2, pos);
if (!(len -= copy))
return csum;
@@ -2478,7 +2519,7 @@ next_skb:
if (abs_offset < block_limit) {
if (!st->frag_data)
- st->frag_data = kmap_skb_frag(frag);
+ st->frag_data = kmap_atomic(skb_frag_page(frag));
*data = (u8 *) st->frag_data + frag->page_offset +
(abs_offset - st->stepped_offset);
@@ -2487,7 +2528,7 @@ next_skb:
}
if (st->frag_data) {
- kunmap_skb_frag(st->frag_data);
+ kunmap_atomic(st->frag_data);
st->frag_data = NULL;
}
@@ -2496,7 +2537,7 @@ next_skb:
}
if (st->frag_data) {
- kunmap_skb_frag(st->frag_data);
+ kunmap_atomic(st->frag_data);
st->frag_data = NULL;
}
@@ -2524,7 +2565,7 @@ EXPORT_SYMBOL(skb_seq_read);
void skb_abort_seq_read(struct skb_seq_state *st)
{
if (st->frag_data)
- kunmap_skb_frag(st->frag_data);
+ kunmap_atomic(st->frag_data);
}
EXPORT_SYMBOL(skb_abort_seq_read);
@@ -2716,14 +2757,13 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
if (unlikely(!nskb))
goto err;
- hsize = skb_end_pointer(nskb) - nskb->head;
+ hsize = skb_end_offset(nskb);
if (skb_cow_head(nskb, doffset + headroom)) {
kfree_skb(nskb);
goto err;
}
- nskb->truesize += skb_end_pointer(nskb) - nskb->head -
- hsize;
+ nskb->truesize += skb_end_offset(nskb) - hsize;
skb_release_head_state(nskb);
__skb_push(nskb, doffset);
} else {
@@ -2841,6 +2881,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
unsigned int len = skb_gro_len(skb);
unsigned int offset = skb_gro_offset(skb);
unsigned int headlen = skb_headlen(skb);
+ unsigned int delta_truesize;
if (p->len + len >= 65536)
return -E2BIG;
@@ -2870,11 +2911,41 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
frag->page_offset += offset;
skb_frag_size_sub(frag, offset);
+ /* all fragments' truesize: remove (head size + sk_buff) */
+ delta_truesize = skb->truesize -
+ SKB_TRUESIZE(skb_end_offset(skb));
+
skb->truesize -= skb->data_len;
skb->len -= skb->data_len;
skb->data_len = 0;
- NAPI_GRO_CB(skb)->free = 1;
+ NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
+ goto done;
+ } else if (skb->head_frag) {
+ int nr_frags = pinfo->nr_frags;
+ skb_frag_t *frag = pinfo->frags + nr_frags;
+ struct page *page = virt_to_head_page(skb->head);
+ unsigned int first_size = headlen - offset;
+ unsigned int first_offset;
+
+ if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
+ return -E2BIG;
+
+ first_offset = skb->data -
+ (unsigned char *)page_address(page) +
+ offset;
+
+ pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
+
+ frag->page.p = page;
+ frag->page_offset = first_offset;
+ skb_frag_size_set(frag, first_size);
+
+ memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
+ /* We don't need to clear skbinfo->nr_frags here */
+
+ delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
+ NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
goto done;
} else if (skb_gro_len(p) != pinfo->gso_size)
return -E2BIG;
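For reference, the accounting above leans on the skbuff.h macro (sketch):

	#define SKB_TRUESIZE(X) ((X) +					\
		SKB_DATA_ALIGN(sizeof(struct sk_buff)) +		\
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

In the frag-merge path only the page fragments move to p, so the donor's head and struct sk_buff stay out of delta_truesize; in the stolen-head path the head page moves as well, leaving only the aligned struct sk_buff behind.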
@@ -2916,7 +2987,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
p = nskb;
merge:
- p->truesize += skb->truesize - len;
+ delta_truesize = skb->truesize;
if (offset > headlen) {
unsigned int eat = offset - headlen;
@@ -2936,7 +3007,7 @@ merge:
done:
NAPI_GRO_CB(p)->count++;
p->data_len += len;
- p->truesize += len;
+ p->truesize += delta_truesize;
p->len += len;
NAPI_GRO_CB(skb)->same_flow = 1;
@@ -3164,7 +3235,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
int len = skb->len;
if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
- (unsigned)sk->sk_rcvbuf)
+ (unsigned int)sk->sk_rcvbuf)
return -ENOMEM;
skb_orphan(skb);
@@ -3258,10 +3329,8 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
{
if (unlikely(start > skb_headlen(skb)) ||
unlikely((int)start + off > skb_headlen(skb) - 2)) {
- if (net_ratelimit())
- printk(KERN_WARNING
- "bad partial csum: csum=%u/%u len=%u\n",
- start, off, skb_headlen(skb));
+ net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n",
+ start, off, skb_headlen(skb));
return false;
}
skb->ip_summed = CHECKSUM_PARTIAL;
@@ -3273,8 +3342,93 @@ EXPORT_SYMBOL_GPL(skb_partial_csum_set);
void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
- if (net_ratelimit())
- pr_warning("%s: received packets cannot be forwarded"
- " while LRO is enabled\n", skb->dev->name);
+ net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
+ skb->dev->name);
}
EXPORT_SYMBOL(__skb_warn_lro_forwarding);
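The net_<level>_ratelimited() helpers used here and throughout the rest of this series fold the open-coded `if (net_ratelimit()) printk(...)` pattern into one call; roughly (linux/net.h):

	#define net_ratelimited_function(function, ...)		\
	do {							\
		if (net_ratelimit())				\
			function(__VA_ARGS__);			\
	} while (0)

	#define net_warn_ratelimited(fmt, ...)			\
		net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)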
+
+void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
+{
+ if (head_stolen)
+ kmem_cache_free(skbuff_head_cache, skb);
+ else
+ __kfree_skb(skb);
+}
+EXPORT_SYMBOL(kfree_skb_partial);
+
+/**
+ * skb_try_coalesce - try to merge skb to prior one
+ * @to: prior buffer
+ * @from: buffer to add
+ * @fragstolen: set to true if @from's head page was moved into @to
+ * @delta_truesize: set to the truesize delta accounted to @to
+ */
+bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
+ bool *fragstolen, int *delta_truesize)
+{
+ int i, delta, len = from->len;
+
+ *fragstolen = false;
+
+ if (skb_cloned(to))
+ return false;
+
+ if (len <= skb_tailroom(to)) {
+ BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
+ *delta_truesize = 0;
+ return true;
+ }
+
+ if (skb_has_frag_list(to) || skb_has_frag_list(from))
+ return false;
+
+ if (skb_headlen(from) != 0) {
+ struct page *page;
+ unsigned int offset;
+
+ if (skb_shinfo(to)->nr_frags +
+ skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
+ return false;
+
+ if (skb_head_is_locked(from))
+ return false;
+
+ delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
+
+ page = virt_to_head_page(from->head);
+ offset = from->data - (unsigned char *)page_address(page);
+
+ skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
+ page, offset, skb_headlen(from));
+ *fragstolen = true;
+ } else {
+ if (skb_shinfo(to)->nr_frags +
+ skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
+ return false;
+
+ delta = from->truesize -
+ SKB_TRUESIZE(skb_end_pointer(from) - from->head);
+ }
+
+ WARN_ON_ONCE(delta < len);
+
+ memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
+ skb_shinfo(from)->frags,
+ skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
+ skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;
+
+ if (!skb_cloned(from))
+ skb_shinfo(from)->nr_frags = 0;
+
+ /* if the skb is not cloned this does nothing since we set nr_frags to 0 */
+ for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
+ skb_frag_ref(from, i);
+
+ to->truesize += delta;
+ to->len += len;
+ to->data_len += len;
+
+ *delta_truesize = delta;
+ return true;
+}
+EXPORT_SYMBOL(skb_try_coalesce);
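A sketch of the intended call pattern (illustrative; receive-path coalescing such as TCP's would look roughly like this):

	bool fragstolen;
	int delta;

	if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
		atomic_add(delta, &sk->sk_rmem_alloc);	/* account moved truesize */
		sk_mem_charge(sk, delta);
		kfree_skb_partial(skb, fragstolen);	/* free what's left of donor */
	} else {
		__skb_queue_tail(&sk->sk_receive_queue, skb);
	}

kfree_skb_partial() frees only the sk_buff struct when the head page was stolen into @to, since everything else is now referenced by the receiver.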
diff --git a/net/core/sock.c b/net/core/sock.c
index b2e14c07d920..653f8c0aedc5 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -89,6 +89,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
@@ -113,6 +115,7 @@
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
+#include <linux/prefetch.h>
#include <asm/uaccess.h>
@@ -140,7 +143,7 @@ static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
-int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
+int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
struct proto *proto;
int ret = 0;
@@ -148,7 +151,7 @@ int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
mutex_lock(&proto_list_mutex);
list_for_each_entry(proto, &proto_list, node) {
if (proto->init_cgroup) {
- ret = proto->init_cgroup(cgrp, ss);
+ ret = proto->init_cgroup(memcg, ss);
if (ret)
goto out;
}
@@ -159,19 +162,19 @@ int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
out:
list_for_each_entry_continue_reverse(proto, &proto_list, node)
if (proto->destroy_cgroup)
- proto->destroy_cgroup(cgrp);
+ proto->destroy_cgroup(memcg);
mutex_unlock(&proto_list_mutex);
return ret;
}
-void mem_cgroup_sockets_destroy(struct cgroup *cgrp)
+void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
{
struct proto *proto;
mutex_lock(&proto_list_mutex);
list_for_each_entry_reverse(proto, &proto_list, node)
if (proto->destroy_cgroup)
- proto->destroy_cgroup(cgrp);
+ proto->destroy_cgroup(memcg);
mutex_unlock(&proto_list_mutex);
}
#endif
@@ -258,7 +261,9 @@ static struct lock_class_key af_callback_keys[AF_MAX];
/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
+EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
+EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
@@ -294,9 +299,8 @@ static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
*timeo_p = 0;
if (warned < 10 && net_ratelimit()) {
warned++;
- printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
- "tries to set negative timeout\n",
- current->comm, task_pid_nr(current));
+ pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
+ __func__, current->comm, task_pid_nr(current));
}
return 0;
}
@@ -314,8 +318,8 @@ static void sock_warn_obsolete_bsdism(const char *name)
static char warncomm[TASK_COMM_LEN];
if (strcmp(warncomm, current->comm) && warned < 5) {
strcpy(warncomm, current->comm);
- printk(KERN_WARNING "process `%s' is using obsolete "
- "%s SO_BSDCOMPAT\n", warncomm, name);
+ pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
+ warncomm, name);
warned++;
}
}
@@ -389,7 +393,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
skb->dev = NULL;
- if (sk_rcvqueues_full(sk, skb)) {
+ if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
atomic_inc(&sk->sk_drops);
goto discard_and_relse;
}
@@ -406,7 +410,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
rc = sk_backlog_rcv(sk, skb);
mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
- } else if (sk_add_backlog(sk, skb)) {
+ } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
bh_unlock_sock(sk);
atomic_inc(&sk->sk_drops);
goto discard_and_relse;
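Context for the new third argument: sk_rcvqueues_full() and sk_add_backlog() now take an explicit limit instead of always reading sk->sk_rcvbuf, so protocols can pass a larger bound. The sock.h signatures are approximately:

	static inline bool sk_rcvqueues_full(const struct sock *sk,
					     const struct sk_buff *skb,
					     unsigned int limit);
	static inline __must_check int sk_add_backlog(struct sock *sk,
						      struct sk_buff *skb,
						      unsigned int limit);

Generic sk_receive_skb() simply passes sk->sk_rcvbuf, preserving the old behavior.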
@@ -561,7 +565,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
sock_valbool_flag(sk, SOCK_DBG, valbool);
break;
case SO_REUSEADDR:
- sk->sk_reuse = valbool;
+ sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
break;
case SO_TYPE:
case SO_PROTOCOL:
@@ -577,23 +581,15 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
break;
case SO_SNDBUF:
/* Don't error on this BSD doesn't and if you think
- about it this is right. Otherwise apps have to
- play 'guess the biggest size' games. RCVBUF/SNDBUF
- are treated in BSD as hints */
-
- if (val > sysctl_wmem_max)
- val = sysctl_wmem_max;
+ * about it this is right. Otherwise apps have to
+ * play 'guess the biggest size' games. RCVBUF/SNDBUF
+ * are treated in BSD as hints
+ */
+ val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
- if ((val * 2) < SOCK_MIN_SNDBUF)
- sk->sk_sndbuf = SOCK_MIN_SNDBUF;
- else
- sk->sk_sndbuf = val * 2;
-
- /*
- * Wake up sending tasks if we
- * upped the value.
- */
+ sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
+ /* Wake up sending tasks if we upped the value. */
sk->sk_write_space(sk);
break;
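The observable semantics are unchanged by the min_t()/max_t() cleanup: the request is clamped to wmem_max, doubled to cover sk_buff overhead, and floored at SOCK_MIN_SNDBUF. From user space (illustrative):

	int val = 256 * 1024;
	socklen_t len = sizeof(val);

	setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &val, sizeof(val));
	/* kernel stored max(min(val, wmem_max) * 2, SOCK_MIN_SNDBUF) */
	getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &val, &len);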
@@ -606,12 +602,11 @@ set_sndbuf:
case SO_RCVBUF:
/* Don't error on this BSD doesn't and if you think
- about it this is right. Otherwise apps have to
- play 'guess the biggest size' games. RCVBUF/SNDBUF
- are treated in BSD as hints */
-
- if (val > sysctl_rmem_max)
- val = sysctl_rmem_max;
+ * about it this is right. Otherwise apps have to
+ * play 'guess the biggest size' games. RCVBUF/SNDBUF
+ * are treated in BSD as hints
+ */
+ val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
/*
@@ -629,10 +624,7 @@ set_rcvbuf:
* returning the value we actually used in getsockopt
* is the most desirable behavior.
*/
- if ((val * 2) < SOCK_MIN_RCVBUF)
- sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
- else
- sk->sk_rcvbuf = val * 2;
+ sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
break;
case SO_RCVBUFFORCE:
@@ -821,8 +813,8 @@ void cred_to_ucred(struct pid *pid, const struct cred *cred,
if (cred) {
struct user_namespace *current_ns = current_user_ns();
- ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid);
- ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid);
+ ucred->uid = from_kuid(current_ns, cred->euid);
+ ucred->gid = from_kgid(current_ns, cred->egid);
}
}
EXPORT_SYMBOL_GPL(cred_to_ucred);
@@ -858,7 +850,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
case SO_BROADCAST:
- v.val = !!sock_flag(sk, SOCK_BROADCAST);
+ v.val = sock_flag(sk, SOCK_BROADCAST);
break;
case SO_SNDBUF:
@@ -874,7 +866,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
case SO_KEEPALIVE:
- v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
+ v.val = sock_flag(sk, SOCK_KEEPOPEN);
break;
case SO_TYPE:
@@ -896,7 +888,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
case SO_OOBINLINE:
- v.val = !!sock_flag(sk, SOCK_URGINLINE);
+ v.val = sock_flag(sk, SOCK_URGINLINE);
break;
case SO_NO_CHECK:
@@ -909,7 +901,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
case SO_LINGER:
lv = sizeof(v.ling);
- v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
+ v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
v.ling.l_linger = sk->sk_lingertime / HZ;
break;
@@ -975,7 +967,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
case SO_PASSCRED:
- v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
+ v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
break;
case SO_PEERCRED:
@@ -1010,7 +1002,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
case SO_PASSSEC:
- v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
+ v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
break;
case SO_PEERSEC:
@@ -1021,11 +1013,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
case SO_RXQ_OVFL:
- v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
+ v.val = sock_flag(sk, SOCK_RXQ_OVFL);
break;
case SO_WIFI_STATUS:
- v.val = !!sock_flag(sk, SOCK_WIFI_STATUS);
+ v.val = sock_flag(sk, SOCK_WIFI_STATUS);
break;
case SO_PEEK_OFF:
@@ -1035,7 +1027,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
v.val = sk->sk_peek_off;
break;
case SO_NOFCS:
- v.val = !!sock_flag(sk, SOCK_NOFCS);
+ v.val = sock_flag(sk, SOCK_NOFCS);
break;
default:
return -ENOPROTOOPT;
@@ -1247,8 +1239,8 @@ static void __sk_free(struct sock *sk)
sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
if (atomic_read(&sk->sk_omem_alloc))
- printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
- __func__, atomic_read(&sk->sk_omem_alloc));
+ pr_debug("%s: optmem leakage (%d bytes) detected\n",
+ __func__, atomic_read(&sk->sk_omem_alloc));
if (sk->sk_peer_cred)
put_cred(sk->sk_peer_cred);
@@ -1534,7 +1526,7 @@ struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
*/
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
- if ((unsigned)size <= sysctl_optmem_max &&
+ if ((unsigned int)size <= sysctl_optmem_max &&
atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
void *mem;
/* First do the add, to avoid the race if kmalloc
@@ -1712,6 +1704,7 @@ static void __release_sock(struct sock *sk)
do {
struct sk_buff *next = skb->next;
+ prefetch(next);
WARN_ON_ONCE(skb_dst_is_noref(skb));
skb->next = NULL;
sk_backlog_rcv(sk, skb);
@@ -2432,7 +2425,7 @@ static void assign_proto_idx(struct proto *prot)
prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
- printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
+ pr_err("PROTO_INUSE_NR exhausted\n");
return;
}
@@ -2462,8 +2455,8 @@ int proto_register(struct proto *prot, int alloc_slab)
NULL);
if (prot->slab == NULL) {
- printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
- prot->name);
+ pr_crit("%s: Can't create sock SLAB cache!\n",
+ prot->name);
goto out;
}
@@ -2477,8 +2470,8 @@ int proto_register(struct proto *prot, int alloc_slab)
SLAB_HWCACHE_ALIGN, NULL);
if (prot->rsk_prot->slab == NULL) {
- printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
- prot->name);
+ pr_crit("%s: Can't create request sock SLAB cache!\n",
+ prot->name);
goto out_free_request_sock_slab_name;
}
}
@@ -2576,7 +2569,7 @@ static char proto_method_implemented(const void *method)
}
static long sock_prot_memory_allocated(struct proto *proto)
{
- return proto->memory_allocated != NULL ? proto_memory_allocated(proto): -1L;
+ return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}
static char *sock_prot_memory_pressure(struct proto *proto)
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index b9868e1fd62c..5fd146720f39 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -10,7 +10,7 @@
#include <linux/inet_diag.h>
#include <linux/sock_diag.h>
-static struct sock_diag_handler *sock_diag_handlers[AF_MAX];
+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
static DEFINE_MUTEX(sock_diag_table_mutex);
@@ -70,7 +70,7 @@ void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlms
}
EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat);
-int sock_diag_register(struct sock_diag_handler *hndl)
+int sock_diag_register(const struct sock_diag_handler *hndl)
{
int err = 0;
@@ -88,7 +88,7 @@ int sock_diag_register(struct sock_diag_handler *hndl)
}
EXPORT_SYMBOL_GPL(sock_diag_register);
-void sock_diag_unregister(struct sock_diag_handler *hnld)
+void sock_diag_unregister(const struct sock_diag_handler *hnld)
{
int family = hnld->family;
@@ -102,7 +102,7 @@ void sock_diag_unregister(struct sock_diag_handler *hnld)
}
EXPORT_SYMBOL_GPL(sock_diag_unregister);
-static inline struct sock_diag_handler *sock_diag_lock_handler(int family)
+static inline const struct sock_diag_handler *sock_diag_lock_handler(int family)
{
if (sock_diag_handlers[family] == NULL)
request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
@@ -112,7 +112,7 @@ static inline struct sock_diag_handler *sock_diag_lock_handler(int family)
return sock_diag_handlers[family];
}
-static inline void sock_diag_unlock_handler(struct sock_diag_handler *h)
+static inline void sock_diag_unlock_handler(const struct sock_diag_handler *h)
{
mutex_unlock(&sock_diag_table_mutex);
}
@@ -121,7 +121,7 @@ static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
int err;
struct sock_diag_req *req = NLMSG_DATA(nlh);
- struct sock_diag_handler *hndl;
+ const struct sock_diag_handler *hndl;
if (nlmsg_len(nlh) < sizeof(*req))
return -EINVAL;
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 0c2850874254..a7c36845b123 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -14,6 +14,7 @@
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/kmemleak.h>
#include <net/ip.h>
#include <net/sock.h>
@@ -202,12 +203,6 @@ static struct ctl_table netns_core_table[] = {
{ }
};
-__net_initdata struct ctl_path net_core_path[] = {
- { .procname = "net", },
- { .procname = "core", },
- { },
-};
-
static __net_init int sysctl_core_net_init(struct net *net)
{
struct ctl_table *tbl;
@@ -223,8 +218,7 @@ static __net_init int sysctl_core_net_init(struct net *net)
tbl[0].data = &net->core.sysctl_somaxconn;
}
- net->core.sysctl_hdr = register_net_sysctl_table(net,
- net_core_path, tbl);
+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
if (net->core.sysctl_hdr == NULL)
goto err_reg;
@@ -254,10 +248,7 @@ static __net_initdata struct pernet_operations sysctl_core_ops = {
static __init int sysctl_core_init(void)
{
- static struct ctl_table empty[1];
-
- register_sysctl_paths(net_core_path, empty);
- register_net_sysctl_rotable(net_core_path, net_core_table);
+ register_net_sysctl(&init_net, "net/core", net_core_table);
return register_pernet_subsys(&sysctl_core_ops);
}
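The replacement API takes a plain slash-separated path instead of a ctl_path array; a minimal registration sketch (hypothetical table name):

	static struct ctl_table_header *my_hdr;

	static int __init my_sysctl_init(void)
	{
		my_hdr = register_net_sysctl(&init_net, "net/mytbl", my_table);
		return my_hdr ? 0 : -ENOMEM;
	}

	static void my_sysctl_exit(void)
	{
		unregister_net_sysctl_table(my_hdr);
	}

The same conversion repeats below for dccp and decnet.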
diff --git a/net/core/utils.c b/net/core/utils.c
index dc3c3faff2f4..39895a65e54a 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -58,14 +58,11 @@ __be32 in_aton(const char *str)
int i;
l = 0;
- for (i = 0; i < 4; i++)
- {
+ for (i = 0; i < 4; i++) {
l <<= 8;
- if (*str != '\0')
- {
+ if (*str != '\0') {
val = 0;
- while (*str != '\0' && *str != '.' && *str != '\n')
- {
+ while (*str != '\0' && *str != '.' && *str != '\n') {
val *= 10;
val += *str - '0';
str++;
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index d86053002c16..656c7c75b192 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -178,6 +178,7 @@ static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
[DCB_ATTR_IEEE_ETS] = {.len = sizeof(struct ieee_ets)},
[DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)},
[DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED},
+ [DCB_ATTR_IEEE_MAXRATE] = {.len = sizeof(struct ieee_maxrate)},
};
static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
@@ -703,6 +704,7 @@ static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb,
ret = dcbnl_reply(err, RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP,
pid, seq, flags);
+ dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0);
out:
return ret;
}
@@ -935,6 +937,7 @@ static int dcbnl_setall(struct net_device *netdev, struct nlattr **tb,
ret = dcbnl_reply(netdev->dcbnl_ops->setall(netdev), RTM_SETDCB,
DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL, pid, seq, flags);
+ dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0);
return ret;
}
@@ -1205,13 +1208,15 @@ static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
if (!app)
goto nla_put_failure;
- if (app_info_type)
- NLA_PUT(skb, app_info_type, sizeof(info), &info);
-
- for (i = 0; i < app_count; i++)
- NLA_PUT(skb, app_entry_type, sizeof(struct dcb_app),
- &table[i]);
+ if (app_info_type &&
+ nla_put(skb, app_info_type, sizeof(info), &info))
+ goto nla_put_failure;
+ for (i = 0; i < app_count; i++) {
+ if (nla_put(skb, app_entry_type, sizeof(struct dcb_app),
+ &table[i]))
+ goto nla_put_failure;
+ }
nla_nest_end(skb, app);
}
err = 0;
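The rule for this whole conversion: the removed NLA_PUT*() macros hid a goto inside themselves, so each use becomes an explicit error check against the same label, e.g.:

	/* before: NLA_PUT_U8(skb, attr, value); */
	if (nla_put_u8(skb, attr, value))
		goto nla_put_failure;

The return value is nonzero only when the skb lacks tailroom for the attribute.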
@@ -1230,8 +1235,8 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
int dcbx;
int err = -EMSGSIZE;
- NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);
-
+ if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
+ goto nla_put_failure;
ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
if (!ieee)
goto nla_put_failure;
@@ -1239,15 +1244,28 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
if (ops->ieee_getets) {
struct ieee_ets ets;
err = ops->ieee_getets(netdev, &ets);
- if (!err)
- NLA_PUT(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets);
+ if (!err &&
+ nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
+ goto nla_put_failure;
+ }
+
+ if (ops->ieee_getmaxrate) {
+ struct ieee_maxrate maxrate;
+ err = ops->ieee_getmaxrate(netdev, &maxrate);
+ if (!err) {
+ err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
+ sizeof(maxrate), &maxrate);
+ if (err)
+ goto nla_put_failure;
+ }
}
if (ops->ieee_getpfc) {
struct ieee_pfc pfc;
err = ops->ieee_getpfc(netdev, &pfc);
- if (!err)
- NLA_PUT(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc);
+ if (!err &&
+ nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
+ goto nla_put_failure;
}
app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
@@ -1278,15 +1296,17 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
if (ops->ieee_peer_getets) {
struct ieee_ets ets;
err = ops->ieee_peer_getets(netdev, &ets);
- if (!err)
- NLA_PUT(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets);
+ if (!err &&
+ nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
+ goto nla_put_failure;
}
if (ops->ieee_peer_getpfc) {
struct ieee_pfc pfc;
err = ops->ieee_peer_getpfc(netdev, &pfc);
- if (!err)
- NLA_PUT(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc);
+ if (!err &&
+ nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
+ goto nla_put_failure;
}
if (ops->peer_getappinfo && ops->peer_getapptable) {
@@ -1340,10 +1360,11 @@ static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
&prio, &pgid, &tc_pct, &up_map);
- NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_PGID, pgid);
- NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
- NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
- NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct);
+ if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) ||
+ nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
+ nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
+ nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
+ goto nla_put_failure;
nla_nest_end(skb, tc_nest);
}
@@ -1356,7 +1377,8 @@ static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
else
ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
&tc_pct);
- NLA_PUT_U8(skb, i, tc_pct);
+ if (nla_put_u8(skb, i, tc_pct))
+ goto nla_put_failure;
}
nla_nest_end(skb, pg);
return 0;
@@ -1373,8 +1395,8 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
int dcbx, i, err = -EMSGSIZE;
u8 value;
- NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);
-
+ if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
+ goto nla_put_failure;
cee = nla_nest_start(skb, DCB_ATTR_CEE);
if (!cee)
goto nla_put_failure;
@@ -1401,7 +1423,8 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
- NLA_PUT_U8(skb, i, value);
+ if (nla_put_u8(skb, i, value))
+ goto nla_put_failure;
}
nla_nest_end(skb, pfc_nest);
}
@@ -1454,8 +1477,9 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
i++)
- if (!ops->getfeatcfg(netdev, i, &value))
- NLA_PUT_U8(skb, i, value);
+ if (!ops->getfeatcfg(netdev, i, &value) &&
+ nla_put_u8(skb, i, value))
+ goto nla_put_failure;
nla_nest_end(skb, feat);
}
@@ -1464,15 +1488,17 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
if (ops->cee_peer_getpg) {
struct cee_pg pg;
err = ops->cee_peer_getpg(netdev, &pg);
- if (!err)
- NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg);
+ if (!err &&
+ nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
+ goto nla_put_failure;
}
if (ops->cee_peer_getpfc) {
struct cee_pfc pfc;
err = ops->cee_peer_getpfc(netdev, &pfc);
- if (!err)
- NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc);
+ if (!err &&
+ nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
+ goto nla_put_failure;
}
if (ops->peer_getappinfo && ops->peer_getapptable) {
@@ -1589,6 +1615,14 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
goto err;
}
+ if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) {
+ struct ieee_maxrate *maxrate =
+ nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]);
+ err = ops->ieee_setmaxrate(netdev, maxrate);
+ if (err)
+ goto err;
+ }
+
if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
err = ops->ieee_setpfc(netdev, pfc);
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 70bfaf2d1965..8c67bedf85b0 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -100,7 +100,7 @@ static void ccid3_update_send_interval(struct ccid3_hc_tx_sock *hc)
DCCP_BUG_ON(hc->tx_t_ipi == 0);
ccid3_pr_debug("t_ipi=%u, s=%u, X=%u\n", hc->tx_t_ipi,
- hc->tx_s, (unsigned)(hc->tx_x >> 6));
+ hc->tx_s, (unsigned int)(hc->tx_x >> 6));
}
static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hc, ktime_t now)
@@ -153,9 +153,9 @@ static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp)
if (hc->tx_x != old_x) {
ccid3_pr_debug("X_prev=%u, X_now=%u, X_calc=%u, "
- "X_recv=%u\n", (unsigned)(old_x >> 6),
- (unsigned)(hc->tx_x >> 6), hc->tx_x_calc,
- (unsigned)(hc->tx_x_recv >> 6));
+ "X_recv=%u\n", (unsigned int)(old_x >> 6),
+ (unsigned int)(hc->tx_x >> 6), hc->tx_x_calc,
+ (unsigned int)(hc->tx_x_recv >> 6));
ccid3_update_send_interval(hc);
}
@@ -425,8 +425,8 @@ done_computing_x:
"p=%u, X_calc=%u, X_recv=%u, X=%u\n",
dccp_role(sk), sk, hc->tx_rtt, r_sample,
hc->tx_s, hc->tx_p, hc->tx_x_calc,
- (unsigned)(hc->tx_x_recv >> 6),
- (unsigned)(hc->tx_x >> 6));
+ (unsigned int)(hc->tx_x_recv >> 6),
+ (unsigned int)(hc->tx_x >> 6));
/* unschedule no feedback timer */
sk_stop_timer(sk, &hc->tx_no_feedback_timer);
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 29d6bb629a6c..9040be049d8c 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -75,7 +75,7 @@ extern void dccp_time_wait(struct sock *sk, int state, int timeo);
* state, about 60 seconds */
/* RFC 1122, 4.2.3.1 initial RTO value */
-#define DCCP_TIMEOUT_INIT ((unsigned)(3 * HZ))
+#define DCCP_TIMEOUT_INIT ((unsigned int)(3 * HZ))
/*
* The maximum back-off value for retransmissions. This is needed for
@@ -84,7 +84,7 @@ extern void dccp_time_wait(struct sock *sk, int state, int timeo);
* - feature-negotiation retransmission (sec. 6.6.3),
* - Acks in client-PARTOPEN state (sec. 8.1.5).
*/
-#define DCCP_RTO_MAX ((unsigned)(64 * HZ))
+#define DCCP_RTO_MAX ((unsigned int)(64 * HZ))
/*
* RTT sampling: sanity bounds and fallback RTT value from RFC 4340, section 3.4
@@ -287,9 +287,9 @@ extern struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
extern int dccp_child_process(struct sock *parent, struct sock *child,
struct sk_buff *skb);
extern int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
- struct dccp_hdr *dh, unsigned len);
+ struct dccp_hdr *dh, unsigned int len);
extern int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
- const struct dccp_hdr *dh, const unsigned len);
+ const struct dccp_hdr *dh, const unsigned int len);
extern int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
extern void dccp_destroy_sock(struct sock *sk);
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 51d5fe5fffba..bc93a333931e 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -285,7 +285,7 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
}
static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
- const struct dccp_hdr *dh, const unsigned len)
+ const struct dccp_hdr *dh, const unsigned int len)
{
struct dccp_sock *dp = dccp_sk(sk);
@@ -366,7 +366,7 @@ discard:
}
int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
- const struct dccp_hdr *dh, const unsigned len)
+ const struct dccp_hdr *dh, const unsigned int len)
{
if (dccp_check_seqno(sk, skb))
goto discard;
@@ -388,7 +388,7 @@ EXPORT_SYMBOL_GPL(dccp_rcv_established);
static int dccp_rcv_request_sent_state_process(struct sock *sk,
struct sk_buff *skb,
const struct dccp_hdr *dh,
- const unsigned len)
+ const unsigned int len)
{
/*
* Step 4: Prepare sequence numbers in REQUEST
@@ -521,7 +521,7 @@ unable_to_proceed:
static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
struct sk_buff *skb,
const struct dccp_hdr *dh,
- const unsigned len)
+ const unsigned int len)
{
struct dccp_sock *dp = dccp_sk(sk);
u32 sample = dp->dccps_options_received.dccpor_timestamp_echo;
@@ -572,7 +572,7 @@ static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
}
int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
- struct dccp_hdr *dh, unsigned len)
+ struct dccp_hdr *dh, unsigned int len)
{
struct dccp_sock *dp = dccp_sk(sk);
struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index caf6e1734b62..07f5579ca756 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -574,6 +574,11 @@ static void dccp_v4_reqsk_destructor(struct request_sock *req)
kfree(inet_rsk(req)->opt);
}
+void dccp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
+{
+}
+EXPORT_SYMBOL(dccp_syn_ack_timeout);
+
static struct request_sock_ops dccp_request_sock_ops __read_mostly = {
.family = PF_INET,
.obj_size = sizeof(struct dccp_request_sock),
@@ -581,6 +586,7 @@ static struct request_sock_ops dccp_request_sock_ops __read_mostly = {
.send_ack = dccp_reqsk_send_ack,
.destructor = dccp_v4_reqsk_destructor,
.send_reset = dccp_v4_ctl_send_reset,
+ .syn_ack_timeout = dccp_syn_ack_timeout,
};
int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 4dc588f520e0..fa9512d86f3b 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -343,6 +343,7 @@ static struct request_sock_ops dccp6_request_sock_ops = {
.send_ack = dccp_reqsk_send_ack,
.destructor = dccp_v6_reqsk_destructor,
.send_reset = dccp_v6_ctl_send_reset,
+ .syn_ack_timeout = dccp_syn_ack_timeout,
};
static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
@@ -579,7 +580,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
newnp->pktoptions = NULL;
if (ireq6->pktopts != NULL) {
newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
- kfree_skb(ireq6->pktopts);
+ consume_skb(ireq6->pktopts);
ireq6->pktopts = NULL;
if (newnp->pktoptions)
skb_set_owner_r(newnp->pktoptions, newsk);
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 7065c0ae1e7b..6c7c78b83940 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -848,7 +848,7 @@ int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
default:
dccp_pr_debug("packet_type=%s\n",
dccp_packet_name(dh->dccph_type));
- sk_eat_skb(sk, skb, 0);
+ sk_eat_skb(sk, skb, false);
}
verify_sock_status:
if (sock_flag(sk, SOCK_DONE)) {
@@ -905,7 +905,7 @@ verify_sock_status:
len = skb->len;
found_fin_ok:
if (!(flags & MSG_PEEK))
- sk_eat_skb(sk, skb, 0);
+ sk_eat_skb(sk, skb, false);
break;
} while (1);
out:
diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c
index 42348824ee31..607ab71b5a0c 100644
--- a/net/dccp/sysctl.c
+++ b/net/dccp/sysctl.c
@@ -98,18 +98,11 @@ static struct ctl_table dccp_default_table[] = {
{ }
};
-static struct ctl_path dccp_path[] = {
- { .procname = "net", },
- { .procname = "dccp", },
- { .procname = "default", },
- { }
-};
-
static struct ctl_table_header *dccp_table_header;
int __init dccp_sysctl_init(void)
{
- dccp_table_header = register_sysctl_paths(dccp_path,
+ dccp_table_header = register_net_sysctl(&init_net, "net/dccp/default",
dccp_default_table);
return dccp_table_header != NULL ? 0 : -ENOMEM;
@@ -118,7 +111,7 @@ int __init dccp_sysctl_init(void)
void dccp_sysctl_exit(void)
{
if (dccp_table_header != NULL) {
- unregister_sysctl_table(dccp_table_header);
+ unregister_net_sysctl_table(dccp_table_header);
dccp_table_header = NULL;
}
}
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 4136987d94da..2ba1a2814c24 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -250,7 +250,7 @@ static void dn_unhash_sock_bh(struct sock *sk)
static struct hlist_head *listen_hash(struct sockaddr_dn *addr)
{
int i;
- unsigned hash = addr->sdn_objnum;
+ unsigned int hash = addr->sdn_objnum;
if (hash == 0) {
hash = addr->sdn_objnamel;
@@ -1844,9 +1844,9 @@ static inline int dn_queue_too_long(struct dn_scp *scp, struct sk_buff_head *que
* inclusion (or not) of the two 16 bit acknowledgement fields so it doesn't
* make much practical difference.
*/
-unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu)
+unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu)
{
- unsigned mss = 230 - DN_MAX_NSP_DATA_HEADER;
+ unsigned int mss = 230 - DN_MAX_NSP_DATA_HEADER;
if (dev) {
struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
mtu -= LL_RESERVED_SPACE(dev);
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index c00e3077988c..f3924ab1e019 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -209,15 +209,7 @@ static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms *
struct dn_dev_sysctl_table *t;
int i;
-#define DN_CTL_PATH_DEV 3
-
- struct ctl_path dn_ctl_path[] = {
- { .procname = "net", },
- { .procname = "decnet", },
- { .procname = "conf", },
- { /* to be set */ },
- { },
- };
+ char path[sizeof("net/decnet/conf/") + IFNAMSIZ];
t = kmemdup(&dn_dev_sysctl, sizeof(*t), GFP_KERNEL);
if (t == NULL)
@@ -228,15 +220,12 @@ static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms *
t->dn_dev_vars[i].data = ((char *)parms) + offset;
}
- if (dev) {
- dn_ctl_path[DN_CTL_PATH_DEV].procname = dev->name;
- } else {
- dn_ctl_path[DN_CTL_PATH_DEV].procname = parms->name;
- }
+ snprintf(path, sizeof(path), "net/decnet/conf/%s",
+ dev ? dev->name : parms->name);
t->dn_dev_vars[0].extra1 = (void *)dev;
- t->sysctl_header = register_sysctl_paths(dn_ctl_path, t->dn_dev_vars);
+ t->sysctl_header = register_net_sysctl(&init_net, path, t->dn_dev_vars);
if (t->sysctl_header == NULL)
kfree(t);
else
@@ -248,7 +237,7 @@ static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms)
if (parms->sysctl) {
struct dn_dev_sysctl_table *t = parms->sysctl;
parms->sysctl = NULL;
- unregister_sysctl_table(t->sysctl_header);
+ unregister_net_sysctl_table(t->sysctl_header);
kfree(t);
}
}
@@ -694,13 +683,13 @@ static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa,
ifm->ifa_scope = ifa->ifa_scope;
ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
- if (ifa->ifa_address)
- NLA_PUT_LE16(skb, IFA_ADDRESS, ifa->ifa_address);
- if (ifa->ifa_local)
- NLA_PUT_LE16(skb, IFA_LOCAL, ifa->ifa_local);
- if (ifa->ifa_label[0])
- NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label);
-
+ if ((ifa->ifa_address &&
+ nla_put_le16(skb, IFA_ADDRESS, ifa->ifa_address)) ||
+ (ifa->ifa_local &&
+ nla_put_le16(skb, IFA_LOCAL, ifa->ifa_local)) ||
+ (ifa->ifa_label[0] &&
+ nla_put_string(skb, IFA_LABEL, ifa->ifa_label)))
+ goto nla_put_failure;
return nlmsg_end(skb, nlh);
nla_put_failure:
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 9e885f180b60..7eaf98799729 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -302,11 +302,12 @@ struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, struct dn_kern_rta
struct rtattr *attr = RTA_DATA(rta->rta_mx);
while(RTA_OK(attr, attrlen)) {
- unsigned flavour = attr->rta_type;
+ unsigned int flavour = attr->rta_type;
+
if (flavour) {
if (flavour > RTAX_MAX)
goto err_inval;
- fi->fib_metrics[flavour-1] = *(unsigned*)RTA_DATA(attr);
+ fi->fib_metrics[flavour-1] = *(unsigned int *)RTA_DATA(attr);
}
attr = RTA_NEXT(attr, attrlen);
}
@@ -437,9 +438,8 @@ int dn_fib_semantic_match(int type, struct dn_fib_info *fi, const struct flowidn
res->fi = NULL;
return 1;
default:
- if (net_ratelimit())
- printk("DECnet: impossible routing event : dn_fib_semantic_match type=%d\n",
- type);
+ net_err_ratelimited("DECnet: impossible routing event : dn_fib_semantic_match type=%d\n",
+ type);
res->fi = NULL;
return -EINVAL;
}
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index ee7013f24fca..ac90f658586c 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -162,8 +162,8 @@ static int dn_neigh_construct(struct neighbour *neigh)
else if ((dev->type == ARPHRD_ETHER) || (dev->type == ARPHRD_LOOPBACK))
dn_dn2eth(neigh->ha, dn->addr);
else {
- if (net_ratelimit())
- printk(KERN_DEBUG "Trying to create neigh for hw %d\n", dev->type);
+ net_dbg_ratelimited("Trying to create neigh for hw %d\n",
+ dev->type);
return -EINVAL;
}
@@ -236,15 +236,13 @@ static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb)
if (skb_headroom(skb) < headroom) {
struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
if (skb2 == NULL) {
- if (net_ratelimit())
- printk(KERN_CRIT "dn_long_output: no memory\n");
+ net_crit_ratelimited("dn_long_output: no memory\n");
kfree_skb(skb);
return -ENOBUFS;
}
kfree_skb(skb);
skb = skb2;
- if (net_ratelimit())
- printk(KERN_INFO "dn_long_output: Increasing headroom\n");
+ net_info_ratelimited("dn_long_output: Increasing headroom\n");
}
data = skb_push(skb, sizeof(struct dn_long_packet) + 3);
@@ -281,15 +279,13 @@ static int dn_short_output(struct neighbour *neigh, struct sk_buff *skb)
if (skb_headroom(skb) < headroom) {
struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
if (skb2 == NULL) {
- if (net_ratelimit())
- printk(KERN_CRIT "dn_short_output: no memory\n");
+ net_crit_ratelimited("dn_short_output: no memory\n");
kfree_skb(skb);
return -ENOBUFS;
}
kfree_skb(skb);
skb = skb2;
- if (net_ratelimit())
- printk(KERN_INFO "dn_short_output: Increasing headroom\n");
+ net_info_ratelimited("dn_short_output: Increasing headroom\n");
}
data = skb_push(skb, sizeof(struct dn_short_packet) + 2);
@@ -322,15 +318,13 @@ static int dn_phase3_output(struct neighbour *neigh, struct sk_buff *skb)
if (skb_headroom(skb) < headroom) {
struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
if (skb2 == NULL) {
- if (net_ratelimit())
- printk(KERN_CRIT "dn_phase3_output: no memory\n");
+ net_crit_ratelimited("dn_phase3_output: no memory\n");
kfree_skb(skb);
return -ENOBUFS;
}
kfree_skb(skb);
skb = skb2;
- if (net_ratelimit())
- printk(KERN_INFO "dn_phase3_output: Increasing headroom\n");
+ net_info_ratelimited("dn_phase3_output: Increasing headroom\n");
}
data = skb_push(skb, sizeof(struct dn_short_packet) + 2);
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index f6544b2c91b0..c344163e6ac0 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -80,12 +80,15 @@ extern int decnet_log_martians;
static void dn_log_martian(struct sk_buff *skb, const char *msg)
{
- if (decnet_log_martians && net_ratelimit()) {
+ if (decnet_log_martians) {
char *devname = skb->dev ? skb->dev->name : "???";
struct dn_skb_cb *cb = DN_SKB_CB(skb);
- printk(KERN_INFO "DECnet: Martian packet (%s) dev=%s src=0x%04hx dst=0x%04hx srcport=0x%04hx dstport=0x%04hx\n",
- msg, devname, le16_to_cpu(cb->src), le16_to_cpu(cb->dst),
- le16_to_cpu(cb->src_port), le16_to_cpu(cb->dst_port));
+ net_info_ratelimited("DECnet: Martian packet (%s) dev=%s src=0x%04hx dst=0x%04hx srcport=0x%04hx dstport=0x%04hx\n",
+ msg, devname,
+ le16_to_cpu(cb->src),
+ le16_to_cpu(cb->dst),
+ le16_to_cpu(cb->src_port),
+ le16_to_cpu(cb->dst_port));
}
}
@@ -588,7 +591,7 @@ static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig
number of warnings when compiling with -W --ANK
*/
if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
- (unsigned)sk->sk_rcvbuf) {
+ (unsigned int)sk->sk_rcvbuf) {
err = -ENOMEM;
goto out;
}
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index e446e85e64a6..564a6ad13ce7 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -1,4 +1,3 @@
-
/*
* DECnet An implementation of the DECnet protocol suite for the LINUX
* operating system. DECnet is implemented using the BSD Socket
@@ -209,7 +208,7 @@ static void dn_nsp_rtt(struct sock *sk, long rtt)
*
* Returns: The number of times the packet has been sent previously
*/
-static inline unsigned dn_nsp_clone_and_send(struct sk_buff *skb,
+static inline unsigned int dn_nsp_clone_and_send(struct sk_buff *skb,
gfp_t gfp)
{
struct dn_skb_cb *cb = DN_SKB_CB(skb);
@@ -240,7 +239,7 @@ void dn_nsp_output(struct sock *sk)
{
struct dn_scp *scp = DN_SK(sk);
struct sk_buff *skb;
- unsigned reduce_win = 0;
+ unsigned int reduce_win = 0;
/*
* First we check for otherdata/linkservice messages
@@ -554,8 +553,8 @@ static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg,
unsigned char *msg;
if ((dst == NULL) || (rem == 0)) {
- if (net_ratelimit())
- printk(KERN_DEBUG "DECnet: dn_nsp_do_disc: BUG! Please report this to SteveW@ACM.org rem=%u dst=%p\n", le16_to_cpu(rem), dst);
+ net_dbg_ratelimited("DECnet: dn_nsp_do_disc: BUG! Please report this to SteveW@ACM.org rem=%u dst=%p\n",
+ le16_to_cpu(rem), dst);
return;
}
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 80a3de4906d3..586302e557ad 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -122,7 +122,7 @@ static int dn_route_input(struct sk_buff *);
static void dn_run_flush(unsigned long dummy);
static struct dn_rt_hash_bucket *dn_rt_hash_table;
-static unsigned dn_rt_hash_mask;
+static unsigned int dn_rt_hash_mask;
static struct timer_list dn_route_timer;
static DEFINE_TIMER(dn_rt_flush_timer, dn_run_flush, 0, 0);
@@ -149,13 +149,13 @@ static void dn_dst_destroy(struct dst_entry *dst)
dst_destroy_metrics_generic(dst);
}
-static __inline__ unsigned dn_hash(__le16 src, __le16 dst)
+static __inline__ unsigned int dn_hash(__le16 src, __le16 dst)
{
__u16 tmp = (__u16 __force)(src ^ dst);
tmp ^= (tmp >> 3);
tmp ^= (tmp >> 5);
tmp ^= (tmp >> 10);
- return dn_rt_hash_mask & (unsigned)tmp;
+ return dn_rt_hash_mask & (unsigned int)tmp;
}
static inline void dnrt_free(struct dn_route *rt)
@@ -297,7 +297,7 @@ static inline int compare_keys(struct flowidn *fl1, struct flowidn *fl2)
(fl1->flowidn_iif ^ fl2->flowidn_iif)) == 0;
}
-static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp)
+static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_route **rp)
{
struct dn_route *rth;
struct dn_route __rcu **rthp;
@@ -748,8 +748,7 @@ static int dn_output(struct sk_buff *skb)
dn_to_neigh_output);
error:
- if (net_ratelimit())
- printk(KERN_DEBUG "dn_output: This should not happen\n");
+ net_dbg_ratelimited("dn_output: This should not happen\n");
kfree_skb(skb);
@@ -807,12 +806,10 @@ drop:
*/
static int dn_rt_bug(struct sk_buff *skb)
{
- if (net_ratelimit()) {
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
- printk(KERN_DEBUG "dn_rt_bug: skb from:%04x to:%04x\n",
- le16_to_cpu(cb->src), le16_to_cpu(cb->dst));
- }
+ net_dbg_ratelimited("dn_rt_bug: skb from:%04x to:%04x\n",
+ le16_to_cpu(cb->src), le16_to_cpu(cb->dst));
kfree_skb(skb);
@@ -934,8 +931,8 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *o
struct dn_route *rt = NULL;
struct net_device *dev_out = NULL, *dev;
struct neighbour *neigh = NULL;
- unsigned hash;
- unsigned flags = 0;
+ unsigned int hash;
+ unsigned int flags = 0;
struct dn_fib_res res = { .fi = NULL, .type = RTN_UNICAST };
int err;
int free_res = 0;
@@ -1209,7 +1206,7 @@ e_neighbour:
*/
static int __dn_route_output_key(struct dst_entry **pprt, const struct flowidn *flp, int flags)
{
- unsigned hash = dn_hash(flp->saddr, flp->daddr);
+ unsigned int hash = dn_hash(flp->saddr, flp->daddr);
struct dn_route *rt = NULL;
if (!(flags & MSG_TRYHARD)) {
@@ -1275,7 +1272,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
struct net_device *out_dev = NULL;
struct dn_dev *dn_db;
struct neighbour *neigh = NULL;
- unsigned hash;
+ unsigned int hash;
int flags = 0;
__le16 gateway = 0;
__le16 local_src = 0;
@@ -1327,9 +1324,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
out_dev = DN_FIB_RES_DEV(res);
if (out_dev == NULL) {
- if (net_ratelimit())
- printk(KERN_CRIT "Bug in dn_route_input_slow() "
- "No output device\n");
+ net_crit_ratelimited("Bug in dn_route_input_slow() No output device\n");
goto e_inval;
}
dev_hold(out_dev);
@@ -1490,7 +1485,7 @@ static int dn_route_input(struct sk_buff *skb)
{
struct dn_route *rt;
struct dn_skb_cb *cb = DN_SKB_CB(skb);
- unsigned hash = dn_hash(cb->src, cb->dst);
+ unsigned int hash = dn_hash(cb->src, cb->dst);
if (skb_dst(skb))
return 0;
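For clarity, dn_hash() above is a 16-bit xor-fold: the two little-endian node addresses are xored, the result is folded onto itself three times, and the bucket mask selects the slot. A standalone restatement of the same function:

	static unsigned int dn_hash_sketch(u16 src, u16 dst, unsigned int mask)
	{
		u16 tmp = src ^ dst;

		tmp ^= tmp >> 3;	/* fold high bits into the low bits */
		tmp ^= tmp >> 5;
		tmp ^= tmp >> 10;
		return mask & (unsigned int)tmp;	/* mask is 2^n - 1 */
	}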
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index f65c9ddaee41..e65f2c856e06 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -177,11 +177,11 @@ static int dn_fib_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
return 1;
}
-unsigned dnet_addr_type(__le16 addr)
+unsigned int dnet_addr_type(__le16 addr)
{
struct flowidn fld = { .daddr = addr };
struct dn_fib_res res;
- unsigned ret = RTN_UNICAST;
+ unsigned int ret = RTN_UNICAST;
struct dn_fib_table *tb = dn_fib_get_table(RT_TABLE_LOCAL, 0);
res.r = NULL;
@@ -204,11 +204,11 @@ static int dn_fib_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
frh->src_len = r->src_len;
frh->tos = 0;
- if (r->dst_len)
- NLA_PUT_LE16(skb, FRA_DST, r->dst);
- if (r->src_len)
- NLA_PUT_LE16(skb, FRA_SRC, r->src);
-
+ if ((r->dst_len &&
+ nla_put_le16(skb, FRA_DST, r->dst)) ||
+ (r->src_len &&
+ nla_put_le16(skb, FRA_SRC, r->src)))
+ goto nla_put_failure;
return 0;
nla_put_failure:
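This hunk shows the shape of the whole NLA_PUT conversion in this patch. The old macros hid a goto inside a do/while, roughly as below (a sketch from memory of the old include/net/netlink.h, not quoted from this patch):

	#define NLA_PUT(skb, attrtype, attrlen, data)				\
		do {								\
			if (unlikely(nla_put(skb, attrtype, attrlen, data) < 0)) \
				goto nla_put_failure;				\
		} while (0)

The replacements are the plain nla_put_*() functions, which return 0 or -EMSGSIZE, so call sites chain them with || and take the goto to nla_put_failure explicitly, as the new dn_fib_rule_fill() does above.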
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index a9a62f225a6b..650f3380c98a 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -836,8 +836,8 @@ struct dn_fib_table *dn_fib_get_table(u32 n, int create)
if (!create)
return NULL;
- if (in_interrupt() && net_ratelimit()) {
- printk(KERN_DEBUG "DECnet: BUG! Attempt to create routing table from interrupt\n");
+ if (in_interrupt()) {
+ net_dbg_ratelimited("DECnet: BUG! Attempt to create routing table from interrupt\n");
return NULL;
}
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 1531135130db..44b890936fc0 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -57,8 +57,7 @@ nlmsg_failure:
if (skb)
kfree_skb(skb);
*errp = -ENOMEM;
- if (net_ratelimit())
- printk(KERN_ERR "dn_rtmsg: error creating netlink message\n");
+ net_err_ratelimited("dn_rtmsg: error creating netlink message\n");
return NULL;
}
diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
index 02e75d11cfbb..a55eeccaa72f 100644
--- a/net/decnet/sysctl_net_decnet.c
+++ b/net/decnet/sysctl_net_decnet.c
@@ -351,20 +351,14 @@ static ctl_table dn_table[] = {
{ }
};
-static struct ctl_path dn_path[] = {
- { .procname = "net", },
- { .procname = "decnet", },
- { }
-};
-
void dn_register_sysctl(void)
{
- dn_table_header = register_sysctl_paths(dn_path, dn_table);
+ dn_table_header = register_net_sysctl(&init_net, "net/decnet", dn_table);
}
void dn_unregister_sysctl(void)
{
- unregister_sysctl_table(dn_table_header);
+ unregister_net_sysctl_table(dn_table_header);
}
#else /* CONFIG_SYSCTL */
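register_net_sysctl() takes the path as a plain string, replacing the two-element ctl_path array deleted above, and unregister_net_sysctl_table() pairs with it. A minimal usage sketch under those assumptions:

	static struct ctl_table_header *hdr;

	static void example_register(void)
	{
		/* "net/decnet" stands in for the old dn_path[] array */
		hdr = register_net_sysctl(&init_net, "net/decnet", dn_table);
	}

	static void example_unregister(void)
	{
		unregister_net_sysctl_table(hdr);
	}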
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index c73bba326d70..d9507dd05818 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -38,7 +38,7 @@ MODULE_DESCRIPTION("DNS Resolver");
MODULE_AUTHOR("Wang Lei");
MODULE_LICENSE("GPL");
-unsigned dns_resolver_debug;
+unsigned int dns_resolver_debug;
module_param_named(debug, dns_resolver_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, "DNS Resolver debugging mask");
@@ -249,9 +249,6 @@ static int __init init_dns_resolver(void)
struct key *keyring;
int ret;
- printk(KERN_NOTICE "Registering the %s key type\n",
- key_type_dns_resolver.name);
-
/* create an override credential set with a special thread keyring in
* which DNS requests are cached
*
@@ -301,8 +298,6 @@ static void __exit exit_dns_resolver(void)
key_revoke(dns_resolver_cache->thread_keyring);
unregister_key_type(&key_type_dns_resolver);
put_cred(dns_resolver_cache);
- printk(KERN_NOTICE "Unregistered %s key type\n",
- key_type_dns_resolver.name);
}
module_init(init_dns_resolver)
diff --git a/net/dns_resolver/internal.h b/net/dns_resolver/internal.h
index 189ca9e9b785..17c7886b5b3a 100644
--- a/net/dns_resolver/internal.h
+++ b/net/dns_resolver/internal.h
@@ -31,7 +31,7 @@ extern const struct cred *dns_resolver_cache;
/*
* debug tracing
*/
-extern unsigned dns_resolver_debug;
+extern unsigned int dns_resolver_debug;
#define kdebug(FMT, ...) \
do { \
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 56cf9b8e1c7c..e32083d5d8f8 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -66,7 +66,7 @@ static int dsa_slave_open(struct net_device *dev)
if (!(master->flags & IFF_UP))
return -ENETDOWN;
- if (compare_ether_addr(dev->dev_addr, master->dev_addr)) {
+ if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
err = dev_uc_add(master, dev->dev_addr);
if (err < 0)
goto out;
@@ -89,7 +89,7 @@ clear_allmulti:
if (dev->flags & IFF_ALLMULTI)
dev_set_allmulti(master, -1);
del_unicast:
- if (compare_ether_addr(dev->dev_addr, master->dev_addr))
+ if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
dev_uc_del(master, dev->dev_addr);
out:
return err;
@@ -107,7 +107,7 @@ static int dsa_slave_close(struct net_device *dev)
if (dev->flags & IFF_PROMISC)
dev_set_promiscuity(master, -1);
- if (compare_ether_addr(dev->dev_addr, master->dev_addr))
+ if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
dev_uc_del(master, dev->dev_addr);
return 0;
@@ -146,13 +146,13 @@ static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
if (!(dev->flags & IFF_UP))
goto out;
- if (compare_ether_addr(addr->sa_data, master->dev_addr)) {
+ if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
err = dev_uc_add(master, addr->sa_data);
if (err < 0)
return err;
}
- if (compare_ether_addr(dev->dev_addr, master->dev_addr))
+ if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
dev_uc_del(master, dev->dev_addr);
out:
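Every compare_ether_addr() test in this file flips polarity because the two helpers answer opposite questions: compare_ether_addr() is memcmp-like, so 0 means the addresses match, while ether_addr_equal() is a boolean, so true means they match. At the time of this series the new helper was, in effect:

	static inline bool ether_addr_equal_sketch(const u8 *addr1, const u8 *addr2)
	{
		return !compare_ether_addr(addr1, addr2);	/* 0 == identical */
	}

Hence "if (compare_ether_addr(a, b))", meaning "the addresses differ", becomes "if (!ether_addr_equal(a, b))".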
diff --git a/net/econet/Kconfig b/net/econet/Kconfig
deleted file mode 100644
index 39a2d2975e0e..000000000000
--- a/net/econet/Kconfig
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-# Acorn Econet/AUN protocols
-#
-
-config ECONET
- tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
- depends on EXPERIMENTAL && INET
- ---help---
- Econet is a fairly old and slow networking protocol mainly used by
- Acorn computers to access file and print servers. It uses native
- Econet network cards. AUN is an implementation of the higher level
- parts of Econet that runs over ordinary Ethernet connections, on
- top of the UDP packet protocol, which in turn runs on top of the
- Internet protocol IP.
-
- If you say Y here, you can choose with the next two options whether
- to send Econet/AUN traffic over a UDP Ethernet connection or over
- a native Econet network card.
-
- To compile this driver as a module, choose M here: the module
- will be called econet.
-
-config ECONET_AUNUDP
- bool "AUN over UDP"
- depends on ECONET
- help
- Say Y here if you want to send Econet/AUN traffic over a UDP
- connection (UDP is a packet based protocol that runs on top of the
- Internet protocol IP) using an ordinary Ethernet network card.
-
-config ECONET_NATIVE
- bool "Native Econet"
- depends on ECONET
- help
- Say Y here if you have a native Econet network card installed in
- your computer.
diff --git a/net/econet/Makefile b/net/econet/Makefile
deleted file mode 100644
index 05fae8be2fed..000000000000
--- a/net/econet/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for Econet support code.
-#
-
-obj-$(CONFIG_ECONET) += econet.o
-
-econet-y := af_econet.o
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
deleted file mode 100644
index 71b5edcee401..000000000000
--- a/net/econet/af_econet.c
+++ /dev/null
@@ -1,1172 +0,0 @@
-/*
- * An implementation of the Acorn Econet and AUN protocols.
- * Philip Blundell <philb@gnu.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-#define pr_fmt(fmt) fmt
-
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
-#include <linux/netdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/route.h>
-#include <linux/inet.h>
-#include <linux/etherdevice.h>
-#include <linux/if_arp.h>
-#include <linux/wireless.h>
-#include <linux/skbuff.h>
-#include <linux/udp.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <net/sock.h>
-#include <net/inet_common.h>
-#include <linux/stat.h>
-#include <linux/init.h>
-#include <linux/if_ec.h>
-#include <net/udp.h>
-#include <net/ip.h>
-#include <linux/spinlock.h>
-#include <linux/rcupdate.h>
-#include <linux/bitops.h>
-#include <linux/mutex.h>
-
-#include <linux/uaccess.h>
-
-static const struct proto_ops econet_ops;
-static struct hlist_head econet_sklist;
-static DEFINE_SPINLOCK(econet_lock);
-static DEFINE_MUTEX(econet_mutex);
-
-/* Since there are only 256 possible network numbers (or fewer, depends
- how you count) it makes sense to use a simple lookup table. */
-static struct net_device *net2dev_map[256];
-
-#define EC_PORT_IP 0xd2
-
-#ifdef CONFIG_ECONET_AUNUDP
-static DEFINE_SPINLOCK(aun_queue_lock);
-static struct socket *udpsock;
-#define AUN_PORT 0x8000
-
-struct aunhdr {
- unsigned char code; /* AUN magic protocol byte */
- unsigned char port;
- unsigned char cb;
- unsigned char pad;
- unsigned long handle;
-};
-
-static unsigned long aun_seq;
-
-/* Queue of packets waiting to be transmitted. */
-static struct sk_buff_head aun_queue;
-static struct timer_list ab_cleanup_timer;
-
-#endif /* CONFIG_ECONET_AUNUDP */
-
-/* Per-packet information */
-struct ec_cb {
- struct sockaddr_ec sec;
- unsigned long cookie; /* Supplied by user. */
-#ifdef CONFIG_ECONET_AUNUDP
- int done;
- unsigned long seq; /* Sequencing */
- unsigned long timeout; /* Timeout */
- unsigned long start; /* jiffies */
-#endif
-#ifdef CONFIG_ECONET_NATIVE
- void (*sent)(struct sk_buff *, int result);
-#endif
-};
-
-static void econet_remove_socket(struct hlist_head *list, struct sock *sk)
-{
- spin_lock_bh(&econet_lock);
- sk_del_node_init(sk);
- spin_unlock_bh(&econet_lock);
-}
-
-static void econet_insert_socket(struct hlist_head *list, struct sock *sk)
-{
- spin_lock_bh(&econet_lock);
- sk_add_node(sk, list);
- spin_unlock_bh(&econet_lock);
-}
-
-/*
- * Pull a packet from our receive queue and hand it to the user.
- * If necessary we block.
- */
-
-static int econet_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len, int flags)
-{
- struct sock *sk = sock->sk;
- struct sk_buff *skb;
- size_t copied;
- int err;
-
- msg->msg_namelen = sizeof(struct sockaddr_ec);
-
- mutex_lock(&econet_mutex);
-
- /*
- * Call the generic datagram receiver. This handles all sorts
- * of horrible races and re-entrancy so we can forget about it
- * in the protocol layers.
- *
- * Now it will return ENETDOWN, if device have just gone down,
- * but then it will block.
- */
-
- skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
-
- /*
- * An error occurred so return it. Because skb_recv_datagram()
- * handles the blocking we don't see and worry about blocking
- * retries.
- */
-
- if (skb == NULL)
- goto out;
-
- /*
- * You lose any data beyond the buffer you gave. If it worries a
- * user program they can ask the device for its MTU anyway.
- */
-
- copied = skb->len;
- if (copied > len) {
- copied = len;
- msg->msg_flags |= MSG_TRUNC;
- }
-
- /* We can't use skb_copy_datagram here */
- err = memcpy_toiovec(msg->msg_iov, skb->data, copied);
- if (err)
- goto out_free;
- sk->sk_stamp = skb->tstamp;
-
- if (msg->msg_name)
- memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
-
- /*
- * Free or return the buffer as appropriate. Again this
- * hides all the races and re-entrancy issues from us.
- */
- err = copied;
-
-out_free:
- skb_free_datagram(sk, skb);
-out:
- mutex_unlock(&econet_mutex);
- return err;
-}
-
-/*
- * Bind an Econet socket.
- */
-
-static int econet_bind(struct socket *sock, struct sockaddr *uaddr,
- int addr_len)
-{
- struct sockaddr_ec *sec = (struct sockaddr_ec *)uaddr;
- struct sock *sk;
- struct econet_sock *eo;
-
- /*
- * Check legality
- */
-
- if (addr_len < sizeof(struct sockaddr_ec) ||
- sec->sec_family != AF_ECONET)
- return -EINVAL;
-
- mutex_lock(&econet_mutex);
-
- sk = sock->sk;
- eo = ec_sk(sk);
-
- eo->cb = sec->cb;
- eo->port = sec->port;
- eo->station = sec->addr.station;
- eo->net = sec->addr.net;
-
- mutex_unlock(&econet_mutex);
-
- return 0;
-}
-
-#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
-/*
- * Queue a transmit result for the user to be told about.
- */
-
-static void tx_result(struct sock *sk, unsigned long cookie, int result)
-{
- struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);
- struct ec_cb *eb;
- struct sockaddr_ec *sec;
-
- if (skb == NULL) {
- pr_debug("econet: memory squeeze, transmit result dropped\n");
- return;
- }
-
- eb = (struct ec_cb *)&skb->cb;
- sec = (struct sockaddr_ec *)&eb->sec;
- memset(sec, 0, sizeof(struct sockaddr_ec));
- sec->cookie = cookie;
- sec->type = ECTYPE_TRANSMIT_STATUS | result;
- sec->sec_family = AF_ECONET;
-
- if (sock_queue_rcv_skb(sk, skb) < 0)
- kfree_skb(skb);
-}
-#endif
-
-#ifdef CONFIG_ECONET_NATIVE
-/*
- * Called by the Econet hardware driver when a packet transmit
- * has completed. Tell the user.
- */
-
-static void ec_tx_done(struct sk_buff *skb, int result)
-{
- struct ec_cb *eb = (struct ec_cb *)&skb->cb;
- tx_result(skb->sk, eb->cookie, result);
-}
-#endif
-
-/*
- * Send a packet. We have to work out which device it's going out on
- * and hence whether to use real Econet or the UDP emulation.
- */
-
-static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len)
-{
- struct sockaddr_ec *saddr = (struct sockaddr_ec *)msg->msg_name;
- struct net_device *dev;
- struct ec_addr addr;
- int err;
- unsigned char port, cb;
-#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
- struct sock *sk = sock->sk;
- struct sk_buff *skb;
- struct ec_cb *eb;
-#endif
-#ifdef CONFIG_ECONET_AUNUDP
- struct msghdr udpmsg;
- struct iovec iov[2];
- struct aunhdr ah;
- struct sockaddr_in udpdest;
- __kernel_size_t size;
- mm_segment_t oldfs;
- char *userbuf;
-#endif
-
- /*
- * Check the flags.
- */
-
- if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
- return -EINVAL;
-
- /*
- * Get and verify the address.
- */
-
- mutex_lock(&econet_mutex);
-
- if (saddr == NULL || msg->msg_namelen < sizeof(struct sockaddr_ec)) {
- mutex_unlock(&econet_mutex);
- return -EINVAL;
- }
- addr.station = saddr->addr.station;
- addr.net = saddr->addr.net;
- port = saddr->port;
- cb = saddr->cb;
-
- /* Look for a device with the right network number. */
- dev = net2dev_map[addr.net];
-
- /* If not directly reachable, use some default */
- if (dev == NULL) {
- dev = net2dev_map[0];
- /* No interfaces at all? */
- if (dev == NULL) {
- mutex_unlock(&econet_mutex);
- return -ENETDOWN;
- }
- }
-
- if (dev->type == ARPHRD_ECONET) {
- /* Real hardware Econet. We're not worthy etc. */
-#ifdef CONFIG_ECONET_NATIVE
- unsigned short proto = 0;
- int hlen, tlen;
- int res;
-
- if (len + 15 > dev->mtu) {
- mutex_unlock(&econet_mutex);
- return -EMSGSIZE;
- }
-
- dev_hold(dev);
-
- hlen = LL_RESERVED_SPACE(dev);
- tlen = dev->needed_tailroom;
- skb = sock_alloc_send_skb(sk, len + hlen + tlen,
- msg->msg_flags & MSG_DONTWAIT, &err);
- if (skb == NULL)
- goto out_unlock;
-
- skb_reserve(skb, hlen);
- skb_reset_network_header(skb);
-
- eb = (struct ec_cb *)&skb->cb;
-
- eb->cookie = saddr->cookie;
- eb->sec = *saddr;
- eb->sent = ec_tx_done;
-
- err = -EINVAL;
- res = dev_hard_header(skb, dev, ntohs(proto), &addr, NULL, len);
- if (res < 0)
- goto out_free;
- if (res > 0) {
- struct ec_framehdr *fh;
- /* Poke in our control byte and
- port number. Hack, hack. */
- fh = (struct ec_framehdr *)skb->data;
- fh->cb = cb;
- fh->port = port;
- if (sock->type != SOCK_DGRAM) {
- skb_reset_tail_pointer(skb);
- skb->len = 0;
- }
- }
-
- /* Copy the data. Returns -EFAULT on error */
- err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
- skb->protocol = proto;
- skb->dev = dev;
- skb->priority = sk->sk_priority;
- if (err)
- goto out_free;
-
- err = -ENETDOWN;
- if (!(dev->flags & IFF_UP))
- goto out_free;
-
- /*
- * Now send it
- */
-
- dev_queue_xmit(skb);
- dev_put(dev);
- mutex_unlock(&econet_mutex);
- return len;
-
-out_free:
- kfree_skb(skb);
-out_unlock:
- if (dev)
- dev_put(dev);
-#else
- err = -EPROTOTYPE;
-#endif
- mutex_unlock(&econet_mutex);
-
- return err;
- }
-
-#ifdef CONFIG_ECONET_AUNUDP
- /* AUN virtual Econet. */
-
- if (udpsock == NULL) {
- mutex_unlock(&econet_mutex);
- return -ENETDOWN; /* No socket - can't send */
- }
-
- if (len > 32768) {
- err = -E2BIG;
- goto error;
- }
-
- /* Make up a UDP datagram and hand it off to some higher intellect. */
-
- memset(&udpdest, 0, sizeof(udpdest));
- udpdest.sin_family = AF_INET;
- udpdest.sin_port = htons(AUN_PORT);
-
- /* At the moment we use the stupid Acorn scheme of Econet address
- y.x maps to IP a.b.c.x. This should be replaced with something
- more flexible and more aware of subnet masks. */
- {
- struct in_device *idev;
- unsigned long network = 0;
-
- rcu_read_lock();
- idev = __in_dev_get_rcu(dev);
- if (idev) {
- if (idev->ifa_list)
- network = ntohl(idev->ifa_list->ifa_address) &
- 0xffffff00; /* !!! */
- }
- rcu_read_unlock();
- udpdest.sin_addr.s_addr = htonl(network | addr.station);
- }
-
- memset(&ah, 0, sizeof(ah));
- ah.port = port;
- ah.cb = cb & 0x7f;
- ah.code = 2; /* magic */
-
- /* tack our header on the front of the iovec */
- size = sizeof(struct aunhdr);
- iov[0].iov_base = (void *)&ah;
- iov[0].iov_len = size;
-
- userbuf = vmalloc(len);
- if (userbuf == NULL) {
- err = -ENOMEM;
- goto error;
- }
-
- iov[1].iov_base = userbuf;
- iov[1].iov_len = len;
- err = memcpy_fromiovec(userbuf, msg->msg_iov, len);
- if (err)
- goto error_free_buf;
-
- /* Get a skbuff (no data, just holds our cb information) */
- skb = sock_alloc_send_skb(sk, 0, msg->msg_flags & MSG_DONTWAIT, &err);
- if (skb == NULL)
- goto error_free_buf;
-
- eb = (struct ec_cb *)&skb->cb;
-
- eb->cookie = saddr->cookie;
- eb->timeout = 5 * HZ;
- eb->start = jiffies;
- ah.handle = aun_seq;
- eb->seq = (aun_seq++);
- eb->sec = *saddr;
-
- skb_queue_tail(&aun_queue, skb);
-
- udpmsg.msg_name = (void *)&udpdest;
- udpmsg.msg_namelen = sizeof(udpdest);
- udpmsg.msg_iov = &iov[0];
- udpmsg.msg_iovlen = 2;
- udpmsg.msg_control = NULL;
- udpmsg.msg_controllen = 0;
- udpmsg.msg_flags = 0;
-
- oldfs = get_fs();
- set_fs(KERNEL_DS); /* More privs :-) */
- err = sock_sendmsg(udpsock, &udpmsg, size);
- set_fs(oldfs);
-
-error_free_buf:
- vfree(userbuf);
-error:
-#else
- err = -EPROTOTYPE;
-#endif
- mutex_unlock(&econet_mutex);
-
- return err;
-}
-
-/*
- * Look up the address of a socket.
- */
-
-static int econet_getname(struct socket *sock, struct sockaddr *uaddr,
- int *uaddr_len, int peer)
-{
- struct sock *sk;
- struct econet_sock *eo;
- struct sockaddr_ec *sec = (struct sockaddr_ec *)uaddr;
-
- if (peer)
- return -EOPNOTSUPP;
-
- memset(sec, 0, sizeof(*sec));
- mutex_lock(&econet_mutex);
-
- sk = sock->sk;
- eo = ec_sk(sk);
-
- sec->sec_family = AF_ECONET;
- sec->port = eo->port;
- sec->addr.station = eo->station;
- sec->addr.net = eo->net;
-
- mutex_unlock(&econet_mutex);
-
- *uaddr_len = sizeof(*sec);
- return 0;
-}
-
-static void econet_destroy_timer(unsigned long data)
-{
- struct sock *sk = (struct sock *)data;
-
- if (!sk_has_allocations(sk)) {
- sk_free(sk);
- return;
- }
-
- sk->sk_timer.expires = jiffies + 10 * HZ;
- add_timer(&sk->sk_timer);
- pr_debug("econet: socket destroy delayed\n");
-}
-
-/*
- * Close an econet socket.
- */
-
-static int econet_release(struct socket *sock)
-{
- struct sock *sk;
-
- mutex_lock(&econet_mutex);
-
- sk = sock->sk;
- if (!sk)
- goto out_unlock;
-
- econet_remove_socket(&econet_sklist, sk);
-
- /*
- * Now the socket is dead. No more input will appear.
- */
-
- sk->sk_state_change(sk); /* It is useless. Just for sanity. */
-
- sock_orphan(sk);
-
- /* Purge queues */
-
- skb_queue_purge(&sk->sk_receive_queue);
-
- if (sk_has_allocations(sk)) {
- sk->sk_timer.data = (unsigned long)sk;
- sk->sk_timer.expires = jiffies + HZ;
- sk->sk_timer.function = econet_destroy_timer;
- add_timer(&sk->sk_timer);
-
- goto out_unlock;
- }
-
- sk_free(sk);
-
-out_unlock:
- mutex_unlock(&econet_mutex);
- return 0;
-}
-
-static struct proto econet_proto = {
- .name = "ECONET",
- .owner = THIS_MODULE,
- .obj_size = sizeof(struct econet_sock),
-};
-
-/*
- * Create an Econet socket
- */
-
-static int econet_create(struct net *net, struct socket *sock, int protocol,
- int kern)
-{
- struct sock *sk;
- struct econet_sock *eo;
- int err;
-
- if (!net_eq(net, &init_net))
- return -EAFNOSUPPORT;
-
- /* Econet only provides datagram services. */
- if (sock->type != SOCK_DGRAM)
- return -ESOCKTNOSUPPORT;
-
- sock->state = SS_UNCONNECTED;
-
- err = -ENOBUFS;
- sk = sk_alloc(net, PF_ECONET, GFP_KERNEL, &econet_proto);
- if (sk == NULL)
- goto out;
-
- sk->sk_reuse = 1;
- sock->ops = &econet_ops;
- sock_init_data(sock, sk);
-
- eo = ec_sk(sk);
- sock_reset_flag(sk, SOCK_ZAPPED);
- sk->sk_family = PF_ECONET;
- eo->num = protocol;
-
- econet_insert_socket(&econet_sklist, sk);
- return 0;
-out:
- return err;
-}
-
-/*
- * Handle Econet specific ioctls
- */
-
-static int ec_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg)
-{
- struct ifreq ifr;
- struct ec_device *edev;
- struct net_device *dev;
- struct sockaddr_ec *sec;
- int err;
-
- /*
- * Fetch the caller's info block into kernel space
- */
-
- if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
- return -EFAULT;
-
- dev = dev_get_by_name(&init_net, ifr.ifr_name);
- if (dev == NULL)
- return -ENODEV;
-
- sec = (struct sockaddr_ec *)&ifr.ifr_addr;
-
- mutex_lock(&econet_mutex);
-
- err = 0;
- switch (cmd) {
- case SIOCSIFADDR:
- if (!capable(CAP_NET_ADMIN)) {
- err = -EPERM;
- break;
- }
-
- edev = dev->ec_ptr;
- if (edev == NULL) {
- /* Magic up a new one. */
- edev = kzalloc(sizeof(struct ec_device), GFP_KERNEL);
- if (edev == NULL) {
- err = -ENOMEM;
- break;
- }
- dev->ec_ptr = edev;
- } else
- net2dev_map[edev->net] = NULL;
- edev->station = sec->addr.station;
- edev->net = sec->addr.net;
- net2dev_map[sec->addr.net] = dev;
- if (!net2dev_map[0])
- net2dev_map[0] = dev;
- break;
-
- case SIOCGIFADDR:
- edev = dev->ec_ptr;
- if (edev == NULL) {
- err = -ENODEV;
- break;
- }
- memset(sec, 0, sizeof(struct sockaddr_ec));
- sec->addr.station = edev->station;
- sec->addr.net = edev->net;
- sec->sec_family = AF_ECONET;
- dev_put(dev);
- if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
- err = -EFAULT;
- break;
-
- default:
- err = -EINVAL;
- break;
- }
-
- mutex_unlock(&econet_mutex);
-
- dev_put(dev);
-
- return err;
-}
-
-/*
- * Handle generic ioctls
- */
-
-static int econet_ioctl(struct socket *sock, unsigned int cmd,
- unsigned long arg)
-{
- struct sock *sk = sock->sk;
- void __user *argp = (void __user *)arg;
-
- switch (cmd) {
- case SIOCGSTAMP:
- return sock_get_timestamp(sk, argp);
-
- case SIOCGSTAMPNS:
- return sock_get_timestampns(sk, argp);
-
- case SIOCSIFADDR:
- case SIOCGIFADDR:
- return ec_dev_ioctl(sock, cmd, argp);
-
- }
-
- return -ENOIOCTLCMD;
-}
-
-static const struct net_proto_family econet_family_ops = {
- .family = PF_ECONET,
- .create = econet_create,
- .owner = THIS_MODULE,
-};
-
-static const struct proto_ops econet_ops = {
- .family = PF_ECONET,
- .owner = THIS_MODULE,
- .release = econet_release,
- .bind = econet_bind,
- .connect = sock_no_connect,
- .socketpair = sock_no_socketpair,
- .accept = sock_no_accept,
- .getname = econet_getname,
- .poll = datagram_poll,
- .ioctl = econet_ioctl,
- .listen = sock_no_listen,
- .shutdown = sock_no_shutdown,
- .setsockopt = sock_no_setsockopt,
- .getsockopt = sock_no_getsockopt,
- .sendmsg = econet_sendmsg,
- .recvmsg = econet_recvmsg,
- .mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
-};
-
-#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
-/*
- * Find the listening socket, if any, for the given data.
- */
-
-static struct sock *ec_listening_socket(unsigned char port, unsigned char
- station, unsigned char net)
-{
- struct sock *sk;
- struct hlist_node *node;
-
- spin_lock(&econet_lock);
- sk_for_each(sk, node, &econet_sklist) {
- struct econet_sock *opt = ec_sk(sk);
- if ((opt->port == port || opt->port == 0) &&
- (opt->station == station || opt->station == 0) &&
- (opt->net == net || opt->net == 0)) {
- sock_hold(sk);
- goto found;
- }
- }
- sk = NULL;
-found:
- spin_unlock(&econet_lock);
- return sk;
-}
-
-/*
- * Queue a received packet for a socket.
- */
-
-static int ec_queue_packet(struct sock *sk, struct sk_buff *skb,
- unsigned char stn, unsigned char net,
- unsigned char cb, unsigned char port)
-{
- struct ec_cb *eb = (struct ec_cb *)&skb->cb;
- struct sockaddr_ec *sec = (struct sockaddr_ec *)&eb->sec;
-
- memset(sec, 0, sizeof(struct sockaddr_ec));
- sec->sec_family = AF_ECONET;
- sec->type = ECTYPE_PACKET_RECEIVED;
- sec->port = port;
- sec->cb = cb;
- sec->addr.net = net;
- sec->addr.station = stn;
-
- return sock_queue_rcv_skb(sk, skb);
-}
-#endif
-
-#ifdef CONFIG_ECONET_AUNUDP
-/*
- * Send an AUN protocol response.
- */
-
-static void aun_send_response(__u32 addr, unsigned long seq, int code, int cb)
-{
- struct sockaddr_in sin = {
- .sin_family = AF_INET,
- .sin_port = htons(AUN_PORT),
- .sin_addr = {.s_addr = addr}
- };
- struct aunhdr ah = {.code = code, .cb = cb, .handle = seq};
- struct kvec iov = {.iov_base = (void *)&ah, .iov_len = sizeof(ah)};
- struct msghdr udpmsg;
-
- udpmsg.msg_name = (void *)&sin;
- udpmsg.msg_namelen = sizeof(sin);
- udpmsg.msg_control = NULL;
- udpmsg.msg_controllen = 0;
- udpmsg.msg_flags = 0;
-
- kernel_sendmsg(udpsock, &udpmsg, &iov, 1, sizeof(ah));
-}
-
-
-/*
- * Handle incoming AUN packets. Work out if anybody wants them,
- * and send positive or negative acknowledgements as appropriate.
- */
-
-static void aun_incoming(struct sk_buff *skb, struct aunhdr *ah, size_t len)
-{
- struct iphdr *ip = ip_hdr(skb);
- unsigned char stn = ntohl(ip->saddr) & 0xff;
- struct dst_entry *dst = skb_dst(skb);
- struct ec_device *edev = NULL;
- struct sock *sk = NULL;
- struct sk_buff *newskb;
-
- if (dst)
- edev = dst->dev->ec_ptr;
-
- if (!edev)
- goto bad;
-
- sk = ec_listening_socket(ah->port, stn, edev->net);
- if (sk == NULL)
- goto bad; /* Nobody wants it */
-
- newskb = alloc_skb((len - sizeof(struct aunhdr) + 15) & ~15,
- GFP_ATOMIC);
- if (newskb == NULL) {
- pr_debug("AUN: memory squeeze, dropping packet\n");
- /* Send nack and hope sender tries again */
- goto bad;
- }
-
- memcpy(skb_put(newskb, len - sizeof(struct aunhdr)), (void *)(ah + 1),
- len - sizeof(struct aunhdr));
-
- if (ec_queue_packet(sk, newskb, stn, edev->net, ah->cb, ah->port)) {
- /* Socket is bankrupt. */
- kfree_skb(newskb);
- goto bad;
- }
-
- aun_send_response(ip->saddr, ah->handle, 3, 0);
- sock_put(sk);
- return;
-
-bad:
- aun_send_response(ip->saddr, ah->handle, 4, 0);
- if (sk)
- sock_put(sk);
-}
-
-/*
- * Handle incoming AUN transmit acknowledgements. If the sequence
- * number matches something in our backlog then kill it and tell
- * the user. If the remote took too long to reply then we may have
- * dropped the packet already.
- */
-
-static void aun_tx_ack(unsigned long seq, int result)
-{
- struct sk_buff *skb;
- unsigned long flags;
- struct ec_cb *eb;
-
- spin_lock_irqsave(&aun_queue_lock, flags);
- skb_queue_walk(&aun_queue, skb) {
- eb = (struct ec_cb *)&skb->cb;
- if (eb->seq == seq)
- goto foundit;
- }
- spin_unlock_irqrestore(&aun_queue_lock, flags);
- pr_debug("AUN: unknown sequence %ld\n", seq);
- return;
-
-foundit:
- tx_result(skb->sk, eb->cookie, result);
- skb_unlink(skb, &aun_queue);
- spin_unlock_irqrestore(&aun_queue_lock, flags);
- kfree_skb(skb);
-}
-
-/*
- * Deal with received AUN frames - sort out what type of thing it is
- * and hand it to the right function.
- */
-
-static void aun_data_available(struct sock *sk, int slen)
-{
- int err;
- struct sk_buff *skb;
- unsigned char *data;
- struct aunhdr *ah;
- size_t len;
-
- while ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) {
- if (err == -EAGAIN) {
- pr_err("AUN: no data available?!\n");
- return;
- }
- pr_debug("AUN: recvfrom() error %d\n", -err);
- }
-
- data = skb_transport_header(skb) + sizeof(struct udphdr);
- ah = (struct aunhdr *)data;
- len = skb->len - sizeof(struct udphdr);
-
- switch (ah->code) {
- case 2:
- aun_incoming(skb, ah, len);
- break;
- case 3:
- aun_tx_ack(ah->handle, ECTYPE_TRANSMIT_OK);
- break;
- case 4:
- aun_tx_ack(ah->handle, ECTYPE_TRANSMIT_NOT_LISTENING);
- break;
- default:
- pr_debug("AUN: unknown packet type: %d\n", data[0]);
- }
-
- skb_free_datagram(sk, skb);
-}
-
-/*
- * Called by the timer to manage the AUN transmit queue. If a packet
- * was sent to a dead or nonexistent host then we will never get an
- * acknowledgement back. After a few seconds we need to spot this and
- * drop the packet.
- */
-
-static void ab_cleanup(unsigned long h)
-{
- struct sk_buff *skb, *n;
- unsigned long flags;
-
- spin_lock_irqsave(&aun_queue_lock, flags);
- skb_queue_walk_safe(&aun_queue, skb, n) {
- struct ec_cb *eb = (struct ec_cb *)&skb->cb;
- if ((jiffies - eb->start) > eb->timeout) {
- tx_result(skb->sk, eb->cookie,
- ECTYPE_TRANSMIT_NOT_PRESENT);
- skb_unlink(skb, &aun_queue);
- kfree_skb(skb);
- }
- }
- spin_unlock_irqrestore(&aun_queue_lock, flags);
-
- mod_timer(&ab_cleanup_timer, jiffies + (HZ * 2));
-}
-
-static int __init aun_udp_initialise(void)
-{
- int error;
- struct sockaddr_in sin;
-
- skb_queue_head_init(&aun_queue);
- setup_timer(&ab_cleanup_timer, ab_cleanup, 0);
- ab_cleanup_timer.expires = jiffies + (HZ * 2);
- add_timer(&ab_cleanup_timer);
-
- memset(&sin, 0, sizeof(sin));
- sin.sin_port = htons(AUN_PORT);
-
- /* We can count ourselves lucky Acorn machines are too dim to
- speak IPv6. :-) */
- error = sock_create_kern(PF_INET, SOCK_DGRAM, 0, &udpsock);
- if (error < 0) {
- pr_err("AUN: socket error %d\n", -error);
- return error;
- }
-
- udpsock->sk->sk_reuse = 1;
- udpsock->sk->sk_allocation = GFP_ATOMIC; /* we're going to call it
- from interrupts */
-
- error = udpsock->ops->bind(udpsock, (struct sockaddr *)&sin,
- sizeof(sin));
- if (error < 0) {
- pr_err("AUN: bind error %d\n", -error);
- goto release;
- }
-
- udpsock->sk->sk_data_ready = aun_data_available;
-
- return 0;
-
-release:
- sock_release(udpsock);
- udpsock = NULL;
- return error;
-}
-#endif
-
-#ifdef CONFIG_ECONET_NATIVE
-
-/*
- * Receive an Econet frame from a device.
- */
-
-static int econet_rcv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *pt, struct net_device *orig_dev)
-{
- struct ec_framehdr *hdr;
- struct sock *sk = NULL;
- struct ec_device *edev = dev->ec_ptr;
-
- if (!net_eq(dev_net(dev), &init_net))
- goto drop;
-
- if (skb->pkt_type == PACKET_OTHERHOST)
- goto drop;
-
- if (!edev)
- goto drop;
-
- skb = skb_share_check(skb, GFP_ATOMIC);
- if (skb == NULL)
- return NET_RX_DROP;
-
- if (!pskb_may_pull(skb, sizeof(struct ec_framehdr)))
- goto drop;
-
- hdr = (struct ec_framehdr *)skb->data;
-
- /* First check for encapsulated IP */
- if (hdr->port == EC_PORT_IP) {
- skb->protocol = htons(ETH_P_IP);
- skb_pull(skb, sizeof(struct ec_framehdr));
- netif_rx(skb);
- return NET_RX_SUCCESS;
- }
-
- sk = ec_listening_socket(hdr->port, hdr->src_stn, hdr->src_net);
- if (!sk)
- goto drop;
-
- if (ec_queue_packet(sk, skb, edev->net, hdr->src_stn, hdr->cb,
- hdr->port))
- goto drop;
- sock_put(sk);
- return NET_RX_SUCCESS;
-
-drop:
- if (sk)
- sock_put(sk);
- kfree_skb(skb);
- return NET_RX_DROP;
-}
-
-static struct packet_type econet_packet_type __read_mostly = {
- .type = cpu_to_be16(ETH_P_ECONET),
- .func = econet_rcv,
-};
-
-static void econet_hw_initialise(void)
-{
- dev_add_pack(&econet_packet_type);
-}
-
-#endif
-
-static int econet_notifier(struct notifier_block *this, unsigned long msg,
- void *data)
-{
- struct net_device *dev = data;
- struct ec_device *edev;
-
- if (!net_eq(dev_net(dev), &init_net))
- return NOTIFY_DONE;
-
- switch (msg) {
- case NETDEV_UNREGISTER:
- /* A device has gone down - kill any data we hold for it. */
- edev = dev->ec_ptr;
- if (edev) {
- if (net2dev_map[0] == dev)
- net2dev_map[0] = NULL;
- net2dev_map[edev->net] = NULL;
- kfree(edev);
- dev->ec_ptr = NULL;
- }
- break;
- }
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block econet_netdev_notifier = {
- .notifier_call = econet_notifier,
-};
-
-static void __exit econet_proto_exit(void)
-{
-#ifdef CONFIG_ECONET_AUNUDP
- del_timer(&ab_cleanup_timer);
- if (udpsock)
- sock_release(udpsock);
-#endif
- unregister_netdevice_notifier(&econet_netdev_notifier);
-#ifdef CONFIG_ECONET_NATIVE
- dev_remove_pack(&econet_packet_type);
-#endif
- sock_unregister(econet_family_ops.family);
- proto_unregister(&econet_proto);
-}
-
-static int __init econet_proto_init(void)
-{
- int err = proto_register(&econet_proto, 0);
-
- if (err != 0)
- goto out;
- sock_register(&econet_family_ops);
-#ifdef CONFIG_ECONET_AUNUDP
- aun_udp_initialise();
-#endif
-#ifdef CONFIG_ECONET_NATIVE
- econet_hw_initialise();
-#endif
- register_netdevice_notifier(&econet_netdev_notifier);
-out:
- return err;
-}
-
-module_init(econet_proto_init);
-module_exit(econet_proto_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_NETPROTO(PF_ECONET);
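One piece of the deleted file worth restating is the AUN addressing scheme its comment describes: Econet station x was mapped onto the local /24, giving IP a.b.c.x. A standalone sketch of the computation the removed econet_sendmsg() performed:

	static __be32 aun_station_to_ip(__be32 local_ifa, unsigned char station)
	{
		/* keep the a.b.c. prefix of the local interface address and
		 * substitute the station number as the host octet (the line
		 * the original code flagged with "!!!") */
		unsigned long network = ntohl(local_ifa) & 0xffffff00UL;

		return htonl(network | station);
	}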
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index bf10a311cf1c..36e58800a9e3 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -77,7 +77,7 @@ __setup("ether=", netdev_boot_setup);
*/
int eth_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
- const void *daddr, const void *saddr, unsigned len)
+ const void *daddr, const void *saddr, unsigned int len)
{
struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
@@ -164,7 +164,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
eth = eth_hdr(skb);
if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
- if (!compare_ether_addr_64bits(eth->h_dest, dev->broadcast))
+ if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
skb->pkt_type = PACKET_BROADCAST;
else
skb->pkt_type = PACKET_MULTICAST;
@@ -179,7 +179,8 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
*/
else if (1 /*dev->flags&IFF_PROMISC */ ) {
- if (unlikely(compare_ether_addr_64bits(eth->h_dest, dev->dev_addr)))
+ if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
+ dev->dev_addr)))
skb->pkt_type = PACKET_OTHERHOST;
}
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 368515885368..32eb4179e8fa 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -196,7 +196,7 @@ lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift, const struct in6_addr *ipaddr,
static void
lowpan_uip_ds6_set_addr_iid(struct in6_addr *ipaddr, unsigned char *lladdr)
{
- memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ALEN);
+ memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ADDR_LEN);
/* second bit-flip (Universe/Local) is done according RFC2464 */
ipaddr->s6_addr[8] ^= 0x02;
}
@@ -221,7 +221,7 @@ lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr,
if (lladdr)
lowpan_raw_dump_inline(__func__, "linklocal address",
- lladdr, IEEE802154_ALEN);
+ lladdr, IEEE802154_ADDR_LEN);
if (prefcount > 0)
memcpy(ipaddr, prefix, prefcount);
@@ -371,7 +371,7 @@ err:
static int lowpan_header_create(struct sk_buff *skb,
struct net_device *dev,
unsigned short type, const void *_daddr,
- const void *_saddr, unsigned len)
+ const void *_saddr, unsigned int len)
{
u8 tmp, iphc0, iphc1, *hc06_ptr;
struct ipv6hdr *hdr;
@@ -650,6 +650,53 @@ static void lowpan_fragment_timer_expired(unsigned long entry_addr)
kfree(entry);
}
+static struct lowpan_fragment *
+lowpan_alloc_new_frame(struct sk_buff *skb, u8 iphc0, u8 len, u8 tag)
+{
+ struct lowpan_fragment *frame;
+
+ frame = kzalloc(sizeof(struct lowpan_fragment),
+ GFP_ATOMIC);
+ if (!frame)
+ goto frame_err;
+
+ INIT_LIST_HEAD(&frame->list);
+
+ frame->length = (iphc0 & 7) | (len << 3);
+ frame->tag = tag;
+
+ /* allocate buffer for frame assembling */
+ frame->skb = alloc_skb(frame->length +
+ sizeof(struct ipv6hdr), GFP_ATOMIC);
+
+ if (!frame->skb)
+ goto skb_err;
+
+ frame->skb->priority = skb->priority;
+ frame->skb->dev = skb->dev;
+
+ /* reserve headroom for uncompressed ipv6 header */
+ skb_reserve(frame->skb, sizeof(struct ipv6hdr));
+ skb_put(frame->skb, frame->length);
+
+ init_timer(&frame->timer);
+ /* time out is the same as for ipv6 - 60 sec */
+ frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
+ frame->timer.data = (unsigned long)frame;
+ frame->timer.function = lowpan_fragment_timer_expired;
+
+ add_timer(&frame->timer);
+
+ list_add_tail(&frame->list, &lowpan_fragments);
+
+ return frame;
+
+skb_err:
+ kfree(frame);
+frame_err:
+ return NULL;
+}
+
static int
lowpan_process_data(struct sk_buff *skb)
{
@@ -692,41 +739,9 @@ lowpan_process_data(struct sk_buff *skb)
/* alloc new frame structure */
if (!found) {
- frame = kzalloc(sizeof(struct lowpan_fragment),
- GFP_ATOMIC);
+ frame = lowpan_alloc_new_frame(skb, iphc0, len, tag);
if (!frame)
goto unlock_and_drop;
-
- INIT_LIST_HEAD(&frame->list);
-
- frame->length = (iphc0 & 7) | (len << 3);
- frame->tag = tag;
-
- /* allocate buffer for frame assembling */
- frame->skb = alloc_skb(frame->length +
- sizeof(struct ipv6hdr), GFP_ATOMIC);
-
- if (!frame->skb) {
- kfree(frame);
- goto unlock_and_drop;
- }
-
- frame->skb->priority = skb->priority;
- frame->skb->dev = skb->dev;
-
- /* reserve headroom for uncompressed ipv6 header */
- skb_reserve(frame->skb, sizeof(struct ipv6hdr));
- skb_put(frame->skb, frame->length);
-
- init_timer(&frame->timer);
- /* time out is the same as for ipv6 - 60 sec */
- frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
- frame->timer.data = (unsigned long)frame;
- frame->timer.function = lowpan_fragment_timer_expired;
-
- add_timer(&frame->timer);
-
- list_add_tail(&frame->list, &lowpan_fragments);
}
if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1)
@@ -1044,6 +1059,24 @@ static void lowpan_dev_free(struct net_device *dev)
free_netdev(dev);
}
+static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
+{
+ struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+ return ieee802154_mlme_ops(real_dev)->get_phy(real_dev);
+}
+
+static u16 lowpan_get_pan_id(const struct net_device *dev)
+{
+ struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+ return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
+}
+
+static u16 lowpan_get_short_addr(const struct net_device *dev)
+{
+ struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+ return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
+}
+
static struct header_ops lowpan_header_ops = {
.create = lowpan_header_create,
};
@@ -1053,6 +1086,12 @@ static const struct net_device_ops lowpan_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
+static struct ieee802154_mlme_ops lowpan_mlme = {
+ .get_pan_id = lowpan_get_pan_id,
+ .get_phy = lowpan_get_phy,
+ .get_short_addr = lowpan_get_short_addr,
+};
+
static void lowpan_setup(struct net_device *dev)
{
pr_debug("(%s)\n", __func__);
@@ -1070,6 +1109,7 @@ static void lowpan_setup(struct net_device *dev)
dev->netdev_ops = &lowpan_netdev_ops;
dev->header_ops = &lowpan_header_ops;
+ dev->ml_priv = &lowpan_mlme;
dev->destructor = lowpan_dev_free;
}
@@ -1143,6 +1183,8 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
list_add_tail(&entry->list, &lowpan_devices);
mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
+ spin_lock_init(&flist_lock);
+
register_netdevice(dev);
return 0;
@@ -1152,11 +1194,20 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
{
struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
struct net_device *real_dev = lowpan_dev->real_dev;
- struct lowpan_dev_record *entry;
- struct lowpan_dev_record *tmp;
+ struct lowpan_dev_record *entry, *tmp;
+ struct lowpan_fragment *frame, *tframe;
ASSERT_RTNL();
+ spin_lock(&flist_lock);
+ list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
+ del_timer(&frame->timer);
+ list_del(&frame->list);
+ dev_kfree_skb(frame->skb);
+ kfree(frame);
+ }
+ spin_unlock(&flist_lock);
+
mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
if (entry->ldev == dev) {
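The dellink hunk above adds the teardown half of the fragment-reassembly lifecycle: every entry created by lowpan_alloc_new_frame() arms a one-shot 60-second timer whose handler frees the entry, so the device teardown must disarm those timers before freeing, or a late-firing handler could touch freed memory. A condensed sketch of the pattern, with frag_entry standing in for struct lowpan_fragment:

	struct frag_entry {
		struct list_head list;
		struct timer_list timer;	/* one-shot, LOWPAN_FRAG_TIMEOUT */
		struct sk_buff *skb;		/* partially reassembled datagram */
	};

	static void frag_teardown(struct list_head *frags, spinlock_t *lock)
	{
		struct frag_entry *frame, *tmp;

		spin_lock(lock);
		list_for_each_entry_safe(frame, tmp, frags, list) {
			del_timer(&frame->timer);	/* disarm before freeing */
			list_del(&frame->list);
			dev_kfree_skb(frame->skb);
			kfree(frame);
		}
		spin_unlock(lock);
	}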
diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h
index aeff3f310482..8c2251fb0a3f 100644
--- a/net/ieee802154/6lowpan.h
+++ b/net/ieee802154/6lowpan.h
@@ -53,9 +53,6 @@
#ifndef __6LOWPAN_H__
#define __6LOWPAN_H__
-/* need to know address length to manipulate with it */
-#define IEEE802154_ALEN 8
-
#define UIP_802154_SHORTADDR_LEN 2 /* compressed ipv6 address length */
#define UIP_IPH_LEN 40 /* ipv6 fixed header size */
#define UIP_PROTO_UDP 17 /* ipv6 next header value for UDP */
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 1b09eaabaac1..6fbb2ad7bb6d 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -44,8 +44,8 @@ struct dgram_sock {
struct ieee802154_addr src_addr;
struct ieee802154_addr dst_addr;
- unsigned bound:1;
- unsigned want_ack:1;
+ unsigned int bound:1;
+ unsigned int want_ack:1;
};
static inline struct dgram_sock *dgram_sk(const struct sock *sk)
@@ -206,7 +206,7 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t size)
{
struct net_device *dev;
- unsigned mtu;
+ unsigned int mtu;
struct sk_buff *skb;
struct dgram_sock *ro = dgram_sk(sk);
int hlen, tlen;
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index adaf46214905..ca92587720f4 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -63,15 +63,14 @@ int ieee802154_nl_assoc_indic(struct net_device *dev,
if (!msg)
return -ENOBUFS;
- NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
- NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
- NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
- dev->dev_addr);
-
- NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
- addr->hwaddr);
-
- NLA_PUT_U8(msg, IEEE802154_ATTR_CAPABILITY, cap);
+ if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+ nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+ nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+ dev->dev_addr) ||
+ nla_put(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
+ addr->hwaddr) ||
+ nla_put_u8(msg, IEEE802154_ATTR_CAPABILITY, cap))
+ goto nla_put_failure;
return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
@@ -92,14 +91,13 @@ int ieee802154_nl_assoc_confirm(struct net_device *dev, u16 short_addr,
if (!msg)
return -ENOBUFS;
- NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
- NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
- NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
- dev->dev_addr);
-
- NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr);
- NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
-
+ if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+ nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+ nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+ dev->dev_addr) ||
+ nla_put_u16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr) ||
+ nla_put_u8(msg, IEEE802154_ATTR_STATUS, status))
+ goto nla_put_failure;
return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
nla_put_failure:
@@ -119,20 +117,22 @@ int ieee802154_nl_disassoc_indic(struct net_device *dev,
if (!msg)
return -ENOBUFS;
- NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
- NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
- NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
- dev->dev_addr);
-
- if (addr->addr_type == IEEE802154_ADDR_LONG)
- NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
- addr->hwaddr);
- else
- NLA_PUT_U16(msg, IEEE802154_ATTR_SRC_SHORT_ADDR,
- addr->short_addr);
-
- NLA_PUT_U8(msg, IEEE802154_ATTR_REASON, reason);
-
+ if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+ nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+ nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+ dev->dev_addr))
+ goto nla_put_failure;
+ if (addr->addr_type == IEEE802154_ADDR_LONG) {
+ if (nla_put(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
+ addr->hwaddr))
+ goto nla_put_failure;
+ } else {
+ if (nla_put_u16(msg, IEEE802154_ATTR_SRC_SHORT_ADDR,
+ addr->short_addr))
+ goto nla_put_failure;
+ }
+ if (nla_put_u8(msg, IEEE802154_ATTR_REASON, reason))
+ goto nla_put_failure;
return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
nla_put_failure:
@@ -151,13 +151,12 @@ int ieee802154_nl_disassoc_confirm(struct net_device *dev, u8 status)
if (!msg)
return -ENOBUFS;
- NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
- NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
- NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
- dev->dev_addr);
-
- NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
-
+ if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+ nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+ nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+ dev->dev_addr) ||
+ nla_put_u8(msg, IEEE802154_ATTR_STATUS, status))
+ goto nla_put_failure;
return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
nla_put_failure:
@@ -177,13 +176,13 @@ int ieee802154_nl_beacon_indic(struct net_device *dev,
if (!msg)
return -ENOBUFS;
- NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
- NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
- NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
- dev->dev_addr);
- NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr);
- NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid);
-
+ if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+ nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+ nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+ dev->dev_addr) ||
+ nla_put_u16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr) ||
+ nla_put_u16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid))
+ goto nla_put_failure;
return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
nla_put_failure:
@@ -204,19 +203,17 @@ int ieee802154_nl_scan_confirm(struct net_device *dev,
if (!msg)
return -ENOBUFS;
- NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
- NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
- NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
- dev->dev_addr);
-
- NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
- NLA_PUT_U8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type);
- NLA_PUT_U32(msg, IEEE802154_ATTR_CHANNELS, unscanned);
- NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, page);
-
- if (edl)
- NLA_PUT(msg, IEEE802154_ATTR_ED_LIST, 27, edl);
-
+ if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+ nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+ nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+ dev->dev_addr) ||
+ nla_put_u8(msg, IEEE802154_ATTR_STATUS, status) ||
+ nla_put_u8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type) ||
+ nla_put_u32(msg, IEEE802154_ATTR_CHANNELS, unscanned) ||
+ nla_put_u8(msg, IEEE802154_ATTR_PAGE, page) ||
+ (edl &&
+ nla_put(msg, IEEE802154_ATTR_ED_LIST, 27, edl)))
+ goto nla_put_failure;
return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
nla_put_failure:
@@ -235,13 +232,12 @@ int ieee802154_nl_start_confirm(struct net_device *dev, u8 status)
if (!msg)
return -ENOBUFS;
- NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
- NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
- NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
- dev->dev_addr);
-
- NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
-
+ if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+ nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+ nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+ dev->dev_addr) ||
+ nla_put_u8(msg, IEEE802154_ATTR_STATUS, status))
+ goto nla_put_failure;
return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
nla_put_failure:
@@ -266,16 +262,16 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 pid,
phy = ieee802154_mlme_ops(dev)->get_phy(dev);
BUG_ON(!phy);
- NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
- NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy));
- NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
-
- NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
- dev->dev_addr);
- NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR,
- ieee802154_mlme_ops(dev)->get_short_addr(dev));
- NLA_PUT_U16(msg, IEEE802154_ATTR_PAN_ID,
- ieee802154_mlme_ops(dev)->get_pan_id(dev));
+ if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+ nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
+ nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+ nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+ dev->dev_addr) ||
+ nla_put_u16(msg, IEEE802154_ATTR_SHORT_ADDR,
+ ieee802154_mlme_ops(dev)->get_short_addr(dev)) ||
+ nla_put_u16(msg, IEEE802154_ATTR_PAN_ID,
+ ieee802154_mlme_ops(dev)->get_pan_id(dev)))
+ goto nla_put_failure;
wpan_phy_put(phy);
return genlmsg_end(msg, hdr);
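The scan-confirm hunk above also shows the idiom this conversion uses for optional attributes: a "(pointer && nla_put(...))" term inside the || chain, so the attribute is emitted only when it was supplied, and a failed put still short-circuits to the error label. In isolation:

	if (nla_put_u8(msg, IEEE802154_ATTR_STATUS, status) ||
	    (edl &&		/* optional: only emitted when supplied */
	     nla_put(msg, IEEE802154_ATTR_ED_LIST, 27, edl)))
		goto nla_put_failure;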
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index c64a38d57aa3..eed291626da6 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -53,18 +53,18 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 pid,
goto out;
mutex_lock(&phy->pib_lock);
- NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy));
-
- NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, phy->current_page);
- NLA_PUT_U8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel);
+ if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
+ nla_put_u8(msg, IEEE802154_ATTR_PAGE, phy->current_page) ||
+ nla_put_u8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel))
+ goto nla_put_failure;
for (i = 0; i < 32; i++) {
if (phy->channels_supported[i])
buf[pages++] = phy->channels_supported[i] | (i << 27);
}
- if (pages)
- NLA_PUT(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST,
- pages * sizeof(uint32_t), buf);
-
+ if (pages &&
+ nla_put(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST,
+ pages * sizeof(uint32_t), buf))
+ goto nla_put_failure;
mutex_unlock(&phy->pib_lock);
kfree(buf);
return genlmsg_end(msg, hdr);
@@ -179,6 +179,7 @@ static int ieee802154_add_iface(struct sk_buff *skb,
const char *devname;
int rc = -ENOBUFS;
struct net_device *dev;
+ int type = __IEEE802154_DEV_INVALID;
pr_debug("%s\n", __func__);
@@ -221,7 +222,13 @@ static int ieee802154_add_iface(struct sk_buff *skb,
goto nla_put_failure;
}
- dev = phy->add_iface(phy, devname);
+ if (info->attrs[IEEE802154_ATTR_DEV_TYPE]) {
+ type = nla_get_u8(info->attrs[IEEE802154_ATTR_DEV_TYPE]);
+ if (type >= __IEEE802154_DEV_MAX)
+ return -EINVAL;
+ }
+
+ dev = phy->add_iface(phy, devname, type);
if (IS_ERR(dev)) {
rc = PTR_ERR(dev);
goto nla_put_failure;
@@ -245,9 +252,9 @@ static int ieee802154_add_iface(struct sk_buff *skb,
goto dev_unregister;
}
- NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy));
- NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
-
+ if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
+ nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name))
+ goto nla_put_failure;
dev_put(dev);
wpan_phy_put(phy);
@@ -333,10 +340,9 @@ static int ieee802154_del_iface(struct sk_buff *skb,
rtnl_unlock();
-
- NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy));
- NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, name);
-
+ if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
+ nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, name))
+ goto nla_put_failure;
wpan_phy_put(phy);
return ieee802154_nl_reply(msg, info);
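Besides the NLA_PUT conversion, ieee802154_add_iface() above grows an optional-attribute parse with a bounds check before the requested type reaches the driver. The same lines, annotated:

	int type = __IEEE802154_DEV_INVALID;	/* default when attribute absent */

	if (info->attrs[IEEE802154_ATTR_DEV_TYPE]) {
		type = nla_get_u8(info->attrs[IEEE802154_ATTR_DEV_TYPE]);
		if (type >= __IEEE802154_DEV_MAX)	/* reject unknown types */
			return -EINVAL;
	}
	dev = phy->add_iface(phy, devname, type);	/* driver sees a vetted value */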
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
index f96bae8fd330..50e823927d49 100644
--- a/net/ieee802154/raw.c
+++ b/net/ieee802154/raw.c
@@ -106,7 +106,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t size)
{
struct net_device *dev;
- unsigned mtu;
+ unsigned int mtu;
struct sk_buff *skb;
int hlen, tlen;
int err;
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index d183262943d9..20f1cb5c8aba 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -262,8 +262,8 @@ config ARPD
bool "IP: ARP daemon support"
---help---
The kernel maintains an internal cache which maps IP addresses to
- hardware addresses on the local network, so that Ethernet/Token Ring/
- etc. frames are sent to the proper address on the physical networking
+ hardware addresses on the local network, so that Ethernet
+ frames are sent to the proper address on the physical networking
layer. Normally, kernel uses the ARP protocol to resolve these
mappings.
@@ -312,7 +312,7 @@ config SYN_COOKIES
config INET_AH
tristate "IP: AH transformation"
- select XFRM
+ select XFRM_ALGO
select CRYPTO
select CRYPTO_HMAC
select CRYPTO_MD5
@@ -324,7 +324,7 @@ config INET_AH
config INET_ESP
tristate "IP: ESP transformation"
- select XFRM
+ select XFRM_ALGO
select CRYPTO
select CRYPTO_AUTHENC
select CRYPTO_HMAC
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 10e3751466b5..c8f7aee587d1 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -350,7 +350,7 @@ lookup_protocol:
err = 0;
sk->sk_no_check = answer_no_check;
if (INET_PROTOSW_REUSE & answer_flags)
- sk->sk_reuse = 1;
+ sk->sk_reuse = SK_CAN_REUSE;
inet = inet_sk(sk);
inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;
@@ -541,7 +541,7 @@ out:
}
EXPORT_SYMBOL(inet_bind);
-int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr,
+int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags)
{
struct sock *sk = sock->sk;
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index fd508b526014..e8f2617ecd47 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -77,7 +77,7 @@ static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
static int ip_clear_mutable_options(const struct iphdr *iph, __be32 *daddr)
{
- unsigned char * optptr = (unsigned char*)(iph+1);
+ unsigned char *optptr = (unsigned char *)(iph+1);
int l = iph->ihl*4 - sizeof(struct iphdr);
int optlen;
@@ -406,8 +406,8 @@ static void ah4_err(struct sk_buff *skb, u32 info)
ah->spi, IPPROTO_AH, AF_INET);
if (!x)
return;
- printk(KERN_DEBUG "pmtu discovery on SA AH/%08x/%08x\n",
- ntohl(ah->spi), ntohl(iph->daddr));
+ pr_debug("pmtu discovery on SA AH/%08x/%08x\n",
+ ntohl(ah->spi), ntohl(iph->daddr));
xfrm_state_put(x);
}
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 18d9b81ecb1a..cda37be02f8d 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -73,6 +73,8 @@
* Jesper D. Brouer: Proxy ARP PVLAN RFC 3069 support.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
@@ -89,7 +91,6 @@
#include <linux/etherdevice.h>
#include <linux/fddidevice.h>
#include <linux/if_arp.h>
-#include <linux/trdevice.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
@@ -193,9 +194,6 @@ int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir)
case ARPHRD_IEEE802:
ip_eth_mc_map(addr, haddr);
return 0;
- case ARPHRD_IEEE802_TR:
- ip_tr_mc_map(addr, haddr);
- return 0;
case ARPHRD_INFINIBAND:
ip_ib_mc_map(addr, dev->broadcast, haddr);
return 0;
@@ -364,8 +362,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
probes -= neigh->parms->ucast_probes;
if (probes < 0) {
if (!(neigh->nud_state & NUD_VALID))
- printk(KERN_DEBUG
- "trying to ucast probe in NUD_INVALID\n");
+ pr_debug("trying to ucast probe in NUD_INVALID\n");
dst_ha = neigh->ha;
read_lock_bh(&neigh->lock);
} else {
@@ -452,7 +449,7 @@ static int arp_set_predefined(int addr_hint, unsigned char *haddr,
{
switch (addr_hint) {
case RTN_LOCAL:
- printk(KERN_DEBUG "ARP: arp called for own IP address\n");
+ pr_debug("arp called for own IP address\n");
memcpy(haddr, dev->dev_addr, dev->addr_len);
return 1;
case RTN_MULTICAST:
@@ -473,7 +470,7 @@ int arp_find(unsigned char *haddr, struct sk_buff *skb)
struct neighbour *n;
if (!skb_dst(skb)) {
- printk(KERN_DEBUG "arp_find is called with dst==NULL\n");
+ pr_debug("arp_find is called with dst==NULL\n");
kfree_skb(skb);
return 1;
}
@@ -648,12 +645,6 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
arp->ar_pro = htons(ETH_P_IP);
break;
#endif
-#if IS_ENABLED(CONFIG_TR)
- case ARPHRD_IEEE802_TR:
- arp->ar_hrd = htons(ARPHRD_IEEE802);
- arp->ar_pro = htons(ETH_P_IP);
- break;
-#endif
}
arp->ar_hln = dev->addr_len;
@@ -751,11 +742,10 @@ static int arp_process(struct sk_buff *skb)
goto out;
break;
case ARPHRD_ETHER:
- case ARPHRD_IEEE802_TR:
case ARPHRD_FDDI:
case ARPHRD_IEEE802:
/*
- * ETHERNET, Token Ring and Fibre Channel (which are IEEE 802
+ * ETHERNET and Fibre Channel (which are IEEE 802
* devices, according to RFC 2625) devices will accept ARP
* hardware types of either 1 (Ethernet) or 6 (IEEE 802.2).
* This is the case also of FDDI, where the RFC 1390 says that
@@ -1059,7 +1049,7 @@ static int arp_req_set(struct net *net, struct arpreq *r,
neigh = __neigh_lookup_errno(&arp_tbl, &ip, dev);
err = PTR_ERR(neigh);
if (!IS_ERR(neigh)) {
- unsigned state = NUD_STALE;
+ unsigned int state = NUD_STALE;
if (r->arp_flags & ATF_PERM)
state = NUD_PERMANENT;
err = neigh_update(neigh, (r->arp_flags & ATF_COM) ?
@@ -1071,7 +1061,7 @@ static int arp_req_set(struct net *net, struct arpreq *r,
return err;
}
-static unsigned arp_state_to_flags(struct neighbour *neigh)
+static unsigned int arp_state_to_flags(struct neighbour *neigh)
{
if (neigh->nud_state&NUD_PERMANENT)
return ATF_PERM | ATF_COM;
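The pr_fmt definition added at the top of arp.c is what lets the messages above drop their hand-written "ARP:" prefixes: once pr_fmt is defined before the printk helpers are pulled in, every pr_*() call in the file is prefixed automatically. A sketch of the mechanism:

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must precede the #includes */

	pr_debug("arp called for own IP address\n");
	/* emitted as: "arp: arp called for own IP address" */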
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 6e447ff94dfa..10e15a144e95 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -217,8 +217,7 @@ void in_dev_finish_destroy(struct in_device *idev)
WARN_ON(idev->ifa_list);
WARN_ON(idev->mc_list);
#ifdef NET_REFCNT_DEBUG
- printk(KERN_DEBUG "in_dev_finish_destroy: %p=%s\n",
- idev, dev ? dev->name : "NIL");
+ pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL");
#endif
dev_put(dev);
if (!idev->dead)
@@ -1125,7 +1124,7 @@ skip:
}
}
-static inline bool inetdev_valid_mtu(unsigned mtu)
+static inline bool inetdev_valid_mtu(unsigned int mtu)
{
return mtu >= 68;
}
@@ -1174,7 +1173,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
switch (event) {
case NETDEV_REGISTER:
- printk(KERN_DEBUG "inetdev_event: bug\n");
+ pr_debug("%s: bug\n", __func__);
RCU_INIT_POINTER(dev->ip_ptr, NULL);
break;
case NETDEV_UP:
@@ -1266,17 +1265,15 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
ifm->ifa_scope = ifa->ifa_scope;
ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
- if (ifa->ifa_address)
- NLA_PUT_BE32(skb, IFA_ADDRESS, ifa->ifa_address);
-
- if (ifa->ifa_local)
- NLA_PUT_BE32(skb, IFA_LOCAL, ifa->ifa_local);
-
- if (ifa->ifa_broadcast)
- NLA_PUT_BE32(skb, IFA_BROADCAST, ifa->ifa_broadcast);
-
- if (ifa->ifa_label[0])
- NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label);
+ if ((ifa->ifa_address &&
+ nla_put_be32(skb, IFA_ADDRESS, ifa->ifa_address)) ||
+ (ifa->ifa_local &&
+ nla_put_be32(skb, IFA_LOCAL, ifa->ifa_local)) ||
+ (ifa->ifa_broadcast &&
+ nla_put_be32(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
+ (ifa->ifa_label[0] &&
+ nla_put_string(skb, IFA_LABEL, ifa->ifa_label)))
+ goto nla_put_failure;
return nlmsg_end(skb, nlh);
@@ -1587,7 +1584,6 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write,
static struct devinet_sysctl_table {
struct ctl_table_header *sysctl_header;
struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
- char *dev_name;
} devinet_sysctl = {
.devinet_vars = {
DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding",
@@ -1629,16 +1625,7 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name,
{
int i;
struct devinet_sysctl_table *t;
-
-#define DEVINET_CTL_PATH_DEV 3
-
- struct ctl_path devinet_ctl_path[] = {
- { .procname = "net", },
- { .procname = "ipv4", },
- { .procname = "conf", },
- { /* to be set */ },
- { },
- };
+ char path[sizeof("net/ipv4/conf/") + IFNAMSIZ];
t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL);
if (!t)
@@ -1650,27 +1637,15 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name,
t->devinet_vars[i].extra2 = net;
}
- /*
- * Make a copy of dev_name, because '.procname' is regarded as const
- * by sysctl and we wouldn't want anyone to change it under our feet
- * (see SIOCSIFNAME).
- */
- t->dev_name = kstrdup(dev_name, GFP_KERNEL);
- if (!t->dev_name)
- goto free;
-
- devinet_ctl_path[DEVINET_CTL_PATH_DEV].procname = t->dev_name;
+ snprintf(path, sizeof(path), "net/ipv4/conf/%s", dev_name);
- t->sysctl_header = register_net_sysctl_table(net, devinet_ctl_path,
- t->devinet_vars);
+ t->sysctl_header = register_net_sysctl(net, path, t->devinet_vars);
if (!t->sysctl_header)
- goto free_procname;
+ goto free;
p->sysctl = t;
return 0;
-free_procname:
- kfree(t->dev_name);
free:
kfree(t);
out:
@@ -1686,7 +1661,6 @@ static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
cnf->sysctl = NULL;
unregister_net_sysctl_table(t->sysctl_header);
- kfree(t->dev_name);
kfree(t);
}
@@ -1716,12 +1690,6 @@ static struct ctl_table ctl_forward_entry[] = {
},
{ },
};
-
-static __net_initdata struct ctl_path net_ipv4_path[] = {
- { .procname = "net", },
- { .procname = "ipv4", },
- { },
-};
#endif
static __net_init int devinet_init_net(struct net *net)
@@ -1767,7 +1735,7 @@ static __net_init int devinet_init_net(struct net *net)
goto err_reg_dflt;
err = -ENOMEM;
- forw_hdr = register_net_sysctl_table(net, net_ipv4_path, tbl);
+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
if (forw_hdr == NULL)
goto err_reg_ctl;
net->ipv4.forw_hdr = forw_hdr;
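The devinet sysctl rework is part of the same series' register_net_sysctl() conversion: the table position becomes a plain path string, which eliminates both the ctl_path array boilerplate and the kstrdup'd copy of the interface name that existed only to feed a const procname. The shape of the conversion, as a sketch:

	/* before: one ctl_path entry per path component, with a duplicated
	 * dev_name kept alive for the table's lifetime */
	struct ctl_path devinet_ctl_path[] = {
		{ .procname = "net", }, { .procname = "ipv4", },
		{ .procname = "conf", }, { .procname = t->dev_name, }, { },
	};
	hdr = register_net_sysctl_table(net, devinet_ctl_path, table);

	/* after: one formatted string; still returns NULL on failure */
	char path[sizeof("net/ipv4/conf/") + IFNAMSIZ];
	snprintf(path, sizeof(path), "net/ipv4/conf/%s", dev_name);
	hdr = register_net_sysctl(net, path, table);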
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 89a47b35905d..cb982a61536f 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -459,28 +459,22 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
struct esp_data *esp = x->data;
u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
u32 align = max_t(u32, blksize, esp->padlen);
- u32 rem;
-
- mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
- rem = mtu & (align - 1);
- mtu &= ~(align - 1);
+ unsigned int net_adj;
switch (x->props.mode) {
- case XFRM_MODE_TUNNEL:
- break;
- default:
case XFRM_MODE_TRANSPORT:
- /* The worst case */
- mtu -= blksize - 4;
- mtu += min_t(u32, blksize - 4, rem);
- break;
case XFRM_MODE_BEET:
- /* The worst case. */
- mtu += min_t(u32, IPV4_BEET_PHMAXLEN, rem);
+ net_adj = sizeof(struct iphdr);
break;
+ case XFRM_MODE_TUNNEL:
+ net_adj = 0;
+ break;
+ default:
+ BUG();
}
- return mtu - 2;
+ return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
+ net_adj) & ~(align - 1)) + (net_adj - 2);
}
static void esp4_err(struct sk_buff *skb, u32 info)
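The esp4_get_mtu() rewrite folds the per-mode worst cases into one expression: subtract the ESP overhead and a per-mode network-header adjustment, round down to the cipher alignment, then give the adjustment back minus the 2-byte pad-length/next-header trailer. A numeric check with assumed values (not taken from the patch): mtu 1500, x->props.header_len 32, 12-byte ICV, 16-byte block cipher, transport mode so net_adj = sizeof(struct iphdr) = 20:

	u32 inner = (1500 - 32 - 12 - 20) & ~(16 - 1);	/* 1436 rounds down to 1424 */
	u32 out   = inner + (20 - 2);			/* 1442 */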
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index cbe3a68507cf..3854411fa37c 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -136,13 +136,13 @@ static void fib_flush(struct net *net)
* Find address type as if only "dev" was present in the system. If
* on_dev is NULL then all interfaces are taken into consideration.
*/
-static inline unsigned __inet_dev_addr_type(struct net *net,
- const struct net_device *dev,
- __be32 addr)
+static inline unsigned int __inet_dev_addr_type(struct net *net,
+ const struct net_device *dev,
+ __be32 addr)
{
struct flowi4 fl4 = { .daddr = addr };
struct fib_result res;
- unsigned ret = RTN_BROADCAST;
+ unsigned int ret = RTN_BROADCAST;
struct fib_table *local_table;
if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
@@ -740,7 +740,7 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
#define BRD_OK 2
#define BRD0_OK 4
#define BRD1_OK 8
- unsigned ok = 0;
+ unsigned int ok = 0;
int subnet = 0; /* Primary network */
int gone = 1; /* Address is missing */
int same_prefsrc = 0; /* Another primary with same IP */
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 799fc790b3cf..2d043f71ef70 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -221,15 +221,15 @@ static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
frh->src_len = rule4->src_len;
frh->tos = rule4->tos;
- if (rule4->dst_len)
- NLA_PUT_BE32(skb, FRA_DST, rule4->dst);
-
- if (rule4->src_len)
- NLA_PUT_BE32(skb, FRA_SRC, rule4->src);
-
+ if ((rule4->dst_len &&
+ nla_put_be32(skb, FRA_DST, rule4->dst)) ||
+ (rule4->src_len &&
+ nla_put_be32(skb, FRA_SRC, rule4->src)))
+ goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
- if (rule4->tclassid)
- NLA_PUT_U32(skb, FRA_FLOW, rule4->tclassid);
+ if (rule4->tclassid &&
+ nla_put_u32(skb, FRA_FLOW, rule4->tclassid))
+ goto nla_put_failure;
#endif
return 0;
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 5063fa38ac7b..e5b7182fa099 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -145,6 +145,12 @@ static void free_fib_info_rcu(struct rcu_head *head)
{
struct fib_info *fi = container_of(head, struct fib_info, rcu);
+ change_nexthops(fi) {
+ if (nexthop_nh->nh_dev)
+ dev_put(nexthop_nh->nh_dev);
+ } endfor_nexthops(fi);
+
+ release_net(fi->fib_net);
if (fi->fib_metrics != (u32 *) dst_default_metrics)
kfree(fi->fib_metrics);
kfree(fi);
@@ -156,13 +162,7 @@ void free_fib_info(struct fib_info *fi)
pr_warn("Freeing alive fib_info %p\n", fi);
return;
}
- change_nexthops(fi) {
- if (nexthop_nh->nh_dev)
- dev_put(nexthop_nh->nh_dev);
- nexthop_nh->nh_dev = NULL;
- } endfor_nexthops(fi);
fib_info_cnt--;
- release_net(fi->fib_net);
call_rcu(&fi->rcu, free_fib_info_rcu);
}
@@ -931,33 +931,36 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
rtm->rtm_table = tb_id;
else
rtm->rtm_table = RT_TABLE_COMPAT;
- NLA_PUT_U32(skb, RTA_TABLE, tb_id);
+ if (nla_put_u32(skb, RTA_TABLE, tb_id))
+ goto nla_put_failure;
rtm->rtm_type = type;
rtm->rtm_flags = fi->fib_flags;
rtm->rtm_scope = fi->fib_scope;
rtm->rtm_protocol = fi->fib_protocol;
- if (rtm->rtm_dst_len)
- NLA_PUT_BE32(skb, RTA_DST, dst);
-
- if (fi->fib_priority)
- NLA_PUT_U32(skb, RTA_PRIORITY, fi->fib_priority);
-
+ if (rtm->rtm_dst_len &&
+ nla_put_be32(skb, RTA_DST, dst))
+ goto nla_put_failure;
+ if (fi->fib_priority &&
+ nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
+ goto nla_put_failure;
if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
goto nla_put_failure;
- if (fi->fib_prefsrc)
- NLA_PUT_BE32(skb, RTA_PREFSRC, fi->fib_prefsrc);
-
+ if (fi->fib_prefsrc &&
+ nla_put_be32(skb, RTA_PREFSRC, fi->fib_prefsrc))
+ goto nla_put_failure;
if (fi->fib_nhs == 1) {
- if (fi->fib_nh->nh_gw)
- NLA_PUT_BE32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw);
-
- if (fi->fib_nh->nh_oif)
- NLA_PUT_U32(skb, RTA_OIF, fi->fib_nh->nh_oif);
+ if (fi->fib_nh->nh_gw &&
+ nla_put_be32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw))
+ goto nla_put_failure;
+ if (fi->fib_nh->nh_oif &&
+ nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif))
+ goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
- if (fi->fib_nh[0].nh_tclassid)
- NLA_PUT_U32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid);
+ if (fi->fib_nh[0].nh_tclassid &&
+ nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
+ goto nla_put_failure;
#endif
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -978,11 +981,13 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
rtnh->rtnh_hops = nh->nh_weight - 1;
rtnh->rtnh_ifindex = nh->nh_oif;
- if (nh->nh_gw)
- NLA_PUT_BE32(skb, RTA_GATEWAY, nh->nh_gw);
+ if (nh->nh_gw &&
+ nla_put_be32(skb, RTA_GATEWAY, nh->nh_gw))
+ goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
- if (nh->nh_tclassid)
- NLA_PUT_U32(skb, RTA_FLOW, nh->nh_tclassid);
+ if (nh->nh_tclassid &&
+ nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
+ goto nla_put_failure;
#endif
/* length of rtnetlink header + attributes */
rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
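Moving dev_put() and release_net() from free_fib_info() into free_fib_info_rcu() matters because route lookups walk fib_info nexthops under rcu_read_lock(): a reader may still dereference nh_dev after free_fib_info() returns, so the references have to survive until the grace period ends. The general shape of the pattern, with illustrative (non-kernel) names:

	struct obj {
		struct net_device *dev;
		struct rcu_head rcu;
	};

	static void obj_free_rcu(struct rcu_head *head)
	{
		struct obj *o = container_of(head, struct obj, rcu);

		dev_put(o->dev);	/* safe: no RCU reader can reach o now */
		kfree(o);
	}

	static void obj_release(struct obj *o)
	{
		/* readers inside a grace period may still use o->dev */
		call_rcu(&o->rcu, obj_free_rcu);
	}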
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index bce36f1a37b4..30b88d7b4bd6 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1370,6 +1370,8 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
continue;
+ if (fi->fib_dead)
+ continue;
if (fa->fa_info->fib_scope < flp->flowi4_scope)
continue;
fib_alias_accessed(fa);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 2cb2bf845641..c75efbdc71cb 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -713,11 +713,10 @@ static void icmp_unreach(struct sk_buff *skb)
if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses &&
inet_addr_type(net, iph->daddr) == RTN_BROADCAST) {
- if (net_ratelimit())
- pr_warn("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n",
- &ip_hdr(skb)->saddr,
- icmph->type, icmph->code,
- &iph->daddr, skb->dev->name);
+ net_warn_ratelimited("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n",
+ &ip_hdr(skb)->saddr,
+ icmph->type, icmph->code,
+ &iph->daddr, skb->dev->name);
goto out;
}
@@ -906,8 +905,7 @@ out_err:
static void icmp_address(struct sk_buff *skb)
{
#if 0
- if (net_ratelimit())
- printk(KERN_DEBUG "a guy asks for address mask. Who is it?\n");
+ net_dbg_ratelimited("a guy asks for address mask. Who is it?\n");
#endif
}
@@ -943,10 +941,10 @@ static void icmp_address_reply(struct sk_buff *skb)
inet_ifa_match(ip_hdr(skb)->saddr, ifa))
break;
}
- if (!ifa && net_ratelimit()) {
- pr_info("Wrong address mask %pI4 from %s/%pI4\n",
- mp, dev->name, &ip_hdr(skb)->saddr);
- }
+ if (!ifa)
+ net_info_ratelimited("Wrong address mask %pI4 from %s/%pI4\n",
+ mp,
+ dev->name, &ip_hdr(skb)->saddr);
}
}
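The net_*_ratelimited() helpers used above collapse the recurring "if (net_ratelimit()) pr_xxx(...)" idiom into one call; they expand to roughly the following (simplified sketch of the real macro, which goes through net_ratelimited_function()):

	#define net_warn_ratelimited(fmt, ...)			\
	do {							\
		if (net_ratelimit())				\
			pr_warn(fmt, ##__VA_ARGS__);		\
	} while (0)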
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 5dfecfd7d5e9..6699f23e6f55 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -344,10 +344,10 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
pip->protocol = IPPROTO_IGMP;
pip->tot_len = 0; /* filled in later */
ip_select_ident(pip, &rt->dst, NULL);
- ((u8*)&pip[1])[0] = IPOPT_RA;
- ((u8*)&pip[1])[1] = 4;
- ((u8*)&pip[1])[2] = 0;
- ((u8*)&pip[1])[3] = 0;
+ ((u8 *)&pip[1])[0] = IPOPT_RA;
+ ((u8 *)&pip[1])[1] = 4;
+ ((u8 *)&pip[1])[2] = 0;
+ ((u8 *)&pip[1])[3] = 0;
skb->transport_header = skb->network_header + sizeof(struct iphdr) + 4;
skb_put(skb, sizeof(*pig));
@@ -688,10 +688,10 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
iph->saddr = fl4.saddr;
iph->protocol = IPPROTO_IGMP;
ip_select_ident(iph, &rt->dst, NULL);
- ((u8*)&iph[1])[0] = IPOPT_RA;
- ((u8*)&iph[1])[1] = 4;
- ((u8*)&iph[1])[2] = 0;
- ((u8*)&iph[1])[3] = 0;
+ ((u8 *)&iph[1])[0] = IPOPT_RA;
+ ((u8 *)&iph[1])[1] = 4;
+ ((u8 *)&iph[1])[2] = 0;
+ ((u8 *)&iph[1])[3] = 0;
ih = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
ih->type = type;
@@ -774,7 +774,7 @@ static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
if (psf->sf_count[MCAST_INCLUDE] ||
pmc->sfcount[MCAST_EXCLUDE] !=
psf->sf_count[MCAST_EXCLUDE])
- continue;
+ break;
if (srcs[i] == psf->sf_inaddr) {
scount++;
break;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 19d66cefd7d3..95e61596e605 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -42,7 +42,8 @@ EXPORT_SYMBOL(sysctl_local_reserved_ports);
void inet_get_local_port_range(int *low, int *high)
{
- unsigned seq;
+ unsigned int seq;
+
do {
seq = read_seqbegin(&sysctl_local_ports.lock);
@@ -53,7 +54,7 @@ void inet_get_local_port_range(int *low, int *high)
EXPORT_SYMBOL(inet_get_local_port_range);
int inet_csk_bind_conflict(const struct sock *sk,
- const struct inet_bind_bucket *tb)
+ const struct inet_bind_bucket *tb, bool relax)
{
struct sock *sk2;
struct hlist_node *node;
@@ -79,6 +80,14 @@ int inet_csk_bind_conflict(const struct sock *sk,
sk2_rcv_saddr == sk_rcv_saddr(sk))
break;
}
+ if (!relax && reuse && sk2->sk_reuse &&
+ sk2->sk_state != TCP_LISTEN) {
+ const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
+
+ if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
+ sk2_rcv_saddr == sk_rcv_saddr(sk))
+ break;
+ }
}
}
return node != NULL;
@@ -122,12 +131,13 @@ again:
(tb->num_owners < smallest_size || smallest_size == -1)) {
smallest_size = tb->num_owners;
smallest_rover = rover;
- if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
+ if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
+ !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
snum = smallest_rover;
goto tb_found;
}
}
- if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
+ if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
snum = rover;
goto tb_found;
}
@@ -172,18 +182,22 @@ have_snum:
goto tb_not_found;
tb_found:
if (!hlist_empty(&tb->owners)) {
+ if (sk->sk_reuse == SK_FORCE_REUSE)
+ goto success;
+
if (tb->fastreuse > 0 &&
sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
smallest_size == -1) {
goto success;
} else {
ret = 1;
- if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
+ if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) {
if (sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
smallest_size != -1 && --attempts >= 0) {
spin_unlock(&head->lock);
goto again;
}
+
goto fail_unlock;
}
}
@@ -514,7 +528,7 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
/* Normally all the openreqs are young and become mature
* (i.e. converted to established socket) for first timeout.
- * If synack was not acknowledged for 3 seconds, it means
+ * If synack was not acknowledged for 1 second, it means
* one of the following things: synack was lost, ack was lost,
* rtt is high or nobody planned to ack (i.e. synflood).
* When server is a bit loaded, queue is populated with old
@@ -555,8 +569,7 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
syn_ack_recalc(req, thresh, max_retries,
queue->rskq_defer_accept,
&expire, &resend);
- if (req->rsk_ops->syn_ack_timeout)
- req->rsk_ops->syn_ack_timeout(parent, req);
+ req->rsk_ops->syn_ack_timeout(parent, req);
if (!expire &&
(!resend ||
!req->rsk_ops->rtx_syn_ack(parent, req, NULL) ||
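The new relax argument tightens automatic port selection: while inet_csk_get_port() is searching for a free port it passes relax = false, so even two mutually reusable (SO_REUSEADDR, non-listening) sockets count as a conflict and the kernel will not hand out such a port on its own; the final check for an explicitly requested port passes relax = true, keeping the long-standing userspace behavior. A userspace sketch of that preserved behavior (error handling elided):

	#include <netinet/in.h>
	#include <sys/socket.h>

	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(8080),		/* example port */
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int one = 1;

	int a = socket(AF_INET, SOCK_STREAM, 0);
	setsockopt(a, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
	bind(a, (struct sockaddr *)&addr, sizeof(addr));

	int b = socket(AF_INET, SOCK_STREAM, 0);
	setsockopt(b, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
	bind(b, (struct sockaddr *)&addr, sizeof(addr));	/* still succeeds:
							 * neither socket listens */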
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 8d25a1c557eb..46d1e7199a8c 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -141,7 +141,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
goto rtattr_failure;
if (icsk == NULL) {
- r->idiag_rqueue = r->idiag_wqueue = 0;
+ handler->idiag_get_info(sk, r, NULL);
goto out;
}
@@ -999,12 +999,12 @@ static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
return inet_diag_get_exact(skb, h, (struct inet_diag_req_v2 *)NLMSG_DATA(h));
}
-static struct sock_diag_handler inet_diag_handler = {
+static const struct sock_diag_handler inet_diag_handler = {
.family = AF_INET,
.dump = inet_diag_handler_dump,
};
-static struct sock_diag_handler inet6_diag_handler = {
+static const struct sock_diag_handler inet6_diag_handler = {
.family = AF_INET6,
.dump = inet_diag_handler_dump,
};
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 984ec656b03b..7880af970208 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -217,7 +217,7 @@ begin:
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);
-struct sock * __inet_lookup_established(struct net *net,
+struct sock *__inet_lookup_established(struct net *net,
struct inet_hashinfo *hashinfo,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const u16 hnum,
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 89168c6351ff..2784db3155fb 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -89,8 +89,8 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw,
#ifdef SOCK_REFCNT_DEBUG
if (atomic_read(&tw->tw_refcnt) != 1) {
- printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
- tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
+ pr_debug("%s timewait_sock %p refcnt=%d\n",
+ tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
}
#endif
while (refcnt) {
@@ -263,7 +263,7 @@ rescan:
void inet_twdr_hangman(unsigned long data)
{
struct inet_timewait_death_row *twdr;
- int unsigned need_timer;
+ unsigned int need_timer;
twdr = (struct inet_timewait_death_row *)data;
spin_lock(&twdr->death_lock);
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 29a07b6c7168..e5c44fc586ab 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -41,7 +41,7 @@
static int ip_forward_finish(struct sk_buff *skb)
{
- struct ip_options * opt = &(IPCB(skb)->opt);
+ struct ip_options *opt = &(IPCB(skb)->opt);
IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
@@ -55,7 +55,7 @@ int ip_forward(struct sk_buff *skb)
{
struct iphdr *iph; /* Our header */
struct rtable *rt; /* Route we use */
- struct ip_options * opt = &(IPCB(skb)->opt);
+ struct ip_options *opt = &(IPCB(skb)->opt);
if (skb_warn_if_lro(skb))
goto drop;
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 3727e234c884..9dbd3dd6022d 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -148,17 +148,17 @@ static unsigned int ip4_hashfn(struct inet_frag_queue *q)
return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
}
-static int ip4_frag_match(struct inet_frag_queue *q, void *a)
+static bool ip4_frag_match(struct inet_frag_queue *q, void *a)
{
struct ipq *qp;
struct ip4_create_arg *arg = a;
qp = container_of(q, struct ipq, q);
return qp->id == arg->iph->id &&
- qp->saddr == arg->iph->saddr &&
- qp->daddr == arg->iph->daddr &&
- qp->protocol == arg->iph->protocol &&
- qp->user == arg->user;
+ qp->saddr == arg->iph->saddr &&
+ qp->daddr == arg->iph->daddr &&
+ qp->protocol == arg->iph->protocol &&
+ qp->user == arg->user;
}
/* Memory Tracking Functions. */
@@ -545,6 +545,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
int len;
int ihlen;
int err;
+ int sum_truesize;
u8 ecn;
ipq_kill(qp);
@@ -569,7 +570,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
skb_morph(head, qp->q.fragments);
head->next = qp->q.fragments->next;
- kfree_skb(qp->q.fragments);
+ consume_skb(qp->q.fragments);
qp->q.fragments = head;
}
@@ -611,19 +612,32 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
atomic_add(clone->truesize, &qp->q.net->mem);
}
- skb_shinfo(head)->frag_list = head->next;
skb_push(head, head->data - skb_network_header(head));
- for (fp=head->next; fp; fp = fp->next) {
- head->data_len += fp->len;
- head->len += fp->len;
+ sum_truesize = head->truesize;
+ for (fp = head->next; fp;) {
+ bool headstolen;
+ int delta;
+ struct sk_buff *next = fp->next;
+
+ sum_truesize += fp->truesize;
if (head->ip_summed != fp->ip_summed)
head->ip_summed = CHECKSUM_NONE;
else if (head->ip_summed == CHECKSUM_COMPLETE)
head->csum = csum_add(head->csum, fp->csum);
- head->truesize += fp->truesize;
+
+ if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
+ kfree_skb_partial(fp, headstolen);
+ } else {
+ if (!skb_shinfo(head)->frag_list)
+ skb_shinfo(head)->frag_list = fp;
+ head->data_len += fp->len;
+ head->len += fp->len;
+ head->truesize += fp->truesize;
+ }
+ fp = next;
}
- atomic_sub(head->truesize, &qp->q.net->mem);
+ atomic_sub(sum_truesize, &qp->q.net->mem);
head->next = NULL;
head->dev = dev;
@@ -644,8 +658,7 @@ out_nomem:
err = -ENOMEM;
goto out_fail;
out_oversize:
- if (net_ratelimit())
- pr_info("Oversized IP packet from %pI4\n", &qp->saddr);
+ net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
out_fail:
IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
return err;
@@ -782,7 +795,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
table[2].data = &net->ipv4.frags.timeout;
}
- hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table);
+ hdr = register_net_sysctl(net, "net/ipv4", table);
if (hdr == NULL)
goto err_reg;
@@ -807,7 +820,7 @@ static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
static void ip4_frags_ctl_register(void)
{
- register_net_sysctl_rotable(net_ipv4_ctl_path, ip4_frags_ctl_table);
+ register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
}
#else
static inline int ip4_frags_ns_ctl_register(struct net *net)
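The reassembly rewrite does two things: truesize is now charged back in a single atomic_sub() of sum_truesize instead of per fragment, and each fragment is first offered to skb_try_coalesce() before falling back to the old frag_list chaining. The helper's contract as relied on here (sketch):

	bool stolen;
	int delta;

	/* on success fp's payload has been merged into head's page frags;
	 * delta returns the truesize the caller should account, and fp is
	 * freed, keeping its skb head only if the merge "stole" it */
	if (skb_try_coalesce(head, fp, &stolen, &delta))
		kfree_skb_partial(fp, stolen);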
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index b57532d4742c..f49047b79609 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -169,37 +169,56 @@ struct ipgre_net {
/* often modified stats are per cpu, others are shared (netdev->stats) */
struct pcpu_tstats {
- unsigned long rx_packets;
- unsigned long rx_bytes;
- unsigned long tx_packets;
- unsigned long tx_bytes;
-} __attribute__((aligned(4*sizeof(unsigned long))));
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+};
-static struct net_device_stats *ipgre_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *tot)
{
- struct pcpu_tstats sum = { 0 };
int i;
for_each_possible_cpu(i) {
const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
-
- sum.rx_packets += tstats->rx_packets;
- sum.rx_bytes += tstats->rx_bytes;
- sum.tx_packets += tstats->tx_packets;
- sum.tx_bytes += tstats->tx_bytes;
+ u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_bh(&tstats->syncp);
+ rx_packets = tstats->rx_packets;
+ tx_packets = tstats->tx_packets;
+ rx_bytes = tstats->rx_bytes;
+ tx_bytes = tstats->tx_bytes;
+ } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+
+ tot->rx_packets += rx_packets;
+ tot->tx_packets += tx_packets;
+ tot->rx_bytes += rx_bytes;
+ tot->tx_bytes += tx_bytes;
}
- dev->stats.rx_packets = sum.rx_packets;
- dev->stats.rx_bytes = sum.rx_bytes;
- dev->stats.tx_packets = sum.tx_packets;
- dev->stats.tx_bytes = sum.tx_bytes;
- return &dev->stats;
+
+ tot->multicast = dev->stats.multicast;
+ tot->rx_crc_errors = dev->stats.rx_crc_errors;
+ tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
+ tot->rx_length_errors = dev->stats.rx_length_errors;
+ tot->rx_errors = dev->stats.rx_errors;
+ tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
+ tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
+ tot->tx_dropped = dev->stats.tx_dropped;
+ tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
+ tot->tx_errors = dev->stats.tx_errors;
+
+ return tot;
}
/* Given src, dst and key, find appropriate for input tunnel. */
-static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev,
- __be32 remote, __be32 local,
- __be32 key, __be16 gre_proto)
+static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
+ __be32 remote, __be32 local,
+ __be32 key, __be16 gre_proto)
{
struct net *net = dev_net(dev);
int link = dev->ifindex;
@@ -464,7 +483,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
*/
const struct iphdr *iph = (const struct iphdr *)skb->data;
- __be16 *p = (__be16*)(skb->data+(iph->ihl<<2));
+ __be16 *p = (__be16 *)(skb->data+(iph->ihl<<2));
int grehlen = (iph->ihl<<2) + 4;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
@@ -574,7 +593,7 @@ static int ipgre_rcv(struct sk_buff *skb)
iph = ip_hdr(skb);
h = skb->data;
- flags = *(__be16*)h;
+ flags = *(__be16 *)h;
if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
/* - Version must be 0.
@@ -598,11 +617,11 @@ static int ipgre_rcv(struct sk_buff *skb)
offset += 4;
}
if (flags&GRE_KEY) {
- key = *(__be32*)(h + offset);
+ key = *(__be32 *)(h + offset);
offset += 4;
}
if (flags&GRE_SEQ) {
- seqno = ntohl(*(__be32*)(h + offset));
+ seqno = ntohl(*(__be32 *)(h + offset));
offset += 4;
}
}
@@ -672,8 +691,10 @@ static int ipgre_rcv(struct sk_buff *skb)
}
tstats = this_cpu_ptr(tunnel->dev->tstats);
+ u64_stats_update_begin(&tstats->syncp);
tstats->rx_packets++;
tstats->rx_bytes += skb->len;
+ u64_stats_update_end(&tstats->syncp);
__skb_tunnel_rx(skb, tunnel->dev);
@@ -900,7 +921,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
htons(ETH_P_TEB) : skb->protocol;
if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
- __be32 *ptr = (__be32*)(((u8*)iph) + tunnel->hlen - 4);
+ __be32 *ptr = (__be32 *)(((u8 *)iph) + tunnel->hlen - 4);
if (tunnel->parms.o_flags&GRE_SEQ) {
++tunnel->o_seqno;
@@ -913,7 +934,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
}
if (tunnel->parms.o_flags&GRE_CSUM) {
*ptr = 0;
- *(__sum16*)ptr = ip_compute_csum((void*)(iph+1), skb->len - sizeof(struct iphdr));
+ *(__sum16 *)ptr = ip_compute_csum((void *)(iph+1), skb->len - sizeof(struct iphdr));
}
}
@@ -1169,7 +1190,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
{
struct ip_tunnel *t = netdev_priv(dev);
struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
- __be16 *p = (__be16*)(iph+1);
+ __be16 *p = (__be16 *)(iph+1);
memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
p[0] = t->parms.o_flags;
@@ -1253,7 +1274,7 @@ static const struct net_device_ops ipgre_netdev_ops = {
.ndo_start_xmit = ipgre_tunnel_xmit,
.ndo_do_ioctl = ipgre_tunnel_ioctl,
.ndo_change_mtu = ipgre_tunnel_change_mtu,
- .ndo_get_stats = ipgre_get_stats,
+ .ndo_get_stats64 = ipgre_get_stats64,
};
static void ipgre_dev_free(struct net_device *dev)
@@ -1507,7 +1528,7 @@ static const struct net_device_ops ipgre_tap_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = ipgre_tunnel_change_mtu,
- .ndo_get_stats = ipgre_get_stats,
+ .ndo_get_stats64 = ipgre_get_stats64,
};
static void ipgre_tap_setup(struct net_device *dev)
@@ -1654,17 +1675,18 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
struct ip_tunnel *t = netdev_priv(dev);
struct ip_tunnel_parm *p = &t->parms;
- NLA_PUT_U32(skb, IFLA_GRE_LINK, p->link);
- NLA_PUT_BE16(skb, IFLA_GRE_IFLAGS, p->i_flags);
- NLA_PUT_BE16(skb, IFLA_GRE_OFLAGS, p->o_flags);
- NLA_PUT_BE32(skb, IFLA_GRE_IKEY, p->i_key);
- NLA_PUT_BE32(skb, IFLA_GRE_OKEY, p->o_key);
- NLA_PUT_BE32(skb, IFLA_GRE_LOCAL, p->iph.saddr);
- NLA_PUT_BE32(skb, IFLA_GRE_REMOTE, p->iph.daddr);
- NLA_PUT_U8(skb, IFLA_GRE_TTL, p->iph.ttl);
- NLA_PUT_U8(skb, IFLA_GRE_TOS, p->iph.tos);
- NLA_PUT_U8(skb, IFLA_GRE_PMTUDISC, !!(p->iph.frag_off & htons(IP_DF)));
-
+ if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
+ nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) ||
+ nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
+ nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
+ nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
+ nla_put_be32(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
+ nla_put_be32(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
+ nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
+ nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
+ nla_put_u8(skb, IFLA_GRE_PMTUDISC,
+ !!(p->iph.frag_off & htons(IP_DF))))
+ goto nla_put_failure;
return 0;
nla_put_failure:
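The pcpu_tstats conversion here (and the identical one in ipip.c below) widens the counters to u64 and guards them with a u64_stats_sync so 32-bit readers cannot observe torn 64-bit values; on 64-bit builds the seqcount compiles away. The writer/reader pairing, as a sketch:

	/* writer side, per cpu (e.g. the tunnel rx path) */
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	/* reader side: retry if a writer was mid-update */
	unsigned int start;
	u64 packets;

	do {
		start = u64_stats_fetch_begin_bh(&tstats->syncp);
		packets = tstats->rx_packets;
	} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));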
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 26eccc5bab1c..8590144ca330 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -210,9 +210,8 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
int ret;
if (!net_eq(net, &init_net) && !ipprot->netns_ok) {
- if (net_ratelimit())
- printk("%s: proto %d isn't netns-ready\n",
- __func__, protocol);
+ net_info_ratelimited("%s: proto %d isn't netns-ready\n",
+ __func__, protocol);
kfree_skb(skb);
goto out;
}
@@ -298,10 +297,10 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
if (in_dev) {
if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
- if (IN_DEV_LOG_MARTIANS(in_dev) &&
- net_ratelimit())
- pr_info("source route option %pI4 -> %pI4\n",
- &iph->saddr, &iph->daddr);
+ if (IN_DEV_LOG_MARTIANS(in_dev))
+ net_info_ratelimited("source route option %pI4 -> %pI4\n",
+ &iph->saddr,
+ &iph->daddr);
goto drop;
}
}
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index a0d0d9d9b870..708b99494e23 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -210,10 +210,10 @@ int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
* Simple and stupid 8), but the most efficient way.
*/
-void ip_options_fragment(struct sk_buff * skb)
+void ip_options_fragment(struct sk_buff *skb)
{
unsigned char *optptr = skb_network_header(skb) + sizeof(struct iphdr);
- struct ip_options * opt = &(IPCB(skb)->opt);
+ struct ip_options *opt = &(IPCB(skb)->opt);
int l = opt->optlen;
int optlen;
@@ -248,13 +248,13 @@ void ip_options_fragment(struct sk_buff * skb)
*/
int ip_options_compile(struct net *net,
- struct ip_options * opt, struct sk_buff * skb)
+ struct ip_options *opt, struct sk_buff *skb)
{
int l;
- unsigned char * iph;
- unsigned char * optptr;
+ unsigned char *iph;
+ unsigned char *optptr;
int optlen;
- unsigned char * pp_ptr = NULL;
+ unsigned char *pp_ptr = NULL;
struct rtable *rt = NULL;
if (skb != NULL) {
@@ -413,7 +413,7 @@ int ip_options_compile(struct net *net,
opt->is_changed = 1;
}
} else {
- unsigned overflow = optptr[3]>>4;
+ unsigned int overflow = optptr[3]>>4;
if (overflow == 15) {
pp_ptr = optptr + 3;
goto error;
@@ -473,20 +473,20 @@ EXPORT_SYMBOL(ip_options_compile);
* Undo all the changes done by ip_options_compile().
*/
-void ip_options_undo(struct ip_options * opt)
+void ip_options_undo(struct ip_options *opt)
{
if (opt->srr) {
- unsigned char * optptr = opt->__data+opt->srr-sizeof(struct iphdr);
+ unsigned char *optptr = opt->__data+opt->srr-sizeof(struct iphdr);
memmove(optptr+7, optptr+3, optptr[1]-7);
memcpy(optptr+3, &opt->faddr, 4);
}
if (opt->rr_needaddr) {
- unsigned char * optptr = opt->__data+opt->rr-sizeof(struct iphdr);
+ unsigned char *optptr = opt->__data+opt->rr-sizeof(struct iphdr);
optptr[2] -= 4;
memset(&optptr[optptr[2]-1], 0, 4);
}
if (opt->ts) {
- unsigned char * optptr = opt->__data+opt->ts-sizeof(struct iphdr);
+ unsigned char *optptr = opt->__data+opt->ts-sizeof(struct iphdr);
if (opt->ts_needtime) {
optptr[2] -= 4;
memset(&optptr[optptr[2]-1], 0, 4);
@@ -549,8 +549,8 @@ int ip_options_get(struct net *net, struct ip_options_rcu **optp,
void ip_forward_options(struct sk_buff *skb)
{
- struct ip_options * opt = &(IPCB(skb)->opt);
- unsigned char * optptr;
+ struct ip_options *opt = &(IPCB(skb)->opt);
+ unsigned char *optptr;
struct rtable *rt = skb_rtable(skb);
unsigned char *raw = skb_network_header(skb);
@@ -578,8 +578,10 @@ void ip_forward_options(struct sk_buff *skb)
ip_hdr(skb)->daddr = opt->nexthop;
ip_rt_get_source(&optptr[srrptr-1], skb, rt);
optptr[2] = srrptr+4;
- } else if (net_ratelimit())
- pr_crit("%s(): Argh! Destination lost!\n", __func__);
+ } else {
+ net_crit_ratelimited("%s(): Argh! Destination lost!\n",
+ __func__);
+ }
if (opt->ts_needaddr) {
optptr = raw + opt->ts;
ip_rt_get_source(&optptr[optptr[2]-9], skb, rt);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 4910176d24ed..451f97c42eb4 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -214,8 +214,8 @@ static inline int ip_finish_output2(struct sk_buff *skb)
}
rcu_read_unlock();
- if (net_ratelimit())
- printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
+ net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
+ __func__);
kfree_skb(skb);
return -EINVAL;
}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 2fd0fba77124..0d11f234d615 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -90,7 +90,7 @@ static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
{
unsigned char optbuf[sizeof(struct ip_options) + 40];
- struct ip_options * opt = (struct ip_options *)optbuf;
+ struct ip_options *opt = (struct ip_options *)optbuf;
if (IPCB(skb)->opt.optlen == 0)
return;
@@ -147,7 +147,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
struct inet_sock *inet = inet_sk(skb->sk);
- unsigned flags = inet->cmsg_flags;
+ unsigned int flags = inet->cmsg_flags;
/* Ordered by supposed usage frequency */
if (flags & 1)
@@ -673,10 +673,15 @@ static int do_ip_setsockopt(struct sock *sk, int level,
break;
} else {
memset(&mreq, 0, sizeof(mreq));
- if (optlen >= sizeof(struct in_addr) &&
- copy_from_user(&mreq.imr_address, optval,
- sizeof(struct in_addr)))
- break;
+ if (optlen >= sizeof(struct ip_mreq)) {
+ if (copy_from_user(&mreq, optval,
+ sizeof(struct ip_mreq)))
+ break;
+ } else if (optlen >= sizeof(struct in_addr)) {
+ if (copy_from_user(&mreq.imr_address, optval,
+ sizeof(struct in_addr)))
+ break;
+ }
}
if (!mreq.imr_ifindex) {
@@ -1094,7 +1099,7 @@ EXPORT_SYMBOL(compat_ip_setsockopt);
*/
static int do_ip_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen, unsigned flags)
+ char __user *optval, int __user *optlen, unsigned int flags)
{
struct inet_sock *inet = inet_sk(sk);
int val;
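The IP_MULTICAST_IF hunk above fixes option-size handling: when userspace hands in a full struct ip_mreq, both of its fields are now copied, so the outgoing interface is taken from imr_interface rather than from the first four bytes of the buffer (which hold the group address). A userspace sketch of the now-correct call; fd is an assumed, already-created UDP socket and the addresses are examples:

	#include <arpa/inet.h>
	#include <netinet/in.h>
	#include <sys/socket.h>

	struct ip_mreq mreq = {
		.imr_multiaddr.s_addr = inet_addr("239.1.1.1"),
		.imr_interface.s_addr = inet_addr("192.168.1.10"),
	};
	setsockopt(fd, IPPROTO_IP, IP_MULTICAST_IF, &mreq, sizeof(mreq));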
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 92ac7e7363a0..67e8a6b086ea 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -808,8 +808,6 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
b->op = BOOTP_REQUEST;
if (dev->type < 256) /* check for false types */
b->htype = dev->type;
- else if (dev->type == ARPHRD_IEEE802_TR) /* fix for token ring */
- b->htype = ARPHRD_IEEE802;
else if (dev->type == ARPHRD_FDDI)
b->htype = ARPHRD_ETHER;
else {
@@ -955,8 +953,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
/* Fragments are not supported */
if (ip_is_fragment(h)) {
- if (net_ratelimit())
- pr_err("DHCP/BOOTP: Ignoring fragmented reply\n");
+ net_err_ratelimited("DHCP/BOOTP: Ignoring fragmented reply\n");
goto drop;
}
@@ -1004,16 +1001,14 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
/* Is it a reply to our BOOTP request? */
if (b->op != BOOTP_REPLY ||
b->xid != d->xid) {
- if (net_ratelimit())
- pr_err("DHCP/BOOTP: Reply not for us, op[%x] xid[%x]\n",
- b->op, b->xid);
+ net_err_ratelimited("DHCP/BOOTP: Reply not for us, op[%x] xid[%x]\n",
+ b->op, b->xid);
goto drop_unlock;
}
/* Is it a reply for the device we are configuring? */
if (b->xid != ic_dev_xid) {
- if (net_ratelimit())
- pr_err("DHCP/BOOTP: Ignoring delayed packet\n");
+ net_err_ratelimited("DHCP/BOOTP: Ignoring delayed packet\n");
goto drop_unlock;
}
@@ -1198,7 +1193,7 @@ static int __init ic_dynamic(void)
d = ic_first_dev;
retries = CONF_SEND_RETRIES;
get_random_bytes(&timeout, sizeof(timeout));
- timeout = CONF_BASE_TIMEOUT + (timeout % (unsigned) CONF_TIMEOUT_RANDOM);
+ timeout = CONF_BASE_TIMEOUT + (timeout % (unsigned int) CONF_TIMEOUT_RANDOM);
for (;;) {
/* Track the device we are configuring */
ic_dev_xid = d->xid;
@@ -1626,11 +1621,13 @@ static int __init ip_auto_config_setup(char *addrs)
return 1;
}
+__setup("ip=", ip_auto_config_setup);
static int __init nfsaddrs_config_setup(char *addrs)
{
return ip_auto_config_setup(addrs);
}
+__setup("nfsaddrs=", nfsaddrs_config_setup);
static int __init vendor_class_identifier_setup(char *addrs)
{
@@ -1641,7 +1638,4 @@ static int __init vendor_class_identifier_setup(char *addrs)
vendor_class_identifier);
return 1;
}
-
-__setup("ip=", ip_auto_config_setup);
-__setup("nfsaddrs=", nfsaddrs_config_setup);
__setup("dhcpclass=", vendor_class_identifier_setup);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index ae1413e3f2f8..2d0f99bf61b3 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -144,33 +144,48 @@ static void ipip_dev_free(struct net_device *dev);
/* often modified stats are per cpu, others are shared (netdev->stats) */
struct pcpu_tstats {
- unsigned long rx_packets;
- unsigned long rx_bytes;
- unsigned long tx_packets;
- unsigned long tx_bytes;
-} __attribute__((aligned(4*sizeof(unsigned long))));
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+};
-static struct net_device_stats *ipip_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *ipip_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *tot)
{
- struct pcpu_tstats sum = { 0 };
int i;
for_each_possible_cpu(i) {
const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
-
- sum.rx_packets += tstats->rx_packets;
- sum.rx_bytes += tstats->rx_bytes;
- sum.tx_packets += tstats->tx_packets;
- sum.tx_bytes += tstats->tx_bytes;
+ u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_bh(&tstats->syncp);
+ rx_packets = tstats->rx_packets;
+ tx_packets = tstats->tx_packets;
+ rx_bytes = tstats->rx_bytes;
+ tx_bytes = tstats->tx_bytes;
+ } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+
+ tot->rx_packets += rx_packets;
+ tot->tx_packets += tx_packets;
+ tot->rx_bytes += rx_bytes;
+ tot->tx_bytes += tx_bytes;
}
- dev->stats.rx_packets = sum.rx_packets;
- dev->stats.rx_bytes = sum.rx_bytes;
- dev->stats.tx_packets = sum.tx_packets;
- dev->stats.tx_bytes = sum.tx_bytes;
- return &dev->stats;
+
+ tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
+ tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
+ tot->tx_dropped = dev->stats.tx_dropped;
+ tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
+ tot->tx_errors = dev->stats.tx_errors;
+ tot->collisions = dev->stats.collisions;
+
+ return tot;
}
-static struct ip_tunnel * ipip_tunnel_lookup(struct net *net,
+static struct ip_tunnel *ipip_tunnel_lookup(struct net *net,
__be32 remote, __be32 local)
{
unsigned int h0 = HASH(remote);
@@ -245,7 +260,7 @@ static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t)
rcu_assign_pointer(*tp, t);
}
-static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
+static struct ip_tunnel *ipip_tunnel_locate(struct net *net,
struct ip_tunnel_parm *parms, int create)
{
__be32 remote = parms->iph.daddr;
@@ -404,8 +419,10 @@ static int ipip_rcv(struct sk_buff *skb)
skb->pkt_type = PACKET_HOST;
tstats = this_cpu_ptr(tunnel->dev->tstats);
+ u64_stats_update_begin(&tstats->syncp);
tstats->rx_packets++;
tstats->rx_bytes += skb->len;
+ u64_stats_update_end(&tstats->syncp);
__skb_tunnel_rx(skb, tunnel->dev);
@@ -730,7 +747,7 @@ static const struct net_device_ops ipip_netdev_ops = {
.ndo_start_xmit = ipip_tunnel_xmit,
.ndo_do_ioctl = ipip_tunnel_ioctl,
.ndo_change_mtu = ipip_tunnel_change_mtu,
- .ndo_get_stats = ipip_get_stats,
+ .ndo_get_stats64 = ipip_get_stats64,
};
static void ipip_dev_free(struct net_device *dev)
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 960fbfc3e976..a9e519ad6db5 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -949,8 +949,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
ret = sock_queue_rcv_skb(mroute_sk, skb);
rcu_read_unlock();
if (ret < 0) {
- if (net_ratelimit())
- pr_warn("mroute: pending queue full, dropping entries\n");
+ net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
kfree_skb(skb);
}
@@ -2119,15 +2118,16 @@ static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
rtm->rtm_src_len = 32;
rtm->rtm_tos = 0;
rtm->rtm_table = mrt->id;
- NLA_PUT_U32(skb, RTA_TABLE, mrt->id);
+ if (nla_put_u32(skb, RTA_TABLE, mrt->id))
+ goto nla_put_failure;
rtm->rtm_type = RTN_MULTICAST;
rtm->rtm_scope = RT_SCOPE_UNIVERSE;
rtm->rtm_protocol = RTPROT_UNSPEC;
rtm->rtm_flags = 0;
- NLA_PUT_BE32(skb, RTA_SRC, c->mfc_origin);
- NLA_PUT_BE32(skb, RTA_DST, c->mfc_mcastgrp);
-
+ if (nla_put_be32(skb, RTA_SRC, c->mfc_origin) ||
+ nla_put_be32(skb, RTA_DST, c->mfc_mcastgrp))
+ goto nla_put_failure;
if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0)
goto nla_put_failure;
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 4f47e064e262..ed1b36783192 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -12,7 +12,7 @@
#include <net/netfilter/nf_queue.h>
/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
-int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
+int ip_route_me_harder(struct sk_buff *skb, unsigned int addr_type)
{
struct net *net = dev_net(skb_dst(skb)->dev);
const struct iphdr *iph = ip_hdr(skb);
@@ -237,13 +237,3 @@ static void ipv4_netfilter_fini(void)
module_init(ipv4_netfilter_init);
module_exit(ipv4_netfilter_fini);
-
-#ifdef CONFIG_SYSCTL
-struct ctl_path nf_net_ipv4_netfilter_sysctl_path[] = {
- { .procname = "net", },
- { .procname = "ipv4", },
- { .procname = "netfilter", },
- { }
-};
-EXPORT_SYMBOL_GPL(nf_net_ipv4_netfilter_sysctl_path);
-#endif /* CONFIG_SYSCTL */
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 240b68469a7a..c20674dc9452 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -66,6 +66,3 @@ obj-$(CONFIG_IP_NF_ARP_MANGLE) += arpt_mangle.o
# just filtering instance of ARP tables for now
obj-$(CONFIG_IP_NF_ARPFILTER) += arptable_filter.o
-
-obj-$(CONFIG_IP_NF_QUEUE) += ip_queue.o
-
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index fd7a3f68917f..97e61eadf580 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -221,9 +221,8 @@ static inline int arp_checkentry(const struct arpt_arp *arp)
static unsigned int
arpt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
- if (net_ratelimit())
- pr_err("arp_tables: error: '%s'\n",
- (const char *)par->targinfo);
+ net_err_ratelimited("arp_tables: error: '%s'\n",
+ (const char *)par->targinfo);
return NF_DROP;
}
@@ -303,7 +302,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
if (v < 0) {
/* Pop from stack? */
if (v != XT_RETURN) {
- verdict = (unsigned)(-v) - 1;
+ verdict = (unsigned int)(-v) - 1;
break;
}
e = back;
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
deleted file mode 100644
index 94d45e1f8882..000000000000
--- a/net/ipv4/netfilter/ip_queue.c
+++ /dev/null
@@ -1,639 +0,0 @@
-/*
- * This is a module which is used for queueing IPv4 packets and
- * communicating with userspace via netlink.
- *
- * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
- * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <linux/ip.h>
-#include <linux/notifier.h>
-#include <linux/netdevice.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4/ip_queue.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netlink.h>
-#include <linux/spinlock.h>
-#include <linux/sysctl.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/security.h>
-#include <linux/net.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <net/net_namespace.h>
-#include <net/sock.h>
-#include <net/route.h>
-#include <net/netfilter/nf_queue.h>
-#include <net/ip.h>
-
-#define IPQ_QMAX_DEFAULT 1024
-#define IPQ_PROC_FS_NAME "ip_queue"
-#define NET_IPQ_QMAX 2088
-#define NET_IPQ_QMAX_NAME "ip_queue_maxlen"
-
-typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long);
-
-static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
-static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
-static DEFINE_SPINLOCK(queue_lock);
-static int peer_pid __read_mostly;
-static unsigned int copy_range __read_mostly;
-static unsigned int queue_total;
-static unsigned int queue_dropped = 0;
-static unsigned int queue_user_dropped = 0;
-static struct sock *ipqnl __read_mostly;
-static LIST_HEAD(queue_list);
-static DEFINE_MUTEX(ipqnl_mutex);
-
-static inline void
-__ipq_enqueue_entry(struct nf_queue_entry *entry)
-{
- list_add_tail(&entry->list, &queue_list);
- queue_total++;
-}
-
-static inline int
-__ipq_set_mode(unsigned char mode, unsigned int range)
-{
- int status = 0;
-
- switch(mode) {
- case IPQ_COPY_NONE:
- case IPQ_COPY_META:
- copy_mode = mode;
- copy_range = 0;
- break;
-
- case IPQ_COPY_PACKET:
- if (range > 0xFFFF)
- range = 0xFFFF;
- copy_range = range;
- copy_mode = mode;
- break;
-
- default:
- status = -EINVAL;
-
- }
- return status;
-}
-
-static void __ipq_flush(ipq_cmpfn cmpfn, unsigned long data);
-
-static inline void
-__ipq_reset(void)
-{
- peer_pid = 0;
- net_disable_timestamp();
- __ipq_set_mode(IPQ_COPY_NONE, 0);
- __ipq_flush(NULL, 0);
-}
-
-static struct nf_queue_entry *
-ipq_find_dequeue_entry(unsigned long id)
-{
- struct nf_queue_entry *entry = NULL, *i;
-
- spin_lock_bh(&queue_lock);
-
- list_for_each_entry(i, &queue_list, list) {
- if ((unsigned long)i == id) {
- entry = i;
- break;
- }
- }
-
- if (entry) {
- list_del(&entry->list);
- queue_total--;
- }
-
- spin_unlock_bh(&queue_lock);
- return entry;
-}
-
-static void
-__ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
-{
- struct nf_queue_entry *entry, *next;
-
- list_for_each_entry_safe(entry, next, &queue_list, list) {
- if (!cmpfn || cmpfn(entry, data)) {
- list_del(&entry->list);
- queue_total--;
- nf_reinject(entry, NF_DROP);
- }
- }
-}
-
-static void
-ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
-{
- spin_lock_bh(&queue_lock);
- __ipq_flush(cmpfn, data);
- spin_unlock_bh(&queue_lock);
-}
-
-static struct sk_buff *
-ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
-{
- sk_buff_data_t old_tail;
- size_t size = 0;
- size_t data_len = 0;
- struct sk_buff *skb;
- struct ipq_packet_msg *pmsg;
- struct nlmsghdr *nlh;
- struct timeval tv;
-
- switch (ACCESS_ONCE(copy_mode)) {
- case IPQ_COPY_META:
- case IPQ_COPY_NONE:
- size = NLMSG_SPACE(sizeof(*pmsg));
- break;
-
- case IPQ_COPY_PACKET:
- if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
- (*errp = skb_checksum_help(entry->skb)))
- return NULL;
-
- data_len = ACCESS_ONCE(copy_range);
- if (data_len == 0 || data_len > entry->skb->len)
- data_len = entry->skb->len;
-
- size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
- break;
-
- default:
- *errp = -EINVAL;
- return NULL;
- }
-
- skb = alloc_skb(size, GFP_ATOMIC);
- if (!skb)
- goto nlmsg_failure;
-
- old_tail = skb->tail;
- nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh));
- pmsg = NLMSG_DATA(nlh);
- memset(pmsg, 0, sizeof(*pmsg));
-
- pmsg->packet_id = (unsigned long )entry;
- pmsg->data_len = data_len;
- tv = ktime_to_timeval(entry->skb->tstamp);
- pmsg->timestamp_sec = tv.tv_sec;
- pmsg->timestamp_usec = tv.tv_usec;
- pmsg->mark = entry->skb->mark;
- pmsg->hook = entry->hook;
- pmsg->hw_protocol = entry->skb->protocol;
-
- if (entry->indev)
- strcpy(pmsg->indev_name, entry->indev->name);
- else
- pmsg->indev_name[0] = '\0';
-
- if (entry->outdev)
- strcpy(pmsg->outdev_name, entry->outdev->name);
- else
- pmsg->outdev_name[0] = '\0';
-
- if (entry->indev && entry->skb->dev &&
- entry->skb->mac_header != entry->skb->network_header) {
- pmsg->hw_type = entry->skb->dev->type;
- pmsg->hw_addrlen = dev_parse_header(entry->skb,
- pmsg->hw_addr);
- }
-
- if (data_len)
- if (skb_copy_bits(entry->skb, 0, pmsg->payload, data_len))
- BUG();
-
- nlh->nlmsg_len = skb->tail - old_tail;
- return skb;
-
-nlmsg_failure:
- kfree_skb(skb);
- *errp = -EINVAL;
- printk(KERN_ERR "ip_queue: error creating packet message\n");
- return NULL;
-}
-
-static int
-ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
-{
- int status = -EINVAL;
- struct sk_buff *nskb;
-
- if (copy_mode == IPQ_COPY_NONE)
- return -EAGAIN;
-
- nskb = ipq_build_packet_message(entry, &status);
- if (nskb == NULL)
- return status;
-
- spin_lock_bh(&queue_lock);
-
- if (!peer_pid)
- goto err_out_free_nskb;
-
- if (queue_total >= queue_maxlen) {
- queue_dropped++;
- status = -ENOSPC;
- if (net_ratelimit())
- printk (KERN_WARNING "ip_queue: full at %d entries, "
- "dropping packets(s). Dropped: %d\n", queue_total,
- queue_dropped);
- goto err_out_free_nskb;
- }
-
- /* netlink_unicast will either free the nskb or attach it to a socket */
- status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT);
- if (status < 0) {
- queue_user_dropped++;
- goto err_out_unlock;
- }
-
- __ipq_enqueue_entry(entry);
-
- spin_unlock_bh(&queue_lock);
- return status;
-
-err_out_free_nskb:
- kfree_skb(nskb);
-
-err_out_unlock:
- spin_unlock_bh(&queue_lock);
- return status;
-}
-
-static int
-ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
-{
- int diff;
- struct iphdr *user_iph = (struct iphdr *)v->payload;
- struct sk_buff *nskb;
-
- if (v->data_len < sizeof(*user_iph))
- return 0;
- diff = v->data_len - e->skb->len;
- if (diff < 0) {
- if (pskb_trim(e->skb, v->data_len))
- return -ENOMEM;
- } else if (diff > 0) {
- if (v->data_len > 0xFFFF)
- return -EINVAL;
- if (diff > skb_tailroom(e->skb)) {
- nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
- diff, GFP_ATOMIC);
- if (!nskb) {
- printk(KERN_WARNING "ip_queue: error "
- "in mangle, dropping packet\n");
- return -ENOMEM;
- }
- kfree_skb(e->skb);
- e->skb = nskb;
- }
- skb_put(e->skb, diff);
- }
- if (!skb_make_writable(e->skb, v->data_len))
- return -ENOMEM;
- skb_copy_to_linear_data(e->skb, v->payload, v->data_len);
- e->skb->ip_summed = CHECKSUM_NONE;
-
- return 0;
-}
-
-static int
-ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
-{
- struct nf_queue_entry *entry;
-
- if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN)
- return -EINVAL;
-
- entry = ipq_find_dequeue_entry(vmsg->id);
- if (entry == NULL)
- return -ENOENT;
- else {
- int verdict = vmsg->value;
-
- if (vmsg->data_len && vmsg->data_len == len)
- if (ipq_mangle_ipv4(vmsg, entry) < 0)
- verdict = NF_DROP;
-
- nf_reinject(entry, verdict);
- return 0;
- }
-}
-
-static int
-ipq_set_mode(unsigned char mode, unsigned int range)
-{
- int status;
-
- spin_lock_bh(&queue_lock);
- status = __ipq_set_mode(mode, range);
- spin_unlock_bh(&queue_lock);
- return status;
-}
-
-static int
-ipq_receive_peer(struct ipq_peer_msg *pmsg,
- unsigned char type, unsigned int len)
-{
- int status = 0;
-
- if (len < sizeof(*pmsg))
- return -EINVAL;
-
- switch (type) {
- case IPQM_MODE:
- status = ipq_set_mode(pmsg->msg.mode.value,
- pmsg->msg.mode.range);
- break;
-
- case IPQM_VERDICT:
- status = ipq_set_verdict(&pmsg->msg.verdict,
- len - sizeof(*pmsg));
- break;
- default:
- status = -EINVAL;
- }
- return status;
-}
-
-static int
-dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
-{
- if (entry->indev)
- if (entry->indev->ifindex == ifindex)
- return 1;
- if (entry->outdev)
- if (entry->outdev->ifindex == ifindex)
- return 1;
-#ifdef CONFIG_BRIDGE_NETFILTER
- if (entry->skb->nf_bridge) {
- if (entry->skb->nf_bridge->physindev &&
- entry->skb->nf_bridge->physindev->ifindex == ifindex)
- return 1;
- if (entry->skb->nf_bridge->physoutdev &&
- entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
- return 1;
- }
-#endif
- return 0;
-}
-
-static void
-ipq_dev_drop(int ifindex)
-{
- ipq_flush(dev_cmp, ifindex);
-}
-
-#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)
-
-static inline void
-__ipq_rcv_skb(struct sk_buff *skb)
-{
- int status, type, pid, flags;
- unsigned int nlmsglen, skblen;
- struct nlmsghdr *nlh;
- bool enable_timestamp = false;
-
- skblen = skb->len;
- if (skblen < sizeof(*nlh))
- return;
-
- nlh = nlmsg_hdr(skb);
- nlmsglen = nlh->nlmsg_len;
- if (nlmsglen < sizeof(*nlh) || skblen < nlmsglen)
- return;
-
- pid = nlh->nlmsg_pid;
- flags = nlh->nlmsg_flags;
-
- if(pid <= 0 || !(flags & NLM_F_REQUEST) || flags & NLM_F_MULTI)
- RCV_SKB_FAIL(-EINVAL);
-
- if (flags & MSG_TRUNC)
- RCV_SKB_FAIL(-ECOMM);
-
- type = nlh->nlmsg_type;
- if (type < NLMSG_NOOP || type >= IPQM_MAX)
- RCV_SKB_FAIL(-EINVAL);
-
- if (type <= IPQM_BASE)
- return;
-
- if (!capable(CAP_NET_ADMIN))
- RCV_SKB_FAIL(-EPERM);
-
- spin_lock_bh(&queue_lock);
-
- if (peer_pid) {
- if (peer_pid != pid) {
- spin_unlock_bh(&queue_lock);
- RCV_SKB_FAIL(-EBUSY);
- }
- } else {
- enable_timestamp = true;
- peer_pid = pid;
- }
-
- spin_unlock_bh(&queue_lock);
- if (enable_timestamp)
- net_enable_timestamp();
- status = ipq_receive_peer(NLMSG_DATA(nlh), type,
- nlmsglen - NLMSG_LENGTH(0));
- if (status < 0)
- RCV_SKB_FAIL(status);
-
- if (flags & NLM_F_ACK)
- netlink_ack(skb, nlh, 0);
-}
-
-static void
-ipq_rcv_skb(struct sk_buff *skb)
-{
- mutex_lock(&ipqnl_mutex);
- __ipq_rcv_skb(skb);
- mutex_unlock(&ipqnl_mutex);
-}
-
-static int
-ipq_rcv_dev_event(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- struct net_device *dev = ptr;
-
- if (!net_eq(dev_net(dev), &init_net))
- return NOTIFY_DONE;
-
- /* Drop any packets associated with the downed device */
- if (event == NETDEV_DOWN)
- ipq_dev_drop(dev->ifindex);
- return NOTIFY_DONE;
-}
-
-static struct notifier_block ipq_dev_notifier = {
- .notifier_call = ipq_rcv_dev_event,
-};
-
-static int
-ipq_rcv_nl_event(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- struct netlink_notify *n = ptr;
-
- if (event == NETLINK_URELEASE && n->protocol == NETLINK_FIREWALL) {
- spin_lock_bh(&queue_lock);
- if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
- __ipq_reset();
- spin_unlock_bh(&queue_lock);
- }
- return NOTIFY_DONE;
-}
-
-static struct notifier_block ipq_nl_notifier = {
- .notifier_call = ipq_rcv_nl_event,
-};
-
-#ifdef CONFIG_SYSCTL
-static struct ctl_table_header *ipq_sysctl_header;
-
-static ctl_table ipq_table[] = {
- {
- .procname = NET_IPQ_QMAX_NAME,
- .data = &queue_maxlen,
- .maxlen = sizeof(queue_maxlen),
- .mode = 0644,
- .proc_handler = proc_dointvec
- },
- { }
-};
-#endif
-
-#ifdef CONFIG_PROC_FS
-static int ip_queue_show(struct seq_file *m, void *v)
-{
- spin_lock_bh(&queue_lock);
-
- seq_printf(m,
- "Peer PID : %d\n"
- "Copy mode : %hu\n"
- "Copy range : %u\n"
- "Queue length : %u\n"
- "Queue max. length : %u\n"
- "Queue dropped : %u\n"
- "Netlink dropped : %u\n",
- peer_pid,
- copy_mode,
- copy_range,
- queue_total,
- queue_maxlen,
- queue_dropped,
- queue_user_dropped);
-
- spin_unlock_bh(&queue_lock);
- return 0;
-}
-
-static int ip_queue_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ip_queue_show, NULL);
-}
-
-static const struct file_operations ip_queue_proc_fops = {
- .open = ip_queue_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-#endif
-
-static const struct nf_queue_handler nfqh = {
- .name = "ip_queue",
- .outfn = &ipq_enqueue_packet,
-};
-
-static int __init ip_queue_init(void)
-{
- int status = -ENOMEM;
- struct proc_dir_entry *proc __maybe_unused;
-
- netlink_register_notifier(&ipq_nl_notifier);
- ipqnl = netlink_kernel_create(&init_net, NETLINK_FIREWALL, 0,
- ipq_rcv_skb, NULL, THIS_MODULE);
- if (ipqnl == NULL) {
- printk(KERN_ERR "ip_queue: failed to create netlink socket\n");
- goto cleanup_netlink_notifier;
- }
-
-#ifdef CONFIG_PROC_FS
- proc = proc_create(IPQ_PROC_FS_NAME, 0, init_net.proc_net,
- &ip_queue_proc_fops);
- if (!proc) {
- printk(KERN_ERR "ip_queue: failed to create proc entry\n");
- goto cleanup_ipqnl;
- }
-#endif
- register_netdevice_notifier(&ipq_dev_notifier);
-#ifdef CONFIG_SYSCTL
- ipq_sysctl_header = register_sysctl_paths(net_ipv4_ctl_path, ipq_table);
-#endif
- status = nf_register_queue_handler(NFPROTO_IPV4, &nfqh);
- if (status < 0) {
- printk(KERN_ERR "ip_queue: failed to register queue handler\n");
- goto cleanup_sysctl;
- }
- return status;
-
-cleanup_sysctl:
-#ifdef CONFIG_SYSCTL
- unregister_sysctl_table(ipq_sysctl_header);
-#endif
- unregister_netdevice_notifier(&ipq_dev_notifier);
- proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
-cleanup_ipqnl: __maybe_unused
- netlink_kernel_release(ipqnl);
- mutex_lock(&ipqnl_mutex);
- mutex_unlock(&ipqnl_mutex);
-
-cleanup_netlink_notifier:
- netlink_unregister_notifier(&ipq_nl_notifier);
- return status;
-}
-
-static void __exit ip_queue_fini(void)
-{
- nf_unregister_queue_handlers(&nfqh);
-
- ipq_flush(NULL, 0);
-
-#ifdef CONFIG_SYSCTL
- unregister_sysctl_table(ipq_sysctl_header);
-#endif
- unregister_netdevice_notifier(&ipq_dev_notifier);
- proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
-
- netlink_kernel_release(ipqnl);
- mutex_lock(&ipqnl_mutex);
- mutex_unlock(&ipqnl_mutex);
-
- netlink_unregister_notifier(&ipq_nl_notifier);
-}
-
-MODULE_DESCRIPTION("IPv4 packet queue handler");
-MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_FIREWALL);
-
-module_init(ip_queue_init);
-module_exit(ip_queue_fini);
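
With ip_queue gone, the old QUEUE target's NETLINK_FIREWALL interface goes away with it; verdict daemons are expected to move to nfnetlink_queue. A minimal userspace sketch of that replacement path, assuming libnetfilter_queue is installed and an iptables NFQUEUE rule feeds queue 0 (illustration only, not part of this patch):

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/socket.h>
    #include <arpa/inet.h>
    #include <linux/netfilter.h>		/* NF_ACCEPT */
    #include <libnetfilter_queue/libnetfilter_queue.h>

    /* Verdict callback: accept everything; a real daemon inspects here. */
    static int cb(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
                  struct nfq_data *nfa, void *data)
    {
            struct nfqnl_msg_packet_hdr *ph = nfq_get_msg_packet_hdr(nfa);
            uint32_t id = ph ? ntohl(ph->packet_id) : 0;

            return nfq_set_verdict(qh, id, NF_ACCEPT, 0, NULL);
    }

    int main(void)
    {
            struct nfq_handle *h = nfq_open();
            struct nfq_q_handle *qh;
            char buf[4096];
            int fd, rv;

            if (!h)
                    exit(1);
            qh = nfq_create_queue(h, 0, &cb, NULL);	/* queue number 0 */
            if (!qh)
                    exit(1);
            nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);	/* ~ IPQ_COPY_PACKET */

            fd = nfq_fd(h);
            while ((rv = recv(fd, buf, sizeof(buf), 0)) >= 0)
                    nfq_handle_packet(h, buf, rv);

            nfq_destroy_queue(qh);
            nfq_close(h);
            return 0;
    }
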
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 24e556e83a3b..170b1fdd6b72 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -153,8 +153,7 @@ ip_checkentry(const struct ipt_ip *ip)
static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
- if (net_ratelimit())
- pr_info("error: `%s'\n", (const char *)par->targinfo);
+ net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
return NF_DROP;
}
@@ -377,7 +376,7 @@ ipt_do_table(struct sk_buff *skb,
if (v < 0) {
/* Pop from stack? */
if (v != XT_RETURN) {
- verdict = (unsigned)(-v) - 1;
+ verdict = (unsigned int)(-v) - 1;
break;
}
if (*stackptr <= origptr) {
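
The net_info_ratelimited() conversion in this hunk recurs through the rest of the series; the helpers keep the net_ratelimit() check in one place and spell the log level once, in the macro name. Roughly, as a sketch of their shape (see include/linux/net.h for the authoritative definitions):

    #define net_ratelimited_function(function, ...)			\
    do {								\
            if (net_ratelimit())					\
                    function(__VA_ARGS__);				\
    } while (0)

    #define net_info_ratelimited(fmt, ...)				\
            net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
    #define net_notice_ratelimited(fmt, ...)				\
            net_ratelimited_function(pr_notice, fmt, ##__VA_ARGS__)
    #define net_warn_ratelimited(fmt, ...)				\
            net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
    #define net_crit_ratelimited(fmt, ...)				\
            net_ratelimited_function(pr_crit, fmt, ##__VA_ARGS__)
    #define net_dbg_ratelimited(fmt, ...)				\
            net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)

The win is mechanical: callers lose the two-line "if (net_ratelimit())" dance, which is exactly what the hunks below keep doing file after file.
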
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index a639967eb727..fe5daea5214d 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -246,8 +246,7 @@ clusterip_hashfn(const struct sk_buff *skb,
dport = ports[1];
}
} else {
- if (net_ratelimit())
- pr_info("unknown protocol %u\n", iph->protocol);
+ net_info_ratelimited("unknown protocol %u\n", iph->protocol);
}
switch (config->hash_mode) {
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index de9da21113a1..91747d4ebc26 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -74,16 +74,24 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
if (iph == NULL)
- return -NF_DROP;
+ return -NF_ACCEPT;
/* Conntrack defragments packets, we might still see fragments
* inside ICMP packets though. */
if (iph->frag_off & htons(IP_OFFSET))
- return -NF_DROP;
+ return -NF_ACCEPT;
*dataoff = nhoff + (iph->ihl << 2);
*protonum = iph->protocol;
+ /* Check bogus IP headers */
+ if (*dataoff > skb->len) {
+ pr_debug("nf_conntrack_ipv4: bogus IPv4 packet: "
+ "nhoff %u, ihl %u, skblen %u\n",
+ nhoff, iph->ihl << 2, skb->len);
+ return -NF_ACCEPT;
+ }
+
return NF_ACCEPT;
}
@@ -303,8 +311,9 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
static int ipv4_tuple_to_nlattr(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple)
{
- NLA_PUT_BE32(skb, CTA_IP_V4_SRC, tuple->src.u3.ip);
- NLA_PUT_BE32(skb, CTA_IP_V4_DST, tuple->dst.u3.ip);
+ if (nla_put_be32(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) ||
+ nla_put_be32(skb, CTA_IP_V4_DST, tuple->dst.u3.ip))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -356,7 +365,7 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = {
.nla_policy = ipv4_nla_policy,
#endif
#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
- .ctl_table_path = nf_net_ipv4_netfilter_sysctl_path,
+ .ctl_table_path = "net/ipv4/netfilter",
.ctl_table = ip_ct_sysctl_table,
#endif
.me = THIS_MODULE,
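
The nla_put_be32() change here is part of a tree-wide netlink cleanup. The removed NLA_PUT_*() macros hid a goto behind a function-like name; the new style makes the failure path visible at the call site. A sketch of both (the macro is quoted roughly as it stood in include/net/netlink.h; example_put() is an illustrative name):

    /* Old style: the jump to nla_put_failure is invisible at the call site. */
    #define NLA_PUT(skb, attrtype, attrlen, data)			\
    do {								\
            if (unlikely(nla_put(skb, attrtype, attrlen, data) < 0)) \
                    goto nla_put_failure;				\
    } while (0)

    /* New style: call nla_put_*() and check its return value, which is
     * -EMSGSIZE once the skb has run out of tailroom. */
    static int example_put(struct sk_buff *skb, __be32 src, __be32 dst)
    {
            if (nla_put_be32(skb, CTA_IP_V4_SRC, src) ||
                nla_put_be32(skb, CTA_IP_V4_DST, dst))
                    goto nla_put_failure;
            return 0;

    nla_put_failure:
            return -1;
    }
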
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 7cbe9cb261c2..0847e373d33c 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -228,10 +228,10 @@ icmp_error(struct net *net, struct nf_conn *tmpl,
static int icmp_tuple_to_nlattr(struct sk_buff *skb,
const struct nf_conntrack_tuple *t)
{
- NLA_PUT_BE16(skb, CTA_PROTO_ICMP_ID, t->src.u.icmp.id);
- NLA_PUT_U8(skb, CTA_PROTO_ICMP_TYPE, t->dst.u.icmp.type);
- NLA_PUT_U8(skb, CTA_PROTO_ICMP_CODE, t->dst.u.icmp.code);
-
+ if (nla_put_be16(skb, CTA_PROTO_ICMP_ID, t->src.u.icmp.id) ||
+ nla_put_u8(skb, CTA_PROTO_ICMP_TYPE, t->dst.u.icmp.type) ||
+ nla_put_u8(skb, CTA_PROTO_ICMP_CODE, t->dst.u.icmp.code))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -293,8 +293,8 @@ icmp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
const unsigned int *timeout = data;
- NLA_PUT_BE32(skb, CTA_TIMEOUT_ICMP_TIMEOUT, htonl(*timeout / HZ));
-
+ if (nla_put_be32(skb, CTA_TIMEOUT_ICMP_TIMEOUT, htonl(*timeout / HZ)))
+ goto nla_put_failure;
return 0;
nla_put_failure:
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index 82536701e3a3..cad29c121318 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -42,9 +42,7 @@ static int set_addr(struct sk_buff *skb,
if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
addroff, sizeof(buf),
(char *) &buf, sizeof(buf))) {
- if (net_ratelimit())
- pr_notice("nf_nat_h323: nf_nat_mangle_tcp_packet"
- " error\n");
+ net_notice_ratelimited("nf_nat_h323: nf_nat_mangle_tcp_packet error\n");
return -1;
}
@@ -58,9 +56,7 @@ static int set_addr(struct sk_buff *skb,
if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo,
addroff, sizeof(buf),
(char *) &buf, sizeof(buf))) {
- if (net_ratelimit())
- pr_notice("nf_nat_h323: nf_nat_mangle_udp_packet"
- " error\n");
+ net_notice_ratelimited("nf_nat_h323: nf_nat_mangle_udp_packet error\n");
return -1;
}
/* nf_nat_mangle_udp_packet uses skb_make_writable() to copy
@@ -214,8 +210,7 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
/* Run out of expectations */
if (i >= H323_RTP_CHANNEL_MAX) {
- if (net_ratelimit())
- pr_notice("nf_nat_h323: out of expectations\n");
+ net_notice_ratelimited("nf_nat_h323: out of expectations\n");
return 0;
}
@@ -244,8 +239,7 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
}
if (nated_port == 0) { /* No port available */
- if (net_ratelimit())
- pr_notice("nf_nat_h323: out of RTP ports\n");
+ net_notice_ratelimited("nf_nat_h323: out of RTP ports\n");
return 0;
}
@@ -308,8 +302,7 @@ static int nat_t120(struct sk_buff *skb, struct nf_conn *ct,
}
if (nated_port == 0) { /* No port available */
- if (net_ratelimit())
- pr_notice("nf_nat_h323: out of TCP ports\n");
+ net_notice_ratelimited("nf_nat_h323: out of TCP ports\n");
return 0;
}
@@ -365,8 +358,7 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
}
if (nated_port == 0) { /* No port available */
- if (net_ratelimit())
- pr_notice("nf_nat_q931: out of TCP ports\n");
+ net_notice_ratelimited("nf_nat_q931: out of TCP ports\n");
return 0;
}
@@ -456,8 +448,7 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
}
if (nated_port == 0) { /* No port available */
- if (net_ratelimit())
- pr_notice("nf_nat_ras: out of TCP ports\n");
+ net_notice_ratelimited("nf_nat_ras: out of TCP ports\n");
return 0;
}
@@ -545,8 +536,7 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
}
if (nated_port == 0) { /* No port available */
- if (net_ratelimit())
- pr_notice("nf_nat_q931: out of TCP ports\n");
+ net_notice_ratelimited("nf_nat_q931: out of TCP ports\n");
return 0;
}
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index 57932c43960e..ea4a23813d26 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -283,7 +283,7 @@ static unsigned int ip_nat_sip_expect(struct sk_buff *skb, unsigned int dataoff,
__be32 newip;
u_int16_t port;
char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
- unsigned buflen;
+ unsigned int buflen;
/* Connection will come from reply */
if (ct->tuplehash[dir].tuple.src.u3.ip == ct->tuplehash[!dir].tuple.dst.u3.ip)
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 2133c30a4a5f..746edec8b86e 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -1206,8 +1206,7 @@ static int snmp_translate(struct nf_conn *ct,
if (!snmp_parse_mangle((unsigned char *)udph + sizeof(struct udphdr),
paylen, &map, &udph->check)) {
- if (net_ratelimit())
- printk(KERN_WARNING "bsalg: parser failed\n");
+ net_warn_ratelimited("bsalg: parser failed\n");
return NF_DROP;
}
return NF_ACCEPT;
@@ -1241,9 +1240,8 @@ static int help(struct sk_buff *skb, unsigned int protoff,
* can mess around with the payload.
*/
if (ntohs(udph->len) != skb->len - (iph->ihl << 2)) {
- if (net_ratelimit())
- printk(KERN_WARNING "SNMP: dropping malformed packet src=%pI4 dst=%pI4\n",
- &iph->saddr, &iph->daddr);
+ net_warn_ratelimited("SNMP: dropping malformed packet src=%pI4 dst=%pI4\n",
+ &iph->saddr, &iph->daddr);
return NF_DROP;
}
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 50009c787bcd..2c00e8bf684d 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -51,15 +51,16 @@ static struct ping_table ping_table;
static u16 ping_port_rover;
-static inline int ping_hashfn(struct net *net, unsigned num, unsigned mask)
+static inline int ping_hashfn(struct net *net, unsigned int num, unsigned int mask)
{
int res = (num + net_hash_mix(net)) & mask;
+
pr_debug("hash(%d) = %d\n", num, res);
return res;
}
static inline struct hlist_nulls_head *ping_hashslot(struct ping_table *table,
- struct net *net, unsigned num)
+ struct net *net, unsigned int num)
{
return &table->hash[ping_hashfn(net, num, PING_HTABLE_MASK)];
}
@@ -188,7 +189,8 @@ static void inet_get_ping_group_range_net(struct net *net, gid_t *low,
gid_t *high)
{
gid_t *data = net->ipv4.sysctl_ping_group_range;
- unsigned seq;
+ unsigned int seq;
+
do {
seq = read_seqbegin(&sysctl_local_ports.lock);
@@ -205,17 +207,22 @@ static int ping_init_sock(struct sock *sk)
gid_t range[2];
struct group_info *group_info = get_current_groups();
int i, j, count = group_info->ngroups;
+ kgid_t low, high;
inet_get_ping_group_range_net(net, range, range+1);
+ low = make_kgid(&init_user_ns, range[0]);
+ high = make_kgid(&init_user_ns, range[1]);
+ if (!gid_valid(low) || !gid_valid(high) || gid_lt(high, low))
+ return -EACCES;
+
if (range[0] <= group && group <= range[1])
return 0;
for (i = 0; i < group_info->nblocks; i++) {
int cp_count = min_t(int, NGROUPS_PER_BLOCK, count);
-
for (j = 0; j < cp_count; j++) {
- group = group_info->blocks[i][j];
- if (range[0] <= group && group <= range[1])
+ kgid_t gid = group_info->blocks[i][j];
+ if (gid_lte(low, gid) && gid_lte(gid, high))
return 0;
}
@@ -410,7 +417,7 @@ struct pingfakehdr {
__wsum wcheck;
};
-static int ping_getfrag(void *from, char * to,
+static int ping_getfrag(void *from, char *to,
int offset, int fraglen, int odd, struct sk_buff *skb)
{
struct pingfakehdr *pfh = (struct pingfakehdr *)from;
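
The ping_init_sock() rework above is groundwork for user-namespace-aware gids: the raw gid_t range from the sysctl is mapped into kernel-internal kgid_t values once, then compared with the typed helpers from <linux/uidgid.h>. A minimal sketch of the pattern (gid_in_sysctl_range() is an illustrative name, not from the patch):

    #include <linux/uidgid.h>
    #include <linux/user_namespace.h>

    static bool gid_in_sysctl_range(kgid_t gid, gid_t lo_raw, gid_t hi_raw)
    {
            kgid_t low  = make_kgid(&init_user_ns, lo_raw);
            kgid_t high = make_kgid(&init_user_ns, hi_raw);

            /* An unmappable or inverted range never matches. */
            if (!gid_valid(low) || !gid_valid(high) || gid_lt(high, low))
                    return false;

            return gid_lte(low, gid) && gid_lte(gid, high);
    }
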
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index bbd604c68e68..4032b818f3e4 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -288,7 +288,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
read_unlock(&raw_v4_hashinfo.lock);
}
-static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
+static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
/* Charge it to the socket. */
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 167ea10b521a..98b30d08efe9 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -109,6 +109,7 @@
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
+#include <linux/kmemleak.h>
#endif
#include <net/secure_seq.h>
@@ -229,7 +230,7 @@ const __u8 ip_tos2prio[16] = {
TC_PRIO_INTERACTIVE_BULK,
ECN_OR_COST(INTERACTIVE_BULK)
};
-
+EXPORT_SYMBOL(ip_tos2prio);
/*
* Route cache.
@@ -296,7 +297,7 @@ static inline void rt_hash_lock_init(void)
#endif
static struct rt_hash_bucket *rt_hash_table __read_mostly;
-static unsigned rt_hash_mask __read_mostly;
+static unsigned int rt_hash_mask __read_mostly;
static unsigned int rt_hash_log __read_mostly;
static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
@@ -959,8 +960,7 @@ void rt_cache_flush_batch(struct net *net)
static void rt_emergency_hash_rebuild(struct net *net)
{
- if (net_ratelimit())
- pr_warn("Route hash chain too long!\n");
+ net_warn_ratelimited("Route hash chain too long!\n");
rt_cache_invalidate(net);
}
@@ -1083,8 +1083,7 @@ static int rt_garbage_collect(struct dst_ops *ops)
goto out;
if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size)
goto out;
- if (net_ratelimit())
- pr_warn("dst cache overflow\n");
+ net_warn_ratelimited("dst cache overflow\n");
RT_CACHE_STAT_INC(gc_dst_overflow);
return 1;
@@ -1143,7 +1142,7 @@ static int rt_bind_neighbour(struct rtable *rt)
return 0;
}
-static struct rtable *rt_intern_hash(unsigned hash, struct rtable *rt,
+static struct rtable *rt_intern_hash(unsigned int hash, struct rtable *rt,
struct sk_buff *skb, int ifindex)
{
struct rtable *rth, *cand;
@@ -1181,8 +1180,7 @@ restart:
if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
int err = rt_bind_neighbour(rt);
if (err) {
- if (net_ratelimit())
- pr_warn("Neighbour table failure & not caching routes\n");
+ net_warn_ratelimited("Neighbour table failure & not caching routes\n");
ip_rt_put(rt);
return ERR_PTR(err);
}
@@ -1298,8 +1296,7 @@ restart:
goto restart;
}
- if (net_ratelimit())
- pr_warn("Neighbour table overflow\n");
+ net_warn_ratelimited("Neighbour table overflow\n");
rt_drop(rt);
return ERR_PTR(-ENOBUFS);
}
@@ -1377,14 +1374,13 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
return;
}
} else if (!rt)
- printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
- __builtin_return_address(0));
+ pr_debug("rt_bind_peer(0) @%p\n", __builtin_return_address(0));
ip_select_fb_ident(iph);
}
EXPORT_SYMBOL(__ip_select_ident);
-static void rt_del(unsigned hash, struct rtable *rt)
+static void rt_del(unsigned int hash, struct rtable *rt)
{
struct rtable __rcu **rthp;
struct rtable *aux;
@@ -1502,11 +1498,11 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
- if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
- pr_info("Redirect from %pI4 on %s about %pI4 ignored\n"
- " Advised path = %pI4 -> %pI4\n",
- &old_gw, dev->name, &new_gw,
- &saddr, &daddr);
+ if (IN_DEV_LOG_MARTIANS(in_dev))
+ net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
+ " Advised path = %pI4 -> %pI4\n",
+ &old_gw, dev->name, &new_gw,
+ &saddr, &daddr);
#endif
;
}
@@ -1538,7 +1534,7 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
ip_rt_put(rt);
ret = NULL;
} else if (rt->rt_flags & RTCF_REDIRECTED) {
- unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
+ unsigned int hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
rt->rt_oif,
rt_genid(dev_net(dst->dev)));
rt_del(hash, rt);
@@ -1616,11 +1612,10 @@ void ip_rt_send_redirect(struct sk_buff *skb)
++peer->rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (log_martians &&
- peer->rate_tokens == ip_rt_redirect_number &&
- net_ratelimit())
- pr_warn("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
- &ip_hdr(skb)->saddr, rt->rt_iif,
- &rt->rt_dst, &rt->rt_gateway);
+ peer->rate_tokens == ip_rt_redirect_number)
+ net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
+ &ip_hdr(skb)->saddr, rt->rt_iif,
+ &rt->rt_dst, &rt->rt_gateway);
#endif
}
}
@@ -1843,9 +1838,9 @@ static void ipv4_link_failure(struct sk_buff *skb)
static int ip_rt_bug(struct sk_buff *skb)
{
- printk(KERN_DEBUG "ip_rt_bug: %pI4 -> %pI4, %s\n",
- &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
- skb->dev ? skb->dev->name : "?");
+ pr_debug("%s: %pI4 -> %pI4, %s\n",
+ __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
+ skb->dev ? skb->dev->name : "?");
kfree_skb(skb);
WARN_ON(1);
return 0;
@@ -2134,8 +2129,7 @@ static int __mkroute_input(struct sk_buff *skb,
/* get a working reference to the output device */
out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
if (out_dev == NULL) {
- if (net_ratelimit())
- pr_crit("Bug in ip_route_input_slow(). Please report.\n");
+ net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
return -EINVAL;
}
@@ -2215,9 +2209,9 @@ static int ip_mkroute_input(struct sk_buff *skb,
struct in_device *in_dev,
__be32 daddr, __be32 saddr, u32 tos)
{
- struct rtable* rth = NULL;
+ struct rtable *rth = NULL;
int err;
- unsigned hash;
+ unsigned int hash;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (res->fi && res->fi->fib_nhs > 1)
@@ -2255,13 +2249,13 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
struct fib_result res;
struct in_device *in_dev = __in_dev_get_rcu(dev);
struct flowi4 fl4;
- unsigned flags = 0;
+ unsigned int flags = 0;
u32 itag = 0;
- struct rtable * rth;
- unsigned hash;
+ struct rtable *rth;
+ unsigned int hash;
__be32 spec_dst;
int err = -EINVAL;
- struct net * net = dev_net(dev);
+ struct net *net = dev_net(dev);
/* IP on this device is disabled. */
@@ -2406,9 +2400,9 @@ no_route:
martian_destination:
RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
- if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
- pr_warn("martian destination %pI4 from %pI4, dev %s\n",
- &daddr, &saddr, dev->name);
+ if (IN_DEV_LOG_MARTIANS(in_dev))
+ net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
+ &daddr, &saddr, dev->name);
#endif
e_hostunreach:
@@ -2433,8 +2427,8 @@ martian_source_keep_err:
int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
u8 tos, struct net_device *dev, bool noref)
{
- struct rtable * rth;
- unsigned hash;
+ struct rtable *rth;
+ unsigned int hash;
int iif = dev->ifindex;
struct net *net;
int res;
@@ -2972,7 +2966,8 @@ static int rt_fill_info(struct net *net,
r->rtm_src_len = 0;
r->rtm_tos = rt->rt_key_tos;
r->rtm_table = RT_TABLE_MAIN;
- NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
+ if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN))
+ goto nla_put_failure;
r->rtm_type = rt->rt_type;
r->rtm_scope = RT_SCOPE_UNIVERSE;
r->rtm_protocol = RTPROT_UNSPEC;
@@ -2980,31 +2975,38 @@ static int rt_fill_info(struct net *net,
if (rt->rt_flags & RTCF_NOTIFY)
r->rtm_flags |= RTM_F_NOTIFY;
- NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);
-
+ if (nla_put_be32(skb, RTA_DST, rt->rt_dst))
+ goto nla_put_failure;
if (rt->rt_key_src) {
r->rtm_src_len = 32;
- NLA_PUT_BE32(skb, RTA_SRC, rt->rt_key_src);
+ if (nla_put_be32(skb, RTA_SRC, rt->rt_key_src))
+ goto nla_put_failure;
}
- if (rt->dst.dev)
- NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
+ if (rt->dst.dev &&
+ nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
+ goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
- if (rt->dst.tclassid)
- NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
+ if (rt->dst.tclassid &&
+ nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
+ goto nla_put_failure;
#endif
- if (rt_is_input_route(rt))
- NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
- else if (rt->rt_src != rt->rt_key_src)
- NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
-
- if (rt->rt_dst != rt->rt_gateway)
- NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
+ if (rt_is_input_route(rt)) {
+ if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_spec_dst))
+ goto nla_put_failure;
+ } else if (rt->rt_src != rt->rt_key_src) {
+ if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_src))
+ goto nla_put_failure;
+ }
+ if (rt->rt_dst != rt->rt_gateway &&
+ nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway))
+ goto nla_put_failure;
if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
goto nla_put_failure;
- if (rt->rt_mark)
- NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark);
+ if (rt->rt_mark &&
+ nla_put_be32(skb, RTA_MARK, rt->rt_mark))
+ goto nla_put_failure;
error = rt->dst.error;
if (peer) {
@@ -3045,7 +3047,8 @@ static int rt_fill_info(struct net *net,
}
} else
#endif
- NLA_PUT_U32(skb, RTA_IIF, rt->rt_iif);
+ if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
+ goto nla_put_failure;
}
if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
@@ -3059,7 +3062,7 @@ nla_put_failure:
return -EMSGSIZE;
}
-static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
+static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
{
struct net *net = sock_net(in_skb->sk);
struct rtmsg *rtm;
@@ -3334,23 +3337,6 @@ static ctl_table ipv4_route_table[] = {
{ }
};
-static struct ctl_table empty[1];
-
-static struct ctl_table ipv4_skeleton[] =
-{
- { .procname = "route",
- .mode = 0555, .child = ipv4_route_table},
- { .procname = "neigh",
- .mode = 0555, .child = empty},
- { }
-};
-
-static __net_initdata struct ctl_path ipv4_path[] = {
- { .procname = "net", },
- { .procname = "ipv4", },
- { },
-};
-
static struct ctl_table ipv4_route_flush_table[] = {
{
.procname = "flush",
@@ -3361,13 +3347,6 @@ static struct ctl_table ipv4_route_flush_table[] = {
{ },
};
-static __net_initdata struct ctl_path ipv4_route_path[] = {
- { .procname = "net", },
- { .procname = "ipv4", },
- { .procname = "route", },
- { },
-};
-
static __net_init int sysctl_route_net_init(struct net *net)
{
struct ctl_table *tbl;
@@ -3380,8 +3359,7 @@ static __net_init int sysctl_route_net_init(struct net *net)
}
tbl[0].extra1 = net;
- net->ipv4.route_hdr =
- register_net_sysctl_table(net, ipv4_route_path, tbl);
+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
if (net->ipv4.route_hdr == NULL)
goto err_reg;
return 0;
@@ -3430,9 +3408,15 @@ struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
static __initdata unsigned long rhash_entries;
static int __init set_rhash_entries(char *str)
{
+ ssize_t ret;
+
if (!str)
return 0;
- rhash_entries = simple_strtoul(str, &str, 0);
+
+ ret = kstrtoul(str, 0, &rhash_entries);
+ if (ret)
+ return 0;
+
return 1;
}
__setup("rhash_entries=", set_rhash_entries);
@@ -3468,6 +3452,7 @@ int __init ip_rt_init(void)
0,
&rt_hash_log,
&rt_hash_mask,
+ 0,
rhash_entries ? 0 : 512 * 1024);
memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
rt_hash_lock_init();
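
The bare extra 0 slotted in above matches a new low-watermark parameter that alloc_large_system_hash() grows elsewhere in this series; passing 0 keeps the old behaviour. Roughly, the updated prototype (sketch, see include/linux/bootmem.h):

    void *alloc_large_system_hash(const char *tablename,
                                  unsigned long bucketsize,
                                  unsigned long numentries,
                                  int scale,
                                  int flags,
                                  unsigned int *_hash_shift,
                                  unsigned int *_hash_mask,
                                  unsigned long low_limit,	/* new: minimum size */
                                  unsigned long high_limit);
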
@@ -3505,6 +3490,6 @@ int __init ip_rt_init(void)
*/
void __init ip_static_sysctl_init(void)
{
- register_sysctl_paths(ipv4_path, ipv4_skeleton);
+ register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
}
#endif
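
The deleted ctl_path arrays are the visible end of a sysctl API consolidation: register_net_sysctl() takes the mount point as one plain string, so the per-table path boilerplate disappears. A per-netns registration now looks roughly like this (sketch; example_route_table is an illustrative name):

    static __net_init int example_sysctl_init(struct net *net)
    {
            struct ctl_table_header *hdr;

            /* One string instead of { "net" }, { "ipv4" }, ... arrays */
            hdr = register_net_sysctl(net, "net/ipv4/route",
                                      example_route_table);
            if (!hdr)
                    return -ENOMEM;

            net->ipv4.route_hdr = hdr;
            return 0;
    }
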
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 7a7724da9bff..ef32956ed655 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -27,6 +27,7 @@
#include <net/tcp_memcontrol.h>
static int zero;
+static int two = 2;
static int tcp_retr1_max = 255;
static int ip_local_port_range_min[] = { 1, 1 };
static int ip_local_port_range_max[] = { 65535, 65535 };
@@ -78,7 +79,7 @@ static int ipv4_local_port_range(ctl_table *table, int write,
static void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high)
{
gid_t *data = table->data;
- unsigned seq;
+ unsigned int seq;
do {
seq = read_seqbegin(&sysctl_local_ports.lock);
@@ -677,6 +678,15 @@ static struct ctl_table ipv4_table[] = {
.proc_handler = proc_dointvec
},
{
+ .procname = "tcp_early_retrans",
+ .data = &sysctl_tcp_early_retrans,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &two,
+ },
+ {
.procname = "udp_mem",
.data = &sysctl_udp_mem,
.maxlen = sizeof(sysctl_udp_mem),
@@ -768,13 +778,6 @@ static struct ctl_table ipv4_net_table[] = {
{ }
};
-struct ctl_path net_ipv4_ctl_path[] = {
- { .procname = "net", },
- { .procname = "ipv4", },
- { },
-};
-EXPORT_SYMBOL_GPL(net_ipv4_ctl_path);
-
static __net_init int ipv4_sysctl_init_net(struct net *net)
{
struct ctl_table *table;
@@ -815,8 +818,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
tcp_init_mem(net);
- net->ipv4.ipv4_hdr = register_net_sysctl_table(net,
- net_ipv4_ctl_path, table);
+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
if (net->ipv4.ipv4_hdr == NULL)
goto err_reg;
@@ -857,12 +859,12 @@ static __init int sysctl_ipv4_init(void)
if (!i->procname)
return -EINVAL;
- hdr = register_sysctl_paths(net_ipv4_ctl_path, ipv4_table);
+ hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
if (hdr == NULL)
return -ENOMEM;
if (register_pernet_subsys(&ipv4_sysctl_ops)) {
- unregister_sysctl_table(hdr);
+ unregister_net_sysctl_table(hdr);
return -ENOMEM;
}
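
The new tcp_early_retrans knob is bounded by proc_dointvec_minmax: writes outside [0, 2] fail with -EINVAL rather than being clamped. Per the documentation accompanying this series, 0 disables early retransmit, 1 enables the RFC 5827 behaviour, and 2 (the default) additionally delays the resulting fast retransmit slightly to tolerate mild reordering. The same bounded-entry pattern, with illustrative names:

    static int example_value;	/* backing store for the knob */
    static int zero;
    static int two = 2;

    static struct ctl_table example_table[] = {
            {
                    .procname	= "example_knob",
                    .data		= &example_value,
                    .maxlen		= sizeof(int),
                    .mode		= 0644,
                    .proc_handler	= proc_dointvec_minmax,
                    .extra1		= &zero,	/* inclusive lower bound */
                    .extra2		= &two,		/* inclusive upper bound */
            },
            { }
    };
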
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5d54ed30e821..3ba605f60e4e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -363,6 +363,71 @@ static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
return period;
}
+/* Address-family independent initialization for a tcp_sock.
+ *
+ * NOTE: A lot of things set to zero explicitly by call to
+ * sk_alloc() so need not be done here.
+ */
+void tcp_init_sock(struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ skb_queue_head_init(&tp->out_of_order_queue);
+ tcp_init_xmit_timers(sk);
+ tcp_prequeue_init(tp);
+
+ icsk->icsk_rto = TCP_TIMEOUT_INIT;
+ tp->mdev = TCP_TIMEOUT_INIT;
+
+ /* So many TCP implementations out there (incorrectly) count the
+ * initial SYN frame in their delayed-ACK and congestion control
+ * algorithms that we must have the following bandaid to talk
+ * efficiently to them. -DaveM
+ */
+ tp->snd_cwnd = TCP_INIT_CWND;
+
+ /* See draft-stevens-tcpca-spec-01 for discussion of the
+ * initialization of these values.
+ */
+ tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
+ tp->snd_cwnd_clamp = ~0;
+ tp->mss_cache = TCP_MSS_DEFAULT;
+
+ tp->reordering = sysctl_tcp_reordering;
+ tcp_enable_early_retrans(tp);
+ icsk->icsk_ca_ops = &tcp_init_congestion_ops;
+
+ sk->sk_state = TCP_CLOSE;
+
+ sk->sk_write_space = sk_stream_write_space;
+ sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
+
+ icsk->icsk_sync_mss = tcp_sync_mss;
+
+ /* TCP Cookie Transactions */
+ if (sysctl_tcp_cookie_size > 0) {
+ /* Default, cookies without s_data_payload. */
+ tp->cookie_values =
+ kzalloc(sizeof(*tp->cookie_values),
+ sk->sk_allocation);
+ if (tp->cookie_values != NULL)
+ kref_init(&tp->cookie_values->kref);
+ }
+ /* Presumed zeroed, in order of appearance:
+ * cookie_in_always, cookie_out_never,
+ * s_data_constant, s_data_in, s_data_out
+ */
+ sk->sk_sndbuf = sysctl_tcp_wmem[1];
+ sk->sk_rcvbuf = sysctl_tcp_rmem[1];
+
+ local_bh_disable();
+ sock_update_memcg(sk);
+ sk_sockets_allocated_inc(sk);
+ local_bh_enable();
+}
+EXPORT_SYMBOL(tcp_init_sock);
+
/*
* Wait for a TCP event.
*
@@ -528,7 +593,7 @@ static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
tp->pushed_seq = tp->write_seq;
}
-static inline int forced_push(const struct tcp_sock *tp)
+static inline bool forced_push(const struct tcp_sock *tp)
{
return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}
@@ -701,11 +766,12 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
if (skb) {
if (sk_wmem_schedule(sk, skb->truesize)) {
+ skb_reserve(skb, sk->sk_prot->max_header);
/*
* Make sure that we have exactly size bytes
* available to the caller, no more, no less.
*/
- skb_reserve(skb, skb_tailroom(skb) - size);
+ skb->avail_size = size;
return skb;
}
__kfree_skb(skb);
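
This sk_stream_alloc_skb() change pairs with the skb_availroom() calls in tcp_sendmsg() further down: the full max_header is reserved up front and the skb records how many bytes the allocation promised, so later copies cannot quietly grow into room a header push will need. Roughly, the helper (sketch, see include/linux/skbuff.h in this series):

    static inline int skb_availroom(const struct sk_buff *skb)
    {
            /* Tailroom as promised at allocation time, not the raw
             * skb_tailroom(), which may be larger than requested. */
            return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len;
    }
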
@@ -783,9 +849,10 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
while (psize > 0) {
struct sk_buff *skb = tcp_write_queue_tail(sk);
struct page *page = pages[poffset / PAGE_SIZE];
- int copy, i, can_coalesce;
+ int copy, i;
int offset = poffset % PAGE_SIZE;
int size = min_t(size_t, psize, PAGE_SIZE - offset);
+ bool can_coalesce;
if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
new_segment:
@@ -850,8 +917,7 @@ new_segment:
wait_for_sndbuf:
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
- if (copied)
- tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
+ tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
goto do_error;
@@ -918,7 +984,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
int iovlen, flags, err, copied;
- int mss_now, size_goal;
+ int mss_now = 0, size_goal;
bool sg;
long timeo;
@@ -932,6 +998,19 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
goto out_err;
+ if (unlikely(tp->repair)) {
+ if (tp->repair_queue == TCP_RECV_QUEUE) {
+ copied = tcp_send_rcvq(sk, msg, size);
+ goto out;
+ }
+
+ err = -EINVAL;
+ if (tp->repair_queue == TCP_NO_QUEUE)
+ goto out_err;
+
+ /* 'common' sending to sendq */
+ }
+
/* This should be in poll */
clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
@@ -995,15 +1074,14 @@ new_segment:
copy = seglen;
/* Where to copy to? */
- if (skb_tailroom(skb) > 0) {
+ if (skb_availroom(skb) > 0) {
/* We have some space in skb head. Superb! */
- if (copy > skb_tailroom(skb))
- copy = skb_tailroom(skb);
+ copy = min_t(int, copy, skb_availroom(skb));
err = skb_add_data_nocache(sk, skb, from, copy);
if (err)
goto do_fault;
} else {
- int merge = 0;
+ bool merge = false;
int i = skb_shinfo(skb)->nr_frags;
struct page *page = sk->sk_sndmsg_page;
int off;
@@ -1017,7 +1095,7 @@ new_segment:
off != PAGE_SIZE) {
/* We can extend the last page
* fragment. */
- merge = 1;
+ merge = true;
} else if (i == MAX_SKB_FRAGS || !sg) {
/* Need to add new fragment and cannot
* do this because interface is non-SG,
@@ -1089,7 +1167,7 @@ new_segment:
if ((seglen -= copy) == 0 && iovlen == 0)
goto out;
- if (skb->len < max || (flags & MSG_OOB))
+ if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
continue;
if (forced_push(tp)) {
@@ -1102,7 +1180,7 @@ new_segment:
wait_for_sndbuf:
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
- if (copied)
+ if (copied && likely(!tp->repair))
tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
@@ -1113,7 +1191,7 @@ wait_for_memory:
}
out:
- if (copied)
+ if (copied && likely(!tp->repair))
tcp_push(sk, flags, mss_now, tp->nonagle);
release_sock(sk);
return copied;
@@ -1187,6 +1265,24 @@ static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
return -EAGAIN;
}
+static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
+{
+ struct sk_buff *skb;
+ int copied = 0, err = 0;
+
+ /* XXX -- need to support SO_PEEK_OFF */
+
+ skb_queue_walk(&sk->sk_write_queue, skb) {
+ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, skb->len);
+ if (err)
+ break;
+
+ copied += skb->len;
+ }
+
+ return err ?: copied;
+}
+
/* Clean up the receive buffer for full frames taken by the user,
* then send an ACK if necessary. COPIED is the number of bytes
* tcp_recvmsg has given to the user so far, it speeds up the
@@ -1196,7 +1292,7 @@ static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
struct tcp_sock *tp = tcp_sk(sk);
- int time_to_ack = 0;
+ bool time_to_ack = false;
struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
@@ -1222,7 +1318,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
!icsk->icsk_ack.pingpong)) &&
!atomic_read(&sk->sk_rmem_alloc)))
- time_to_ack = 1;
+ time_to_ack = true;
}
/* We send an ACK if we can now advertise a non-zero window
@@ -1244,7 +1340,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
* "Lots" means "at least twice" here.
*/
if (new_window && new_window >= 2 * rcv_window_now)
- time_to_ack = 1;
+ time_to_ack = true;
}
}
if (time_to_ack)
@@ -1376,11 +1472,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
break;
}
if (tcp_hdr(skb)->fin) {
- sk_eat_skb(sk, skb, 0);
+ sk_eat_skb(sk, skb, false);
++seq;
break;
}
- sk_eat_skb(sk, skb, 0);
+ sk_eat_skb(sk, skb, false);
if (!desc->count)
break;
tp->copied_seq = seq;
@@ -1416,7 +1512,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
int target; /* Read at least this many bytes */
long timeo;
struct task_struct *user_recv = NULL;
- int copied_early = 0;
+ bool copied_early = false;
struct sk_buff *skb;
u32 urg_hole = 0;
@@ -1432,6 +1528,21 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (flags & MSG_OOB)
goto recv_urg;
+ if (unlikely(tp->repair)) {
+ err = -EPERM;
+ if (!(flags & MSG_PEEK))
+ goto out;
+
+ if (tp->repair_queue == TCP_SEND_QUEUE)
+ goto recv_sndq;
+
+ err = -EINVAL;
+ if (tp->repair_queue == TCP_NO_QUEUE)
+ goto out;
+
+ /* 'common' recv queue MSG_PEEK-ing */
+ }
+
seq = &tp->copied_seq;
if (flags & MSG_PEEK) {
peek_seq = tp->copied_seq;
@@ -1452,7 +1563,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if ((available < target) &&
(len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
!sysctl_tcp_low_latency &&
- dma_find_channel(DMA_MEMCPY)) {
+ net_dma_find_channel()) {
preempt_enable_no_resched();
tp->ucopy.pinned_list =
dma_pin_iovec_pages(msg->msg_iov, len);
@@ -1633,9 +1744,9 @@ do_prequeue:
}
if ((flags & MSG_PEEK) &&
(peek_seq - copied - urg_hole != tp->copied_seq)) {
- if (net_ratelimit())
- printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
- current->comm, task_pid_nr(current));
+ net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n",
+ current->comm,
+ task_pid_nr(current));
peek_seq = tp->copied_seq;
}
continue;
@@ -1667,7 +1778,7 @@ do_prequeue:
if (!(flags & MSG_TRUNC)) {
#ifdef CONFIG_NET_DMA
if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
- tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+ tp->ucopy.dma_chan = net_dma_find_channel();
if (tp->ucopy.dma_chan) {
tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
@@ -1689,7 +1800,7 @@ do_prequeue:
dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
if ((offset + used) == skb->len)
- copied_early = 1;
+ copied_early = true;
} else
#endif
@@ -1723,7 +1834,7 @@ skip_copy:
goto found_fin_ok;
if (!(flags & MSG_PEEK)) {
sk_eat_skb(sk, skb, copied_early);
- copied_early = 0;
+ copied_early = false;
}
continue;
@@ -1732,7 +1843,7 @@ skip_copy:
++*seq;
if (!(flags & MSG_PEEK)) {
sk_eat_skb(sk, skb, copied_early);
- copied_early = 0;
+ copied_early = false;
}
break;
} while (len > 0);
@@ -1783,6 +1894,10 @@ out:
recv_urg:
err = tcp_recv_urg(sk, msg, len, flags);
goto out;
+
+recv_sndq:
+ err = tcp_peek_sndq(sk, msg, len);
+ goto out;
}
EXPORT_SYMBOL(tcp_recvmsg);
@@ -1886,10 +2001,10 @@ bool tcp_check_oom(struct sock *sk, int shift)
too_many_orphans = tcp_too_many_orphans(sk, shift);
out_of_socket_memory = tcp_out_of_memory(sk);
- if (too_many_orphans && net_ratelimit())
- pr_info("too many orphaned sockets\n");
- if (out_of_socket_memory && net_ratelimit())
- pr_info("out of memory -- consider tuning tcp_mem\n");
+ if (too_many_orphans)
+ net_info_ratelimited("too many orphaned sockets\n");
+ if (out_of_socket_memory)
+ net_info_ratelimited("out of memory -- consider tuning tcp_mem\n");
return too_many_orphans || out_of_socket_memory;
}
@@ -1935,7 +2050,9 @@ void tcp_close(struct sock *sk, long timeout)
* advertise a zero window, then kill -9 the FTP client, wheee...
* Note: timeout is always zero in such a case.
*/
- if (data_was_unread) {
+ if (unlikely(tcp_sk(sk)->repair)) {
+ sk->sk_prot->disconnect(sk, 0);
+ } else if (data_was_unread) {
/* Unread data was tossed, zap the connection. */
NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
tcp_set_state(sk, TCP_CLOSE);
@@ -2053,7 +2170,7 @@ EXPORT_SYMBOL(tcp_close);
/* These states need RST on ABORT according to RFC793 */
-static inline int tcp_need_reset(int state)
+static inline bool tcp_need_reset(int state)
{
return (1 << state) &
(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
@@ -2074,6 +2191,8 @@ int tcp_disconnect(struct sock *sk, int flags)
/* ABORT function of RFC793 */
if (old_state == TCP_LISTEN) {
inet_csk_listen_stop(sk);
+ } else if (unlikely(tp->repair)) {
+ sk->sk_err = ECONNABORTED;
} else if (tcp_need_reset(old_state) ||
(tp->snd_nxt != tp->write_seq &&
(1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
@@ -2125,6 +2244,54 @@ int tcp_disconnect(struct sock *sk, int flags)
}
EXPORT_SYMBOL(tcp_disconnect);
+static inline bool tcp_can_repair_sock(const struct sock *sk)
+{
+ return capable(CAP_NET_ADMIN) &&
+ ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED));
+}
+
+static int tcp_repair_options_est(struct tcp_sock *tp,
+ struct tcp_repair_opt __user *optbuf, unsigned int len)
+{
+ struct tcp_repair_opt opt;
+
+ while (len >= sizeof(opt)) {
+ if (copy_from_user(&opt, optbuf, sizeof(opt)))
+ return -EFAULT;
+
+ optbuf++;
+ len -= sizeof(opt);
+
+ switch (opt.opt_code) {
+ case TCPOPT_MSS:
+ tp->rx_opt.mss_clamp = opt.opt_val;
+ break;
+ case TCPOPT_WINDOW:
+ if (opt.opt_val > 14)
+ return -EFBIG;
+
+ tp->rx_opt.snd_wscale = opt.opt_val;
+ break;
+ case TCPOPT_SACK_PERM:
+ if (opt.opt_val != 0)
+ return -EINVAL;
+
+ tp->rx_opt.sack_ok |= TCP_SACK_SEEN;
+ if (sysctl_tcp_fack)
+ tcp_enable_fack(tp);
+ break;
+ case TCPOPT_TIMESTAMP:
+ if (opt.opt_val != 0)
+ return -EINVAL;
+
+ tp->rx_opt.tstamp_ok = 1;
+ break;
+ }
+ }
+
+ return 0;
+}
+
/*
* Socket option code for TCP.
*/
@@ -2295,6 +2462,55 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
err = -EINVAL;
else
tp->thin_dupack = val;
+ if (tp->thin_dupack)
+ tcp_disable_early_retrans(tp);
+ break;
+
+ case TCP_REPAIR:
+ if (!tcp_can_repair_sock(sk))
+ err = -EPERM;
+ else if (val == 1) {
+ tp->repair = 1;
+ sk->sk_reuse = SK_FORCE_REUSE;
+ tp->repair_queue = TCP_NO_QUEUE;
+ } else if (val == 0) {
+ tp->repair = 0;
+ sk->sk_reuse = SK_NO_REUSE;
+ tcp_send_window_probe(sk);
+ } else
+ err = -EINVAL;
+
+ break;
+
+ case TCP_REPAIR_QUEUE:
+ if (!tp->repair)
+ err = -EPERM;
+ else if (val < TCP_QUEUES_NR)
+ tp->repair_queue = val;
+ else
+ err = -EINVAL;
+ break;
+
+ case TCP_QUEUE_SEQ:
+ if (sk->sk_state != TCP_CLOSE)
+ err = -EPERM;
+ else if (tp->repair_queue == TCP_SEND_QUEUE)
+ tp->write_seq = val;
+ else if (tp->repair_queue == TCP_RECV_QUEUE)
+ tp->rcv_nxt = val;
+ else
+ err = -EINVAL;
+ break;
+
+ case TCP_REPAIR_OPTIONS:
+ if (!tp->repair)
+ err = -EINVAL;
+ else if (sk->sk_state == TCP_ESTABLISHED)
+ err = tcp_repair_options_est(tp,
+ (struct tcp_repair_opt __user *)optval,
+ optlen);
+ else
+ err = -EPERM;
break;
case TCP_CORK:
@@ -2530,6 +2746,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
val = tp->mss_cache;
if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
val = tp->rx_opt.user_mss;
+ if (tp->repair)
+ val = tp->rx_opt.mss_clamp;
break;
case TCP_NODELAY:
val = !!(tp->nonagle&TCP_NAGLE_OFF);
@@ -2632,6 +2850,26 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
val = tp->thin_dupack;
break;
+ case TCP_REPAIR:
+ val = tp->repair;
+ break;
+
+ case TCP_REPAIR_QUEUE:
+ if (tp->repair)
+ val = tp->repair_queue;
+ else
+ return -EINVAL;
+ break;
+
+ case TCP_QUEUE_SEQ:
+ if (tp->repair_queue == TCP_SEND_QUEUE)
+ val = tp->write_seq;
+ else if (tp->repair_queue == TCP_RECV_QUEUE)
+ val = tp->rcv_nxt;
+ else
+ return -EINVAL;
+ break;
+
case TCP_USER_TIMEOUT:
val = jiffies_to_msecs(icsk->icsk_user_timeout);
break;
@@ -2675,7 +2913,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct tcphdr *th;
- unsigned thlen;
+ unsigned int thlen;
unsigned int seq;
__be32 delta;
unsigned int oldlen;
@@ -2933,13 +3171,13 @@ out_free:
struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
{
struct tcp_md5sig_pool __percpu *pool;
- int alloc = 0;
+ bool alloc = false;
retry:
spin_lock_bh(&tcp_md5sig_pool_lock);
pool = tcp_md5sig_pool;
if (tcp_md5sig_users++ == 0) {
- alloc = 1;
+ alloc = true;
spin_unlock_bh(&tcp_md5sig_pool_lock);
} else if (!pool) {
tcp_md5sig_users--;
@@ -3033,9 +3271,9 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
struct scatterlist sg;
const struct tcphdr *tp = tcp_hdr(skb);
struct hash_desc *desc = &hp->md5_desc;
- unsigned i;
- const unsigned head_data_len = skb_headlen(skb) > header_len ?
- skb_headlen(skb) - header_len : 0;
+ unsigned int i;
+ const unsigned int head_data_len = skb_headlen(skb) > header_len ?
+ skb_headlen(skb) - header_len : 0;
const struct skb_shared_info *shi = skb_shinfo(skb);
struct sk_buff *frag_iter;
@@ -3223,9 +3461,15 @@ extern struct tcp_congestion_ops tcp_reno;
static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
+ ssize_t ret;
+
if (!str)
return 0;
- thash_entries = simple_strtoul(str, &str, 0);
+
+ ret = kstrtoul(str, 0, &thash_entries);
+ if (ret)
+ return 0;
+
return 1;
}
__setup("thash_entries=", set_thash_entries);
@@ -3243,7 +3487,7 @@ void __init tcp_init(void)
{
struct sk_buff *skb = NULL;
unsigned long limit;
- int max_share, cnt;
+ int max_rshare, max_wshare, cnt;
unsigned int i;
unsigned long jiffy = jiffies;
@@ -3270,6 +3514,7 @@ void __init tcp_init(void)
0,
NULL,
&tcp_hashinfo.ehash_mask,
+ 0,
thash_entries ? 0 : 512 * 1024);
for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) {
INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
@@ -3286,6 +3531,7 @@ void __init tcp_init(void)
0,
&tcp_hashinfo.bhash_size,
NULL,
+ 0,
64 * 1024);
tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
@@ -3302,17 +3548,17 @@ void __init tcp_init(void)
tcp_init_mem(&init_net);
/* Set per-socket limits to no more than 1/128 the pressure threshold */
- limit = nr_free_buffer_pages() << (PAGE_SHIFT - 10);
- limit = max(limit, 128UL);
- max_share = min(4UL*1024*1024, limit);
+ limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
+ max_wshare = min(4UL*1024*1024, limit);
+ max_rshare = min(6UL*1024*1024, limit);
sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
sysctl_tcp_wmem[1] = 16*1024;
- sysctl_tcp_wmem[2] = max(64*1024, max_share);
+ sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
sysctl_tcp_rmem[1] = 87380;
- sysctl_tcp_rmem[2] = max(87380, max_share);
+ sysctl_tcp_rmem[2] = max(87380, max_rshare);
pr_info("Hash tables configured (established %u bind %u)\n",
tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
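
The repair-mode plumbing added above (TCP_REPAIR, TCP_REPAIR_QUEUE, TCP_QUEUE_SEQ, TCP_REPAIR_OPTIONS, plus the detours in tcp_sendmsg()/tcp_recvmsg()/tcp_close()) exists for checkpoint/restore of live connections. A hedged userspace sketch of the restore side, using the constants this series adds to the uapi linux/tcp.h (restore_tcp() is an illustrative name; most error handling trimmed):

    #include <stdint.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/tcp.h>		/* TCP_REPAIR & friends, this series */

    static int restore_tcp(int sk, uint32_t snd_seq, uint32_t rcv_seq,
                           const struct sockaddr_in *peer)
    {
            int on = 1, off = 0, q;

            /* Needs CAP_NET_ADMIN; the socket must still be TCP_CLOSE. */
            if (setsockopt(sk, IPPROTO_TCP, TCP_REPAIR, &on, sizeof(on)))
                    return -1;

            q = TCP_SEND_QUEUE;	/* TCP_QUEUE_SEQ now sets write_seq */
            setsockopt(sk, IPPROTO_TCP, TCP_REPAIR_QUEUE, &q, sizeof(q));
            setsockopt(sk, IPPROTO_TCP, TCP_QUEUE_SEQ, &snd_seq,
                       sizeof(snd_seq));

            q = TCP_RECV_QUEUE;	/* ...and here it sets rcv_nxt */
            setsockopt(sk, IPPROTO_TCP, TCP_REPAIR_QUEUE, &q, sizeof(q));
            setsockopt(sk, IPPROTO_TCP, TCP_QUEUE_SEQ, &rcv_seq,
                       sizeof(rcv_seq));

            /* In repair mode connect() installs the 4-tuple, no handshake. */
            if (connect(sk, (const struct sockaddr *)peer, sizeof(*peer)))
                    return -1;

            /* Leaving repair mode sends a window probe to resync the peer. */
            return setsockopt(sk, IPPROTO_TCP, TCP_REPAIR, &off, sizeof(off));
    }

The checkpoint side mirrors this by selecting a queue with TCP_REPAIR_QUEUE and draining it via recvmsg(MSG_PEEK), which is what the tcp_peek_sndq() path above serves.
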
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 272a84593c85..04dbd7ae7c62 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -280,19 +280,19 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
/* RFC2861 Check whether we are limited by application or congestion window
* This is the inverse of cwnd check in tcp_tso_should_defer
*/
-int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
+bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
{
const struct tcp_sock *tp = tcp_sk(sk);
u32 left;
if (in_flight >= tp->snd_cwnd)
- return 1;
+ return true;
left = tp->snd_cwnd - in_flight;
if (sk_can_gso(sk) &&
left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
left * tp->mss_cache < sk->sk_gso_max_size)
- return 1;
+ return true;
return left <= tcp_max_tso_deferred_mss(tp);
}
EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index fe3ecf484b44..57bdd17dff4d 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -15,7 +15,7 @@
/* Tcp Hybla structure. */
struct hybla {
- u8 hybla_en;
+ bool hybla_en;
u32 snd_cwnd_cents; /* Keeps increment values when it is <1, <<7 */
u32 rho; /* Rho parameter, integer part */
u32 rho2; /* Rho * Rho, integer part */
@@ -24,8 +24,7 @@ struct hybla {
u32 minrtt; /* Minimum smoothed round trip time value seen */
};
-/* Hybla reference round trip time (default= 1/40 sec = 25 ms),
- expressed in jiffies */
+/* Hybla reference round trip time (default= 1/40 sec = 25 ms), in ms */
static int rtt0 = 25;
module_param(rtt0, int, 0644);
MODULE_PARM_DESC(rtt0, "reference round trip time (ms)");
@@ -39,7 +38,7 @@ static inline void hybla_recalc_param (struct sock *sk)
ca->rho_3ls = max_t(u32, tcp_sk(sk)->srtt / msecs_to_jiffies(rtt0), 8);
ca->rho = ca->rho_3ls >> 3;
ca->rho2_7ls = (ca->rho_3ls * ca->rho_3ls) << 1;
- ca->rho2 = ca->rho2_7ls >>7;
+ ca->rho2 = ca->rho2_7ls >> 7;
}
static void hybla_init(struct sock *sk)
@@ -52,7 +51,7 @@ static void hybla_init(struct sock *sk)
ca->rho_3ls = 0;
ca->rho2_7ls = 0;
ca->snd_cwnd_cents = 0;
- ca->hybla_en = 1;
+ ca->hybla_en = true;
tp->snd_cwnd = 2;
tp->snd_cwnd_clamp = 65535;
@@ -67,6 +66,7 @@ static void hybla_init(struct sock *sk)
static void hybla_state(struct sock *sk, u8 ca_state)
{
struct hybla *ca = inet_csk_ca(sk);
+
ca->hybla_en = (ca_state == TCP_CA_Open);
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e886e2f7fa8d..b224eb8bce8b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -85,7 +85,7 @@ int sysctl_tcp_ecn __read_mostly = 2;
EXPORT_SYMBOL(sysctl_tcp_ecn);
int sysctl_tcp_dsack __read_mostly = 1;
int sysctl_tcp_app_win __read_mostly = 31;
-int sysctl_tcp_adv_win_scale __read_mostly = 2;
+int sysctl_tcp_adv_win_scale __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
int sysctl_tcp_stdurg __read_mostly;
@@ -99,6 +99,7 @@ int sysctl_tcp_thin_dupack __read_mostly;
int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
int sysctl_tcp_abc __read_mostly;
+int sysctl_tcp_early_retrans __read_mostly = 2;
#define FLAG_DATA 0x01 /* Incoming frame contained data. */
#define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */
@@ -175,7 +176,7 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
static void tcp_incr_quickack(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
- unsigned quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
+ unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
if (quickacks == 0)
quickacks = 2;
@@ -195,9 +196,10 @@ static void tcp_enter_quickack_mode(struct sock *sk)
* and the session is not interactive.
*/
-static inline int tcp_in_quickack_mode(const struct sock *sk)
+static inline bool tcp_in_quickack_mode(const struct sock *sk)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
+
return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
}
@@ -252,11 +254,11 @@ static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
tp->ecn_flags &= ~TCP_ECN_OK;
}
-static inline int TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
+static bool TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
{
if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
- return 1;
- return 0;
+ return true;
+ return false;
}
/* Buffer size and advertised window tuning.
@@ -335,6 +337,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
incr = __tcp_grow_window(sk, skb);
if (incr) {
+ incr = max_t(int, incr, 2 * skb->len);
tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
tp->window_clamp);
inet_csk(sk)->icsk_ack.quick |= 1;
@@ -474,8 +477,11 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
if (!win_dep) {
m -= (new_sample >> 3);
new_sample += m;
- } else if (m < new_sample)
- new_sample = m << 3;
+ } else {
+ m <<= 3;
+ if (m < new_sample)
+ new_sample = m;
+ }
} else {
/* No previous measure. */
new_sample = m << 3;
@@ -491,7 +497,7 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
goto new_measure;
if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
return;
- tcp_rcv_rtt_update(tp, jiffies - tp->rcv_rtt_est.time, 1);
+ tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_rtt_est.time, 1);
new_measure:
tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
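
The jiffies to tcp_time_stamp change above is a correctness fix, not a rename: rcv_rtt_est.time is stamped from tcp_time_stamp, so the subtraction has to happen in the same 32-bit domain. A sketch of why:

    /* tcp_time_stamp is the 32-bit truncation of jiffies (<net/tcp.h>):
     *
     *	#define tcp_time_stamp		((__u32)(jiffies))
     *
     * On 64-bit kernels "jiffies - tp->rcv_rtt_est.time" mixed the full
     * 64-bit counter with a 32-bit stamp, yielding a huge bogus RTT
     * sample whenever the two disagreed in the high bits; the difference
     * of two tcp_time_stamp values wraps correctly instead.
     */
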
@@ -902,6 +908,7 @@ static void tcp_init_metrics(struct sock *sk)
if (dst_metric(dst, RTAX_REORDERING) &&
tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
tcp_disable_fack(tp);
+ tcp_disable_early_retrans(tp);
tp->reordering = dst_metric(dst, RTAX_REORDERING);
}
@@ -933,7 +940,7 @@ static void tcp_init_metrics(struct sock *sk)
tcp_set_rto(sk);
reset:
if (tp->srtt == 0) {
- /* RFC2988bis: We've failed to get a valid RTT sample from
+ /* RFC6298: 5.7 We've failed to get a valid RTT sample from
* 3WHS. This is most likely due to retransmission,
* including spurious one. Reset the RTO back to 3secs
* from the more aggressive 1sec to avoid more spurious
@@ -943,7 +950,7 @@ reset:
inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
}
/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
- * retransmitted. In light of RFC2988bis' more aggressive 1sec
+ * retransmitted. In light of RFC6298 more aggressive 1sec
* initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
* retransmission has occurred.
*/
@@ -975,15 +982,18 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
NET_INC_STATS_BH(sock_net(sk), mib_idx);
#if FASTRETRANS_DEBUG > 1
- printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
- tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
- tp->reordering,
- tp->fackets_out,
- tp->sacked_out,
- tp->undo_marker ? tp->undo_retrans : 0);
+ pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
+ tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
+ tp->reordering,
+ tp->fackets_out,
+ tp->sacked_out,
+ tp->undo_marker ? tp->undo_retrans : 0);
#endif
tcp_disable_fack(tp);
}
+
+ if (metric > 0)
+ tcp_disable_early_retrans(tp);
}
/* This must be called before lost_out is incremented */
@@ -1114,36 +1124,36 @@ static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
* the exact amount is rather hard to quantify. However, tp->max_window can
* be used as an exaggerated estimate.
*/
-static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
- u32 start_seq, u32 end_seq)
+static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack,
+ u32 start_seq, u32 end_seq)
{
/* Too far in future, or reversed (interpretation is ambiguous) */
if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq))
- return 0;
+ return false;
/* Nasty start_seq wrap-around check (see comments above) */
if (!before(start_seq, tp->snd_nxt))
- return 0;
+ return false;
/* In outstanding window? ...This is valid exit for D-SACKs too.
* start_seq == snd_una is non-sensical (see comments above)
*/
if (after(start_seq, tp->snd_una))
- return 1;
+ return true;
if (!is_dsack || !tp->undo_marker)
- return 0;
+ return false;
/* ...Then it's D-SACK, and must reside below snd_una completely */
if (after(end_seq, tp->snd_una))
- return 0;
+ return false;
if (!before(start_seq, tp->undo_marker))
- return 1;
+ return true;
/* Too old */
if (!after(end_seq, tp->undo_marker))
- return 0;
+ return false;
/* Undo_marker boundary crossing (overestimates a lot). Known already:
* start_seq < undo_marker and end_seq >= undo_marker.
@@ -1215,17 +1225,17 @@ static void tcp_mark_lost_retrans(struct sock *sk)
tp->lost_retrans_low = new_low_seq;
}
-static int tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
- struct tcp_sack_block_wire *sp, int num_sacks,
- u32 prior_snd_una)
+static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
+ struct tcp_sack_block_wire *sp, int num_sacks,
+ u32 prior_snd_una)
{
struct tcp_sock *tp = tcp_sk(sk);
u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
- int dup_sack = 0;
+ bool dup_sack = false;
if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
- dup_sack = 1;
+ dup_sack = true;
tcp_dsack_seen(tp);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
} else if (num_sacks > 1) {
@@ -1234,7 +1244,7 @@ static int tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
if (!after(end_seq_0, end_seq_1) &&
!before(start_seq_0, start_seq_1)) {
- dup_sack = 1;
+ dup_sack = true;
tcp_dsack_seen(tp);
NET_INC_STATS_BH(sock_net(sk),
LINUX_MIB_TCPDSACKOFORECV);
@@ -1265,9 +1275,10 @@ struct tcp_sacktag_state {
* FIXME: this could be merged to shift decision code
*/
static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
- u32 start_seq, u32 end_seq)
+ u32 start_seq, u32 end_seq)
{
- int in_sack, err;
+ int err;
+ bool in_sack;
unsigned int pkt_len;
unsigned int mss;
@@ -1313,7 +1324,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
static u8 tcp_sacktag_one(struct sock *sk,
struct tcp_sacktag_state *state, u8 sacked,
u32 start_seq, u32 end_seq,
- int dup_sack, int pcount)
+ bool dup_sack, int pcount)
{
struct tcp_sock *tp = tcp_sk(sk);
int fack_count = state->fack_count;
@@ -1393,10 +1404,10 @@ static u8 tcp_sacktag_one(struct sock *sk,
/* Shift newly-SACKed bytes from this skb to the immediately previous
* already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
*/
-static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
- struct tcp_sacktag_state *state,
- unsigned int pcount, int shifted, int mss,
- int dup_sack)
+static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
+ struct tcp_sacktag_state *state,
+ unsigned int pcount, int shifted, int mss,
+ bool dup_sack)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
@@ -1446,7 +1457,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
if (skb->len > 0) {
BUG_ON(!tcp_skb_pcount(skb));
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED);
- return 0;
+ return false;
}
/* Whole SKB was eaten :-) */
@@ -1469,7 +1480,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED);
- return 1;
+ return true;
}
/* I wish gso_size would have a bit more sane initialization than
@@ -1492,7 +1503,7 @@ static int skb_can_shift(const struct sk_buff *skb)
static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
struct tcp_sacktag_state *state,
u32 start_seq, u32 end_seq,
- int dup_sack)
+ bool dup_sack)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *prev;
@@ -1631,14 +1642,14 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
struct tcp_sack_block *next_dup,
struct tcp_sacktag_state *state,
u32 start_seq, u32 end_seq,
- int dup_sack_in)
+ bool dup_sack_in)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *tmp;
tcp_for_write_queue_from(skb, sk) {
int in_sack = 0;
- int dup_sack = dup_sack_in;
+ bool dup_sack = dup_sack_in;
if (skb == tcp_send_head(sk))
break;
@@ -1653,7 +1664,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
next_dup->start_seq,
next_dup->end_seq);
if (in_sack > 0)
- dup_sack = 1;
+ dup_sack = true;
}
/* skb reference here is a bit tricky to get right, since
@@ -1758,7 +1769,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
struct sk_buff *skb;
int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
int used_sacks;
- int found_dup_sack = 0;
+ bool found_dup_sack = false;
int i, j;
int first_sack_index;
@@ -1789,7 +1800,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
used_sacks = 0;
first_sack_index = 0;
for (i = 0; i < num_sacks; i++) {
- int dup_sack = !i && found_dup_sack;
+ bool dup_sack = !i && found_dup_sack;
sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq);
sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq);
@@ -1856,7 +1867,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
while (i < used_sacks) {
u32 start_seq = sp[i].start_seq;
u32 end_seq = sp[i].end_seq;
- int dup_sack = (found_dup_sack && (i == first_sack_index));
+ bool dup_sack = (found_dup_sack && (i == first_sack_index));
struct tcp_sack_block *next_dup = NULL;
if (found_dup_sack && ((i + 1) == first_sack_index))
@@ -1958,9 +1969,9 @@ out:
}
/* Limits sacked_out so that sum with lost_out isn't ever larger than
- * packets_out. Returns zero if sacked_out adjustement wasn't necessary.
+ * packets_out. Returns false if sacked_out adjustment wasn't necessary.
*/
-static int tcp_limit_reno_sacked(struct tcp_sock *tp)
+static bool tcp_limit_reno_sacked(struct tcp_sock *tp)
{
u32 holes;
@@ -1969,9 +1980,9 @@ static int tcp_limit_reno_sacked(struct tcp_sock *tp)
if ((tp->sacked_out + holes) > tp->packets_out) {
tp->sacked_out = tp->packets_out - holes;
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/* If we receive more dupacks than we expected counting segments
@@ -2025,40 +2036,40 @@ static int tcp_is_sackfrto(const struct tcp_sock *tp)
/* F-RTO can only be used if TCP has never retransmitted anything other than
* head (SACK enhanced variant from Appendix B of RFC4138 is more robust here)
*/
-int tcp_use_frto(struct sock *sk)
+bool tcp_use_frto(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
struct sk_buff *skb;
if (!sysctl_tcp_frto)
- return 0;
+ return false;
/* MTU probe and F-RTO won't really play nicely along currently */
if (icsk->icsk_mtup.probe_size)
- return 0;
+ return false;
if (tcp_is_sackfrto(tp))
- return 1;
+ return true;
/* Avoid expensive walking of rexmit queue if possible */
if (tp->retrans_out > 1)
- return 0;
+ return false;
skb = tcp_write_queue_head(sk);
if (tcp_skb_is_last(sk, skb))
- return 1;
+ return true;
skb = tcp_write_queue_next(sk, skb); /* Skips head */
tcp_for_write_queue_from(skb, sk) {
if (skb == tcp_send_head(sk))
break;
if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
- return 0;
+ return false;
/* Short-circuit when first non-SACKed skb has been checked */
if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
break;
}
- return 1;
+ return true;
}
/* RTO occurred, but do not yet enter Loss state. Instead, defer RTO
@@ -2294,7 +2305,7 @@ void tcp_enter_loss(struct sock *sk, int how)
*
* Do processing similar to RTO timeout.
*/
-static int tcp_check_sack_reneging(struct sock *sk, int flag)
+static bool tcp_check_sack_reneging(struct sock *sk, int flag)
{
if (flag & FLAG_SACK_RENEGING) {
struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2305,9 +2316,9 @@ static int tcp_check_sack_reneging(struct sock *sk, int flag)
tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
icsk->icsk_rto, TCP_RTO_MAX);
- return 1;
+ return true;
}
- return 0;
+ return false;
}
static inline int tcp_fackets_out(const struct tcp_sock *tp)
@@ -2335,6 +2346,27 @@ static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1;
}
+static bool tcp_pause_early_retransmit(struct sock *sk, int flag)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ unsigned long delay;
+
+ /* Delay early retransmit and entering fast recovery for
+ * max(RTT/4, 2msec) unless ack has ECE mark, no RTT samples
+ * available, or RTO is scheduled to fire first.
+ */
+ if (sysctl_tcp_early_retrans < 2 || (flag & FLAG_ECE) || !tp->srtt)
+ return false;
+
+ delay = max_t(unsigned long, (tp->srtt >> 5), msecs_to_jiffies(2));
+ if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay)))
+ return false;
+
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, delay, TCP_RTO_MAX);
+ tp->early_retrans_delayed = 1;
+ return true;
+}
+
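
One subtlety in tcp_pause_early_retransmit(): tp->srtt is stored scaled by 8, so the (tp->srtt >> 5) in the code is precisely the RTT/4 promised by the comment. A standalone sketch of the delay rule, using milliseconds instead of jiffies (names and units are illustrative):

#include <stdio.h>

/* Hypothetical, jiffies-free sketch of the ER deferral rule: srtt is
 * kept scaled by 8, so (srtt >> 5) == (8*RTT)/32 == RTT/4. */
static unsigned int er_delay_ms(unsigned int srtt_8x_ms)
{
    unsigned int quarter_rtt = srtt_8x_ms >> 5;
    return quarter_rtt > 2 ? quarter_rtt : 2; /* 2 ms floor */
}

int main(void)
{
    printf("%u\n", er_delay_ms(8 * 40)); /* RTT 40ms -> 10ms delay */
    printf("%u\n", er_delay_ms(8 * 4));  /* RTT 4ms  -> 2ms floor  */
    return 0;
}

The 2 ms floor keeps the deferral meaningful on very low-RTT paths, where RTT/4 would otherwise round down to nothing.
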
static inline int tcp_skb_timedout(const struct sock *sk,
const struct sk_buff *skb)
{
@@ -2442,28 +2474,28 @@ static inline int tcp_head_timedout(const struct sock *sk)
* Main question: may we further continue forward transmission
* with the same cwnd?
*/
-static int tcp_time_to_recover(struct sock *sk)
+static bool tcp_time_to_recover(struct sock *sk, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
__u32 packets_out;
/* Do not perform any recovery during F-RTO algorithm */
if (tp->frto_counter)
- return 0;
+ return false;
/* Trick#1: The loss is proven. */
if (tp->lost_out)
- return 1;
+ return true;
/* Not-A-Trick#2 : Classic rule... */
if (tcp_dupack_heuristics(tp) > tp->reordering)
- return 1;
+ return true;
/* Trick#3 : when we use RFC2988 timer restart, fast
* retransmit can be triggered by timeout of queue head.
*/
if (tcp_is_fack(tp) && tcp_head_timedout(sk))
- return 1;
+ return true;
/* Trick#4: It is still not OK... But will it be useful to delay
* recovery more?
@@ -2475,7 +2507,7 @@ static int tcp_time_to_recover(struct sock *sk)
/* We have nothing to send. This connection is limited
* either by receiver window or by application.
*/
- return 1;
+ return true;
}
/* If a thin stream is detected, retransmit after first
@@ -2486,9 +2518,19 @@ static int tcp_time_to_recover(struct sock *sk)
if ((tp->thin_dupack || sysctl_tcp_thin_dupack) &&
tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 &&
tcp_is_sack(tp) && !tcp_send_head(sk))
- return 1;
+ return true;
- return 0;
+ /* Trick#6: TCP early retransmit, per RFC5827. To avoid spurious
+ * retransmissions due to small network reorderings, we implement
+ * Mitigation A.3 in the RFC and delay the retransmission for a short
+ * interval if appropriate.
+ */
+ if (tp->do_early_retrans && !tp->retrans_out && tp->sacked_out &&
+ (tp->packets_out == (tp->sacked_out + 1) && tp->packets_out < 4) &&
+ !tcp_may_send_now(sk))
+ return !tcp_pause_early_retransmit(sk, flag);
+
+ return false;
}
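
The new Trick#6 clause fires only for the tiny flights RFC 5827 targets, where fewer than three duplicate ACKs can ever arrive and classic fast retransmit would therefore never trigger. A hypothetical restatement of the predicate as a standalone check (the struct and its fields are stand-ins, not kernel types):

#include <stdbool.h>
#include <stdio.h>

struct flight {
    unsigned int packets_out;  /* segments in flight */
    unsigned int sacked_out;   /* SACKed segments */
    unsigned int retrans_out;  /* retransmitted, not yet acked */
    bool can_send_new;         /* tcp_may_send_now() analogue */
};

/* Early retransmit applies when everything in flight except the head
 * has been SACKed, the flight is too small for 3 dupacks, nothing has
 * been retransmitted yet, and no new data can be sent instead. */
static bool early_retransmit_applies(const struct flight *f)
{
    return !f->retrans_out && f->sacked_out &&
           f->packets_out == f->sacked_out + 1 &&
           f->packets_out < 4 &&
           !f->can_send_new;
}

int main(void)
{
    struct flight f = { .packets_out = 3, .sacked_out = 2,
                        .retrans_out = 0, .can_send_new = false };
    printf("%d\n", early_retransmit_applies(&f)); /* 1: only the head is missing */
    return 0;
}
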
/* New heuristics: it is possible only after we switched to restart timer
@@ -2676,22 +2718,22 @@ static void DBGUNDO(struct sock *sk, const char *msg)
struct inet_sock *inet = inet_sk(sk);
if (sk->sk_family == AF_INET) {
- printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
- msg,
- &inet->inet_daddr, ntohs(inet->inet_dport),
- tp->snd_cwnd, tcp_left_out(tp),
- tp->snd_ssthresh, tp->prior_ssthresh,
- tp->packets_out);
+ pr_debug("Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
+ msg,
+ &inet->inet_daddr, ntohs(inet->inet_dport),
+ tp->snd_cwnd, tcp_left_out(tp),
+ tp->snd_ssthresh, tp->prior_ssthresh,
+ tp->packets_out);
}
#if IS_ENABLED(CONFIG_IPV6)
else if (sk->sk_family == AF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
- printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
- msg,
- &np->daddr, ntohs(inet->inet_dport),
- tp->snd_cwnd, tcp_left_out(tp),
- tp->snd_ssthresh, tp->prior_ssthresh,
- tp->packets_out);
+ pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
+ msg,
+ &np->daddr, ntohs(inet->inet_dport),
+ tp->snd_cwnd, tcp_left_out(tp),
+ tp->snd_ssthresh, tp->prior_ssthresh,
+ tp->packets_out);
}
#endif
}
@@ -2727,7 +2769,7 @@ static inline int tcp_may_undo(const struct tcp_sock *tp)
}
/* People celebrate: "We love our President!" */
-static int tcp_try_undo_recovery(struct sock *sk)
+static bool tcp_try_undo_recovery(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -2752,10 +2794,10 @@ static int tcp_try_undo_recovery(struct sock *sk)
* is ACKed. For Reno it is MUST to prevent false
* fast retransmits (RFC2582). SACK TCP is safe. */
tcp_moderate_cwnd(tp);
- return 1;
+ return true;
}
tcp_set_ca_state(sk, TCP_CA_Open);
- return 0;
+ return false;
}
/* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
@@ -2785,19 +2827,19 @@ static void tcp_try_undo_dsack(struct sock *sk)
* that successive retransmissions of a segment must not advance
* retrans_stamp under any conditions.
*/
-static int tcp_any_retrans_done(const struct sock *sk)
+static bool tcp_any_retrans_done(const struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
if (tp->retrans_out)
- return 1;
+ return true;
skb = tcp_write_queue_head(sk);
if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
- return 1;
+ return true;
- return 0;
+ return false;
}
/* Undo during fast recovery after partial ACK. */
@@ -2831,7 +2873,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked)
}
/* Undo during loss recovery after partial ACK. */
-static int tcp_try_undo_loss(struct sock *sk)
+static bool tcp_try_undo_loss(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -2853,9 +2895,9 @@ static int tcp_try_undo_loss(struct sock *sk)
tp->undo_marker = 0;
if (tcp_is_sack(tp))
tcp_set_ca_state(sk, TCP_CA_Open);
- return 1;
+ return true;
}
- return 0;
+ return false;
}
static inline void tcp_complete_cwr(struct sock *sk)
@@ -2864,11 +2906,14 @@ static inline void tcp_complete_cwr(struct sock *sk)
/* Do not moderate cwnd if it's already undone in cwr or recovery. */
if (tp->undo_marker) {
- if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR)
+ if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) {
tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
- else /* PRR */
+ tp->snd_cwnd_stamp = tcp_time_stamp;
+ } else if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH) {
+ /* PRR algorithm. */
tp->snd_cwnd = tp->snd_ssthresh;
- tp->snd_cwnd_stamp = tcp_time_stamp;
+ tp->snd_cwnd_stamp = tcp_time_stamp;
+ }
}
tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
}
@@ -3018,6 +3063,38 @@ static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
}
+static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ int mib_idx;
+
+ if (tcp_is_reno(tp))
+ mib_idx = LINUX_MIB_TCPRENORECOVERY;
+ else
+ mib_idx = LINUX_MIB_TCPSACKRECOVERY;
+
+ NET_INC_STATS_BH(sock_net(sk), mib_idx);
+
+ tp->high_seq = tp->snd_nxt;
+ tp->prior_ssthresh = 0;
+ tp->undo_marker = tp->snd_una;
+ tp->undo_retrans = tp->retrans_out;
+
+ if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
+ if (!ece_ack)
+ tp->prior_ssthresh = tcp_current_ssthresh(sk);
+ tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
+ TCP_ECN_queue_cwr(tp);
+ }
+
+ tp->bytes_acked = 0;
+ tp->snd_cwnd_cnt = 0;
+ tp->prior_cwnd = tp->snd_cwnd;
+ tp->prr_delivered = 0;
+ tp->prr_out = 0;
+ tcp_set_ca_state(sk, TCP_CA_Recovery);
+}
+
/* Process an event, which can update packets-in-flight not trivially.
* Main goal of this function is to calculate new estimate for left_out,
* taking into account both packets sitting in receiver's buffer and
@@ -3037,7 +3114,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
struct tcp_sock *tp = tcp_sk(sk);
int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
(tcp_fackets_out(tp) > tp->reordering));
- int fast_rexmit = 0, mib_idx;
+ int fast_rexmit = 0;
if (WARN_ON(!tp->packets_out && tp->sacked_out))
tp->sacked_out = 0;
@@ -3121,7 +3198,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
if (icsk->icsk_ca_state <= TCP_CA_Disorder)
tcp_try_undo_dsack(sk);
- if (!tcp_time_to_recover(sk)) {
+ if (!tcp_time_to_recover(sk, flag)) {
tcp_try_to_open(sk, flag);
return;
}
@@ -3138,32 +3215,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
}
/* Otherwise enter Recovery state */
-
- if (tcp_is_reno(tp))
- mib_idx = LINUX_MIB_TCPRENORECOVERY;
- else
- mib_idx = LINUX_MIB_TCPSACKRECOVERY;
-
- NET_INC_STATS_BH(sock_net(sk), mib_idx);
-
- tp->high_seq = tp->snd_nxt;
- tp->prior_ssthresh = 0;
- tp->undo_marker = tp->snd_una;
- tp->undo_retrans = tp->retrans_out;
-
- if (icsk->icsk_ca_state < TCP_CA_CWR) {
- if (!(flag & FLAG_ECE))
- tp->prior_ssthresh = tcp_current_ssthresh(sk);
- tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
- TCP_ECN_queue_cwr(tp);
- }
-
- tp->bytes_acked = 0;
- tp->snd_cwnd_cnt = 0;
- tp->prior_cwnd = tp->snd_cwnd;
- tp->prr_delivered = 0;
- tp->prr_out = 0;
- tcp_set_ca_state(sk, TCP_CA_Recovery);
+ tcp_enter_recovery(sk, (flag & FLAG_ECE));
fast_rexmit = 1;
}
@@ -3245,16 +3297,47 @@ static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
/* Restart timer after forward progress on connection.
* RFC2988 recommends to restart timer to now+rto.
*/
-static void tcp_rearm_rto(struct sock *sk)
+void tcp_rearm_rto(struct sock *sk)
{
- const struct tcp_sock *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
if (!tp->packets_out) {
inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
} else {
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
+ u32 rto = inet_csk(sk)->icsk_rto;
+ /* Offset the time elapsed after installing regular RTO */
+ if (tp->early_retrans_delayed) {
+ struct sk_buff *skb = tcp_write_queue_head(sk);
+ const u32 rto_time_stamp = TCP_SKB_CB(skb)->when + rto;
+ s32 delta = (s32)(rto_time_stamp - tcp_time_stamp);
+ /* delta may not be positive if the socket is locked
+ * when the delayed ER timer fires and is rescheduled.
+ */
+ if (delta > 0)
+ rto = delta;
+ }
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
+ TCP_RTO_MAX);
}
+ tp->early_retrans_delayed = 0;
+}
+
+/* This function is called when the delayed ER timer fires. TCP enters
+ * fast recovery and performs fast-retransmit.
+ */
+void tcp_resume_early_retransmit(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ tcp_rearm_rto(sk);
+
+ /* Stop if ER is disabled after the delayed ER timer is scheduled */
+ if (!tp->do_early_retrans)
+ return;
+
+ tcp_enter_recovery(sk, false);
+ tcp_update_scoreboard(sk, 1);
+ tcp_xmit_retransmit_queue(sk);
}
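
tcp_rearm_rto() now subtracts the time that has already elapsed since the head segment was sent, so a delayed-ER episode does not silently extend the effective RTO. A sketch of that offset computation with abstract tick counters rather than real jiffies (names assumed):

#include <stdint.h>
#include <stdio.h>

/* After a delayed-ER timer was installed, the regular RTO must be
 * charged for the time already spent waiting. */
static uint32_t rearmed_rto(uint32_t head_sent, uint32_t rto, uint32_t now)
{
    int32_t delta = (int32_t)(head_sent + rto - now);
    /* delta can be non-positive if the timer already expired while the
     * socket was locked; fall back to a full RTO in that case. */
    return delta > 0 ? (uint32_t)delta : rto;
}

int main(void)
{
    printf("%u\n", rearmed_rto(100, 50, 120)); /* 30 ticks of the RTO remain */
    printf("%u\n", rearmed_rto(100, 50, 200)); /* already late: full RTO (50) */
    return 0;
}
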
/* If we get here, the whole TSO packet has not been acked. */
@@ -3289,7 +3372,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
const struct inet_connection_sock *icsk = inet_csk(sk);
struct sk_buff *skb;
u32 now = tcp_time_stamp;
- int fully_acked = 1;
+ bool fully_acked = true;
int flag = 0;
u32 pkts_acked = 0;
u32 reord = tp->packets_out;
@@ -3313,7 +3396,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
if (!acked_pcount)
break;
- fully_acked = 0;
+ fully_acked = false;
} else {
acked_pcount = tcp_skb_pcount(skb);
}
@@ -3430,18 +3513,18 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
if (!tp->packets_out && tcp_is_sack(tp)) {
icsk = inet_csk(sk);
if (tp->lost_out) {
- printk(KERN_DEBUG "Leak l=%u %d\n",
- tp->lost_out, icsk->icsk_ca_state);
+ pr_debug("Leak l=%u %d\n",
+ tp->lost_out, icsk->icsk_ca_state);
tp->lost_out = 0;
}
if (tp->sacked_out) {
- printk(KERN_DEBUG "Leak s=%u %d\n",
- tp->sacked_out, icsk->icsk_ca_state);
+ pr_debug("Leak s=%u %d\n",
+ tp->sacked_out, icsk->icsk_ca_state);
tp->sacked_out = 0;
}
if (tp->retrans_out) {
- printk(KERN_DEBUG "Leak r=%u %d\n",
- tp->retrans_out, icsk->icsk_ca_state);
+ pr_debug("Leak r=%u %d\n",
+ tp->retrans_out, icsk->icsk_ca_state);
tp->retrans_out = 0;
}
}
@@ -3592,7 +3675,7 @@ static void tcp_undo_spur_to_response(struct sock *sk, int flag)
* to prove that the RTO is indeed spurious. It transfers the control
* from F-RTO to the conventional RTO recovery
*/
-static int tcp_process_frto(struct sock *sk, int flag)
+static bool tcp_process_frto(struct sock *sk, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -3608,7 +3691,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
if (!before(tp->snd_una, tp->frto_highmark)) {
tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag);
- return 1;
+ return true;
}
if (!tcp_is_sackfrto(tp)) {
@@ -3617,19 +3700,19 @@ static int tcp_process_frto(struct sock *sk, int flag)
* data, winupdate
*/
if (!(flag & FLAG_ANY_PROGRESS) && (flag & FLAG_NOT_DUP))
- return 1;
+ return true;
if (!(flag & FLAG_DATA_ACKED)) {
tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 0 : 3),
flag);
- return 1;
+ return true;
}
} else {
if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
/* Prevent sending of new data. */
tp->snd_cwnd = min(tp->snd_cwnd,
tcp_packets_in_flight(tp));
- return 1;
+ return true;
}
if ((tp->frto_counter >= 2) &&
@@ -3639,10 +3722,10 @@ static int tcp_process_frto(struct sock *sk, int flag)
/* RFC4138 shortcoming (see comment above) */
if (!(flag & FLAG_FORWARD_PROGRESS) &&
(flag & FLAG_NOT_DUP))
- return 1;
+ return true;
tcp_enter_frto_loss(sk, 3, flag);
- return 1;
+ return true;
}
}
@@ -3654,7 +3737,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
if (!tcp_may_send_now(sk))
tcp_enter_frto_loss(sk, 2, flag);
- return 1;
+ return true;
} else {
switch (sysctl_tcp_frto_response) {
case 2:
@@ -3671,7 +3754,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
tp->undo_marker = 0;
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS);
}
- return 0;
+ return false;
}
/* This routine deals with incoming acks, but not outgoing ones. */
@@ -3689,7 +3772,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
int prior_sacked = tp->sacked_out;
int pkts_acked = 0;
int newly_acked_sacked = 0;
- int frto_cwnd = 0;
+ bool frto_cwnd = false;
/* If the ack is older than previous acks
* then we can probably ignore it.
@@ -3703,6 +3786,9 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
if (after(ack, tp->snd_nxt))
goto invalid_ack;
+ if (tp->early_retrans_delayed)
+ tcp_rearm_rto(sk);
+
if (after(ack, prior_snd_una))
flag |= FLAG_SND_UNA_ADVANCED;
@@ -3868,10 +3954,9 @@ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *o
__u8 snd_wscale = *(__u8 *)ptr;
opt_rx->wscale_ok = 1;
if (snd_wscale > 14) {
- if (net_ratelimit())
- pr_info("%s: Illegal window scaling value %d >14 received\n",
- __func__,
- snd_wscale);
+ net_info_ratelimited("%s: Illegal window scaling value %d >14 received\n",
+ __func__,
+ snd_wscale);
snd_wscale = 14;
}
opt_rx->snd_wscale = snd_wscale;
@@ -3942,7 +4027,7 @@ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *o
}
EXPORT_SYMBOL(tcp_parse_options);
-static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th)
+static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th)
{
const __be32 *ptr = (const __be32 *)(th + 1);
@@ -3953,31 +4038,31 @@ static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr
tp->rx_opt.rcv_tsval = ntohl(*ptr);
++ptr;
tp->rx_opt.rcv_tsecr = ntohl(*ptr);
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/* Fast parse options. This hopes to only see timestamps.
* If it is wrong it falls back on tcp_parse_options().
*/
-static int tcp_fast_parse_options(const struct sk_buff *skb,
- const struct tcphdr *th,
- struct tcp_sock *tp, const u8 **hvpp)
+static bool tcp_fast_parse_options(const struct sk_buff *skb,
+ const struct tcphdr *th,
+ struct tcp_sock *tp, const u8 **hvpp)
{
/* In the spirit of fast parsing, compare doff directly to constant
* values. Because equality is used, short doff can be ignored here.
*/
if (th->doff == (sizeof(*th) / 4)) {
tp->rx_opt.saw_tstamp = 0;
- return 0;
+ return false;
} else if (tp->rx_opt.tstamp_ok &&
th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) {
if (tcp_parse_aligned_timestamp(tp, th))
- return 1;
+ return true;
}
tcp_parse_options(skb, &tp->rx_opt, hvpp, 1);
- return 1;
+ return true;
}
#ifdef CONFIG_TCP_MD5SIG
@@ -4218,7 +4303,7 @@ static void tcp_fin(struct sock *sk)
}
}
-static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
+static inline bool tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
u32 end_seq)
{
if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) {
@@ -4226,9 +4311,9 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
sp->start_seq = seq;
if (after(end_seq, sp->end_seq))
sp->end_seq = end_seq;
- return 1;
+ return true;
}
- return 0;
+ return false;
}
static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
@@ -4424,10 +4509,10 @@ static void tcp_ofo_queue(struct sock *sk)
}
}
-static int tcp_prune_ofo_queue(struct sock *sk);
+static bool tcp_prune_ofo_queue(struct sock *sk);
static int tcp_prune_queue(struct sock *sk);
-static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
+static int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
{
if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
!sk_rmem_schedule(sk, size)) {
@@ -4446,6 +4531,46 @@ static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
return 0;
}
+/**
+ * tcp_try_coalesce - try to merge skb to prior one
+ * @sk: socket
+ * @to: prior buffer
+ * @from: buffer to add in queue
+ * @fragstolen: pointer to boolean
+ *
+ * Before queueing skb @from after @to, try to merge them
+ * to reduce overall memory use and queue lengths, if cost is small.
+ * Packets in ofo or receive queues can stay a long time.
+ * Better try to coalesce them right now to avoid future collapses.
+ * Returns true if caller should free @from instead of queueing it
+ */
+static bool tcp_try_coalesce(struct sock *sk,
+ struct sk_buff *to,
+ struct sk_buff *from,
+ bool *fragstolen)
+{
+ int delta;
+
+ *fragstolen = false;
+
+ if (tcp_hdr(from)->fin)
+ return false;
+
+ /* It's possible this segment overlaps with a prior segment in the queue */
+ if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq)
+ return false;
+
+ if (!skb_try_coalesce(to, from, fragstolen, &delta))
+ return false;
+
+ atomic_add(delta, &sk->sk_rmem_alloc);
+ sk_mem_charge(sk, delta);
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
+ TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
+ TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
+ return true;
+}
+
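
tcp_try_coalesce() only merges a segment that starts exactly at the previous one's end_seq, keeping queued data contiguous and cutting per-skb overhead before the far more expensive queue-collapse path has to run. A userspace analogue of the same test on plain buffers (struct seg is a hypothetical stand-in for sk_buff bookkeeping):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct seg {
    uint32_t seq, end_seq;
    char data[256];
    size_t len;
};

/* Append @from to @to only when it starts exactly where @to ends, so
 * the merged buffer stays contiguous in sequence space. */
static bool try_coalesce(struct seg *to, const struct seg *from)
{
    if (from->seq != to->end_seq)            /* gap or overlap: keep separate */
        return false;
    if (to->len + from->len > sizeof(to->data))
        return false;                        /* no room: queue it instead */
    memcpy(to->data + to->len, from->data, from->len);
    to->len += from->len;
    to->end_seq = from->end_seq;             /* sequence space grows too */
    return true;
}

int main(void)
{
    struct seg a = { .seq = 0, .end_seq = 3, .data = "abc", .len = 3 };
    struct seg b = { .seq = 3, .end_seq = 5, .data = "de", .len = 2 };
    printf("%d %s\n", try_coalesce(&a, &b), a.data); /* prints: 1 abcde */
    return 0;
}
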
static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -4484,23 +4609,13 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
end_seq = TCP_SKB_CB(skb)->end_seq;
if (seq == TCP_SKB_CB(skb1)->end_seq) {
- /* Packets in ofo can stay in queue a long time.
- * Better try to coalesce them right now
- * to avoid future tcp_collapse_ofo_queue(),
- * probably the most expensive function in tcp stack.
- */
- if (skb->len <= skb_tailroom(skb1) && !tcp_hdr(skb)->fin) {
- NET_INC_STATS_BH(sock_net(sk),
- LINUX_MIB_TCPRCVCOALESCE);
- BUG_ON(skb_copy_bits(skb, 0,
- skb_put(skb1, skb->len),
- skb->len));
- TCP_SKB_CB(skb1)->end_seq = end_seq;
- TCP_SKB_CB(skb1)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
- __kfree_skb(skb);
- skb = NULL;
- } else {
+ bool fragstolen;
+
+ if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
__skb_queue_after(&tp->out_of_order_queue, skb1, skb);
+ } else {
+ kfree_skb_partial(skb, fragstolen);
+ skb = NULL;
}
if (!tp->rx_opt.num_sacks ||
@@ -4576,12 +4691,65 @@ end:
skb_set_owner_r(skb, sk);
}
+static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen,
+ bool *fragstolen)
+{
+ int eaten;
+ struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue);
+
+ __skb_pull(skb, hdrlen);
+ eaten = (tail &&
+ tcp_try_coalesce(sk, tail, skb, fragstolen)) ? 1 : 0;
+ tcp_sk(sk)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+ if (!eaten) {
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
+ skb_set_owner_r(skb, sk);
+ }
+ return eaten;
+}
+
+int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
+{
+ struct sk_buff *skb;
+ struct tcphdr *th;
+ bool fragstolen;
+
+ if (tcp_try_rmem_schedule(sk, size + sizeof(*th)))
+ goto err;
+
+ skb = alloc_skb(size + sizeof(*th), sk->sk_allocation);
+ if (!skb)
+ goto err;
+
+ th = (struct tcphdr *)skb_put(skb, sizeof(*th));
+ skb_reset_transport_header(skb);
+ memset(th, 0, sizeof(*th));
+
+ if (memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size))
+ goto err_free;
+
+ TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
+ TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size;
+ TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1;
+
+ if (tcp_queue_rcv(sk, skb, sizeof(*th), &fragstolen)) {
+ WARN_ON_ONCE(fragstolen); /* should not happen */
+ __kfree_skb(skb);
+ }
+ return size;
+
+err_free:
+ kfree_skb(skb);
+err:
+ return -ENOMEM;
+}
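
tcp_send_rcvq() lets a repair-mode writer place data directly on the receive queue, so the synthetic segment has to look exactly like in-order wire data: it starts at rcv_nxt, advances rcv_nxt by its own length, and is stamped with ack_seq = snd_una - 1 so it never appears to acknowledge new data. A sketch of just that sequence bookkeeping (names assumed, not kernel types):

#include <stdint.h>
#include <stdio.h>

struct fake_rcv {
    uint32_t rcv_nxt;
    uint32_t snd_una;
};

/* Inject @size bytes as if they arrived in order from the wire. */
static uint32_t inject(struct fake_rcv *s, uint32_t size, uint32_t *ack_seq)
{
    uint32_t seq = s->rcv_nxt;       /* segment starts at the expected point */
    s->rcv_nxt = seq + size;         /* and moves the receive window forward */
    *ack_seq = s->snd_una - 1;       /* must not look like it acks new data */
    return seq;
}

int main(void)
{
    struct fake_rcv s = { .rcv_nxt = 1000, .snd_una = 5000 };
    uint32_t ack_seq;
    uint32_t seq = inject(&s, 100, &ack_seq);
    printf("%u %u %u\n", seq, s.rcv_nxt, ack_seq); /* 1000 1100 4999 */
    return 0;
}
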
static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
{
const struct tcphdr *th = tcp_hdr(skb);
struct tcp_sock *tp = tcp_sk(sk);
int eaten = -1;
+ bool fragstolen = false;
if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
goto drop;
@@ -4626,8 +4794,7 @@ queue_and_out:
tcp_try_rmem_schedule(sk, skb->truesize))
goto drop;
- skb_set_owner_r(skb, sk);
- __skb_queue_tail(&sk->sk_receive_queue, skb);
+ eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);
}
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
if (skb->len)
@@ -4651,7 +4818,7 @@ queue_and_out:
tcp_fast_path_check(sk);
if (eaten > 0)
- __kfree_skb(skb);
+ kfree_skb_partial(skb, fragstolen);
else if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk, 0);
return;
@@ -4871,10 +5038,10 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
* Purge the out-of-order queue.
* Return true if queue was pruned.
*/
-static int tcp_prune_ofo_queue(struct sock *sk)
+static bool tcp_prune_ofo_queue(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
- int res = 0;
+ bool res = false;
if (!skb_queue_empty(&tp->out_of_order_queue)) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
@@ -4888,7 +5055,7 @@ static int tcp_prune_ofo_queue(struct sock *sk)
if (tp->rx_opt.sack_ok)
tcp_sack_reset(&tp->rx_opt);
sk_mem_reclaim(sk);
- res = 1;
+ res = true;
}
return res;
}
@@ -4965,7 +5132,7 @@ void tcp_cwnd_application_limited(struct sock *sk)
tp->snd_cwnd_stamp = tcp_time_stamp;
}
-static int tcp_should_expand_sndbuf(const struct sock *sk)
+static bool tcp_should_expand_sndbuf(const struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
@@ -4973,21 +5140,21 @@ static int tcp_should_expand_sndbuf(const struct sock *sk)
* not modify it.
*/
if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
- return 0;
+ return false;
/* If we are under global TCP memory pressure, do not expand. */
if (sk_under_memory_pressure(sk))
- return 0;
+ return false;
/* If we are under soft global TCP memory pressure, do not expand. */
if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0))
- return 0;
+ return false;
/* If we filled the congestion window, do not expand. */
if (tp->packets_out >= tp->snd_cwnd)
- return 0;
+ return false;
- return 1;
+ return true;
}
/* When incoming ACK allowed to free some skb from write_queue,
@@ -5213,19 +5380,19 @@ static inline int tcp_checksum_complete_user(struct sock *sk,
}
#ifdef CONFIG_NET_DMA
-static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
+static bool tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
int hlen)
{
struct tcp_sock *tp = tcp_sk(sk);
int chunk = skb->len - hlen;
int dma_cookie;
- int copied_early = 0;
+ bool copied_early = false;
if (tp->ucopy.wakeup)
- return 0;
+ return false;
if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
- tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+ tp->ucopy.dma_chan = net_dma_find_channel();
if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
@@ -5238,7 +5405,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
goto out;
tp->ucopy.dma_cookie = dma_cookie;
- copied_early = 1;
+ copied_early = true;
tp->ucopy.len -= chunk;
tp->copied_seq += chunk;
@@ -5430,6 +5597,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
} else {
int eaten = 0;
int copied_early = 0;
+ bool fragstolen = false;
if (tp->copied_seq == tp->rcv_nxt &&
len - tcp_header_len <= tp->ucopy.len) {
@@ -5487,10 +5655,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
/* Bulk data transfer: receiver */
- __skb_pull(skb, tcp_header_len);
- __skb_queue_tail(&sk->sk_receive_queue, skb);
- skb_set_owner_r(skb, sk);
- tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+ eaten = tcp_queue_rcv(sk, skb, tcp_header_len,
+ &fragstolen);
}
tcp_event_data_recv(sk, skb);
@@ -5512,7 +5678,7 @@ no_ack:
else
#endif
if (eaten)
- __kfree_skb(skb);
+ kfree_skb_partial(skb, fragstolen);
else
sk->sk_data_ready(sk, 0);
return 0;
@@ -5556,6 +5722,44 @@ discard:
}
EXPORT_SYMBOL(tcp_rcv_established);
+void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ tcp_set_state(sk, TCP_ESTABLISHED);
+
+ if (skb != NULL)
+ security_inet_conn_established(sk, skb);
+
+ /* Make sure socket is routed, for correct metrics. */
+ icsk->icsk_af_ops->rebuild_header(sk);
+
+ tcp_init_metrics(sk);
+
+ tcp_init_congestion_control(sk);
+
+ /* Prevent spurious tcp_cwnd_restart() on first data
+ * packet.
+ */
+ tp->lsndtime = tcp_time_stamp;
+
+ tcp_init_buffer_space(sk);
+
+ if (sock_flag(sk, SOCK_KEEPOPEN))
+ inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp));
+
+ if (!tp->rx_opt.snd_wscale)
+ __tcp_fast_path_on(tp, tp->snd_wnd);
+ else
+ tp->pred_flags = 0;
+
+ if (!sock_flag(sk, SOCK_DEAD)) {
+ sk->sk_state_change(sk);
+ sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
+ }
+}
+
static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
const struct tcphdr *th, unsigned int len)
{
@@ -5688,36 +5892,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
}
smp_mb();
- tcp_set_state(sk, TCP_ESTABLISHED);
-
- security_inet_conn_established(sk, skb);
-
- /* Make sure socket is routed, for correct metrics. */
- icsk->icsk_af_ops->rebuild_header(sk);
- tcp_init_metrics(sk);
-
- tcp_init_congestion_control(sk);
-
- /* Prevent spurious tcp_cwnd_restart() on first data
- * packet.
- */
- tp->lsndtime = tcp_time_stamp;
-
- tcp_init_buffer_space(sk);
-
- if (sock_flag(sk, SOCK_KEEPOPEN))
- inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp));
-
- if (!tp->rx_opt.snd_wscale)
- __tcp_fast_path_on(tp, tp->snd_wnd);
- else
- tp->pred_flags = 0;
-
- if (!sock_flag(sk, SOCK_DEAD)) {
- sk->sk_state_change(sk);
- sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
- }
+ tcp_finish_connect(sk, skb);
if (sk->sk_write_pending ||
icsk->icsk_accept_queue.rskq_defer_accept ||
@@ -5731,8 +5907,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
*/
inet_csk_schedule_ack(sk);
icsk->icsk_ack.lrcvtime = tcp_time_stamp;
- icsk->icsk_ack.ato = TCP_ATO_MIN;
- tcp_incr_quickack(sk);
tcp_enter_quickack_mode(sk);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
TCP_DELACK_MAX, TCP_RTO_MAX);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3a25cf743f8b..a43b87dfe800 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -138,6 +138,14 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
+static int tcp_repair_connect(struct sock *sk)
+{
+ tcp_connect_init(sk);
+ tcp_finish_connect(sk, NULL);
+
+ return 0;
+}
+
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
@@ -196,7 +204,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
/* Reset inherited state */
tp->rx_opt.ts_recent = 0;
tp->rx_opt.ts_recent_stamp = 0;
- tp->write_seq = 0;
+ if (likely(!tp->repair))
+ tp->write_seq = 0;
}
if (tcp_death_row.sysctl_tw_recycle &&
@@ -247,7 +256,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sk->sk_gso_type = SKB_GSO_TCPV4;
sk_setup_caps(sk, &rt->dst);
- if (!tp->write_seq)
+ if (!tp->write_seq && likely(!tp->repair))
tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
inet->inet_daddr,
inet->inet_sport,
@@ -255,7 +264,11 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
inet->inet_id = tp->write_seq ^ jiffies;
- err = tcp_connect(sk);
+ if (likely(!tp->repair))
+ err = tcp_connect(sk);
+ else
+ err = tcp_repair_connect(sk);
+
rt = NULL;
if (err)
goto failure;
@@ -853,14 +866,14 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
}
/*
- * Return 1 if a syncookie should be sent
+ * Return true if a syncookie should be sent
*/
-int tcp_syn_flood_action(struct sock *sk,
+bool tcp_syn_flood_action(struct sock *sk,
const struct sk_buff *skb,
const char *proto)
{
const char *msg = "Dropping request";
- int want_cookie = 0;
+ bool want_cookie = false;
struct listen_sock *lopt;
@@ -868,7 +881,7 @@ int tcp_syn_flood_action(struct sock *sk,
#ifdef CONFIG_SYN_COOKIES
if (sysctl_tcp_syncookies) {
msg = "Sending cookies";
- want_cookie = 1;
+ want_cookie = true;
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
} else
#endif
@@ -1183,7 +1196,7 @@ clear_hash_noput:
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
-static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
+static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
/*
* This gets called for each TCP segment that arrives
@@ -1206,16 +1219,16 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
/* We've parsed the options - do we have a hash? */
if (!hash_expected && !hash_location)
- return 0;
+ return false;
if (hash_expected && !hash_location) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
- return 1;
+ return true;
}
if (!hash_expected && hash_location) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
- return 1;
+ return true;
}
/* Okay, so this is hash_expected and hash_location -
@@ -1226,15 +1239,14 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
NULL, NULL, skb);
if (genhash || memcmp(hash_location, newhash, 16) != 0) {
- if (net_ratelimit()) {
- pr_info("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
- &iph->saddr, ntohs(th->source),
- &iph->daddr, ntohs(th->dest),
- genhash ? " tcp_v4_calc_md5_hash failed" : "");
- }
- return 1;
+ net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
+ &iph->saddr, ntohs(th->source),
+ &iph->daddr, ntohs(th->dest),
+ genhash ? " tcp_v4_calc_md5_hash failed"
+ : "");
+ return true;
}
- return 0;
+ return false;
}
#endif
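
The three early returns in tcp_v4_inbound_md5_hash() encode a simple decision table: accept when neither side uses MD5, drop when exactly one side does, and verify the digest only when both do. An illustrative restatement of that table (RFC 2385 semantics; the names are assumptions, not kernel API):

#include <stdbool.h>
#include <stdio.h>

enum verdict { ACCEPT, VERIFY, DROP };

/* One-sided MD5 is always rejected; the digest itself is only checked
 * when both the connection and the segment carry a signature. */
static enum verdict md5_verdict(bool hash_expected, bool hash_present)
{
    if (!hash_expected && !hash_present)
        return ACCEPT;              /* plain segment on a plain connection */
    if (hash_expected != hash_present)
        return DROP;                /* signature missing or unexpected */
    return VERIFY;                  /* both sides use MD5: check the digest */
}

int main(void)
{
    printf("%d %d %d\n",
           md5_verdict(false, false), /* 0: ACCEPT */
           md5_verdict(true, false),  /* 2: DROP */
           md5_verdict(true, true));  /* 1: VERIFY */
    return 0;
}
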
@@ -1268,7 +1280,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
__be32 saddr = ip_hdr(skb)->saddr;
__be32 daddr = ip_hdr(skb)->daddr;
__u32 isn = TCP_SKB_CB(skb)->when;
- int want_cookie = 0;
+ bool want_cookie = false;
/* Never answer to SYNs sent to broadcast or multicast */
if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1327,7 +1339,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
while (l-- > 0)
*c++ ^= *hash_location++;
- want_cookie = 0; /* not our kind of cookie */
+ want_cookie = false; /* not our kind of cookie */
tmp_ext.cookie_out_never = 0; /* false */
tmp_ext.cookie_plus = tmp_opt.cookie_plus;
} else if (!tp->rx_opt.cookie_in_always) {
@@ -1355,7 +1367,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
goto drop_and_free;
if (!want_cookie || tmp_opt.tstamp_ok)
- TCP_ECN_create_request(req, tcp_hdr(skb));
+ TCP_ECN_create_request(req, skb);
if (want_cookie) {
isn = cookie_v4_init_sequence(sk, skb, &req->mss);
@@ -1730,7 +1742,7 @@ process:
#ifdef CONFIG_NET_DMA
struct tcp_sock *tp = tcp_sk(sk);
if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
- tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+ tp->ucopy.dma_chan = net_dma_find_channel();
if (tp->ucopy.dma_chan)
ret = tcp_v4_do_rcv(sk, skb);
else
@@ -1739,7 +1751,8 @@ process:
if (!tcp_prequeue(sk, skb))
ret = tcp_v4_do_rcv(sk, skb);
}
- } else if (unlikely(sk_add_backlog(sk, skb))) {
+ } else if (unlikely(sk_add_backlog(sk, skb,
+ sk->sk_rcvbuf + sk->sk_sndbuf))) {
bh_unlock_sock(sk);
NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
goto discard_and_relse;
@@ -1875,64 +1888,15 @@ static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
static int tcp_v4_init_sock(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
- struct tcp_sock *tp = tcp_sk(sk);
- skb_queue_head_init(&tp->out_of_order_queue);
- tcp_init_xmit_timers(sk);
- tcp_prequeue_init(tp);
-
- icsk->icsk_rto = TCP_TIMEOUT_INIT;
- tp->mdev = TCP_TIMEOUT_INIT;
-
- /* So many TCP implementations out there (incorrectly) count the
- * initial SYN frame in their delayed-ACK and congestion control
- * algorithms that we must have the following bandaid to talk
- * efficiently to them. -DaveM
- */
- tp->snd_cwnd = TCP_INIT_CWND;
-
- /* See draft-stevens-tcpca-spec-01 for discussion of the
- * initialization of these values.
- */
- tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
- tp->snd_cwnd_clamp = ~0;
- tp->mss_cache = TCP_MSS_DEFAULT;
-
- tp->reordering = sysctl_tcp_reordering;
- icsk->icsk_ca_ops = &tcp_init_congestion_ops;
-
- sk->sk_state = TCP_CLOSE;
-
- sk->sk_write_space = sk_stream_write_space;
- sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
+ tcp_init_sock(sk);
icsk->icsk_af_ops = &ipv4_specific;
- icsk->icsk_sync_mss = tcp_sync_mss;
+
#ifdef CONFIG_TCP_MD5SIG
- tp->af_specific = &tcp_sock_ipv4_specific;
+ tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif
- /* TCP Cookie Transactions */
- if (sysctl_tcp_cookie_size > 0) {
- /* Default, cookies without s_data_payload. */
- tp->cookie_values =
- kzalloc(sizeof(*tp->cookie_values),
- sk->sk_allocation);
- if (tp->cookie_values != NULL)
- kref_init(&tp->cookie_values->kref);
- }
- /* Presumed zeroed, in order of appearance:
- * cookie_in_always, cookie_out_never,
- * s_data_constant, s_data_in, s_data_out
- */
- sk->sk_sndbuf = sysctl_tcp_wmem[1];
- sk->sk_rcvbuf = sysctl_tcp_rmem[1];
-
- local_bh_disable();
- sock_update_memcg(sk);
- sk_sockets_allocated_inc(sk);
- local_bh_enable();
-
return 0;
}
@@ -2109,7 +2073,7 @@ static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
return rc;
}
-static inline int empty_bucket(struct tcp_iter_state *st)
+static inline bool empty_bucket(struct tcp_iter_state *st)
{
return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
index e795272fbe9e..b6f3583ddfe8 100644
--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -6,37 +6,6 @@
#include <linux/memcontrol.h>
#include <linux/module.h>
-static u64 tcp_cgroup_read(struct cgroup *cont, struct cftype *cft);
-static int tcp_cgroup_write(struct cgroup *cont, struct cftype *cft,
- const char *buffer);
-static int tcp_cgroup_reset(struct cgroup *cont, unsigned int event);
-
-static struct cftype tcp_files[] = {
- {
- .name = "kmem.tcp.limit_in_bytes",
- .write_string = tcp_cgroup_write,
- .read_u64 = tcp_cgroup_read,
- .private = RES_LIMIT,
- },
- {
- .name = "kmem.tcp.usage_in_bytes",
- .read_u64 = tcp_cgroup_read,
- .private = RES_USAGE,
- },
- {
- .name = "kmem.tcp.failcnt",
- .private = RES_FAILCNT,
- .trigger = tcp_cgroup_reset,
- .read_u64 = tcp_cgroup_read,
- },
- {
- .name = "kmem.tcp.max_usage_in_bytes",
- .private = RES_MAX_USAGE,
- .trigger = tcp_cgroup_reset,
- .read_u64 = tcp_cgroup_read,
- },
-};
-
static inline struct tcp_memcontrol *tcp_from_cgproto(struct cg_proto *cg_proto)
{
return container_of(cg_proto, struct tcp_memcontrol, cg_proto);
@@ -49,7 +18,7 @@ static void memcg_tcp_enter_memory_pressure(struct sock *sk)
}
EXPORT_SYMBOL(memcg_tcp_enter_memory_pressure);
-int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss)
+int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
/*
* The root cgroup does not use res_counters, but rather,
@@ -59,13 +28,12 @@ int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss)
struct res_counter *res_parent = NULL;
struct cg_proto *cg_proto, *parent_cg;
struct tcp_memcontrol *tcp;
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
struct mem_cgroup *parent = parent_mem_cgroup(memcg);
struct net *net = current->nsproxy->net_ns;
cg_proto = tcp_prot.proto_cgroup(memcg);
if (!cg_proto)
- goto create_files;
+ return 0;
tcp = tcp_from_cgproto(cg_proto);
@@ -88,15 +56,12 @@ int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss)
cg_proto->sockets_allocated = &tcp->tcp_sockets_allocated;
cg_proto->memcg = memcg;
-create_files:
- return cgroup_add_files(cgrp, ss, tcp_files,
- ARRAY_SIZE(tcp_files));
+ return 0;
}
EXPORT_SYMBOL(tcp_init_cgroup);
-void tcp_destroy_cgroup(struct cgroup *cgrp)
+void tcp_destroy_cgroup(struct mem_cgroup *memcg)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
struct cg_proto *cg_proto;
struct tcp_memcontrol *tcp;
u64 val;
@@ -109,9 +74,6 @@ void tcp_destroy_cgroup(struct cgroup *cgrp)
percpu_counter_destroy(&tcp->tcp_sockets_allocated);
val = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
-
- if (val != RESOURCE_MAX)
- static_key_slow_dec(&memcg_socket_limit_enabled);
}
EXPORT_SYMBOL(tcp_destroy_cgroup);
@@ -142,10 +104,33 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
tcp->tcp_prot_mem[i] = min_t(long, val >> PAGE_SHIFT,
net->ipv4.sysctl_tcp_mem[i]);
- if (val == RESOURCE_MAX && old_lim != RESOURCE_MAX)
- static_key_slow_dec(&memcg_socket_limit_enabled);
- else if (old_lim == RESOURCE_MAX && val != RESOURCE_MAX)
- static_key_slow_inc(&memcg_socket_limit_enabled);
+ if (val == RESOURCE_MAX)
+ clear_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
+ else if (val != RESOURCE_MAX) {
+ /*
+ * The active bit needs to be written after the static_key
+ * update. This is what guarantees that the socket activation
+ * function is the last one to run. See sock_update_memcg() for
+ * details, and note that we don't mark any socket as belonging
+ * to this memcg until that flag is up.
+ *
+ * We need to do this, because static_keys will span multiple
+ * sites, but we can't control their order. If we mark a socket
+ * as accounted, but the accounting functions are not patched in
+ * yet, we'll lose accounting.
+ *
+ * We never race with the readers in sock_update_memcg(),
+ * because when this value changes, the code to process it is not
+ * patched in yet.
+ *
+ * The activated bit is used to guarantee that no two writers
+ * will do the update in the same memcg. Without that, we can't
+ * properly shut down the static key.
+ */
+ if (!test_and_set_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags))
+ static_key_slow_inc(&memcg_socket_limit_enabled);
+ set_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
+ }
return 0;
}
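
The comment above describes a one-shot activation handshake: MEMCG_SOCK_ACTIVATED guarantees the static key is incremented at most once per memcg, and MEMCG_SOCK_ACTIVE is published only afterwards, so readers never see an accounted socket before the accounting path is enabled. A rough userspace sketch of the same pattern using C11 atomics in place of static keys (all names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag activated = ATOMIC_FLAG_INIT;
static atomic_int global_key_users;
static atomic_bool active;

static void set_limit(void)
{
    /* Only the first writer to win the test-and-set bumps the global
     * toggle; racing writers skip it. */
    if (!atomic_flag_test_and_set(&activated))
        atomic_fetch_add(&global_key_users, 1);
    /* ACTIVE is published last, mirroring the ordering the comment
     * above insists on. */
    atomic_store(&active, true);
}

int main(void)
{
    set_limit();
    set_limit(); /* second call must not bump the key again */
    printf("%d\n", atomic_load(&global_key_users)); /* prints 1 */
    return 0;
}
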
@@ -270,3 +255,37 @@ void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx)
tcp->tcp_prot_mem[idx] = val;
}
+
+static struct cftype tcp_files[] = {
+ {
+ .name = "kmem.tcp.limit_in_bytes",
+ .write_string = tcp_cgroup_write,
+ .read_u64 = tcp_cgroup_read,
+ .private = RES_LIMIT,
+ },
+ {
+ .name = "kmem.tcp.usage_in_bytes",
+ .read_u64 = tcp_cgroup_read,
+ .private = RES_USAGE,
+ },
+ {
+ .name = "kmem.tcp.failcnt",
+ .private = RES_FAILCNT,
+ .trigger = tcp_cgroup_reset,
+ .read_u64 = tcp_cgroup_read,
+ },
+ {
+ .name = "kmem.tcp.max_usage_in_bytes",
+ .private = RES_MAX_USAGE,
+ .trigger = tcp_cgroup_reset,
+ .read_u64 = tcp_cgroup_read,
+ },
+ { } /* terminate */
+};
+
+static int __init tcp_memcontrol_init(void)
+{
+ WARN_ON(cgroup_add_cftypes(&mem_cgroup_subsys, tcp_files));
+ return 0;
+}
+__initcall(tcp_memcontrol_init);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 3cabafb5cdd1..b85d9fe7d663 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -55,7 +55,7 @@ EXPORT_SYMBOL_GPL(tcp_death_row);
* state.
*/
-static int tcp_remember_stamp(struct sock *sk)
+static bool tcp_remember_stamp(struct sock *sk)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@ -72,13 +72,13 @@ static int tcp_remember_stamp(struct sock *sk)
}
if (release_it)
inet_putpeer(peer);
- return 1;
+ return true;
}
- return 0;
+ return false;
}
-static int tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
+static bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
struct sock *sk = (struct sock *) tw;
struct inet_peer *peer;
@@ -94,17 +94,17 @@ static int tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
peer->tcp_ts = tcptw->tw_ts_recent;
}
inet_putpeer(peer);
- return 1;
+ return true;
}
- return 0;
+ return false;
}
-static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
+static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
if (seq == s_win)
- return 1;
+ return true;
if (after(end_seq, s_win) && before(seq, e_win))
- return 1;
+ return true;
return seq == e_win && seq == end_seq;
}
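
tcp_in_window() accepts a segment if it overlaps [s_win, e_win), with explicit handling of zero-length segments and a zero receive window. A standalone restatement using the same wrap-safe comparisons (a sketch, not the kernel code itself):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
static bool after(uint32_t a, uint32_t b)  { return before(b, a); }

static bool in_window(uint32_t seq, uint32_t end_seq,
                      uint32_t s_win, uint32_t e_win)
{
    if (seq == s_win)
        return true;                       /* zero window / exact left edge */
    if (after(end_seq, s_win) && before(seq, e_win))
        return true;                       /* genuine overlap */
    return seq == e_win && seq == end_seq; /* zero-length probe at right edge */
}

int main(void)
{
    printf("%d\n", in_window(10, 20, 15, 30)); /* 1: overlaps the window */
    printf("%d\n", in_window(40, 50, 15, 30)); /* 0: entirely to the right */
    return 0;
}
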
@@ -143,7 +143,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
struct tcp_options_received tmp_opt;
const u8 *hash_location;
struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
- int paws_reject = 0;
+ bool paws_reject = false;
tmp_opt.saw_tstamp = 0;
if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
@@ -316,7 +316,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
struct inet_timewait_sock *tw = NULL;
const struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_sock *tp = tcp_sk(sk);
- int recycle_ok = 0;
+ bool recycle_ok = false;
if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
recycle_ok = tcp_remember_stamp(sk);
@@ -482,6 +482,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
newtp->sacked_out = 0;
newtp->fackets_out = 0;
newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
+ tcp_enable_early_retrans(newtp);
/* So many TCP implementations out there (incorrectly) count the
* initial SYN frame in their delayed-ACK and congestion control
@@ -574,7 +575,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
struct sock *child;
const struct tcphdr *th = tcp_hdr(skb);
__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
- int paws_reject = 0;
+ bool paws_reject = false;
tmp_opt.saw_tstamp = 0;
if (th->doff > (sizeof(struct tcphdr)>>2)) {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 364784a91939..803cbfe82fbc 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -34,6 +34,8 @@
*
*/
+#define pr_fmt(fmt) "TCP: " fmt
+
#include <net/tcp.h>
#include <linux/compiler.h>
@@ -78,9 +80,8 @@ static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
tp->frto_counter = 3;
tp->packets_out += tcp_skb_pcount(skb);
- if (!prior_packets)
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
+ if (!prior_packets || tp->early_retrans_delayed)
+ tcp_rearm_rto(sk);
}
/* SND.NXT, if window was not shrunk.
@@ -369,7 +370,7 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
TCP_SKB_CB(skb)->end_seq = seq;
}
-static inline int tcp_urg_mode(const struct tcp_sock *tp)
+static inline bool tcp_urg_mode(const struct tcp_sock *tp)
{
return tp->snd_una != tp->snd_up;
}
@@ -563,13 +564,13 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
/* Compute TCP options for SYN packets. This is not the final
* network wire format yet.
*/
-static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
+static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
struct tcp_out_options *opts,
struct tcp_md5sig_key **md5)
{
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_cookie_values *cvp = tp->cookie_values;
- unsigned remaining = MAX_TCP_OPTION_SPACE;
+ unsigned int remaining = MAX_TCP_OPTION_SPACE;
u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?
tcp_cookie_size_check(cvp->cookie_desired) :
0;
@@ -663,15 +664,15 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
}
/* Set up TCP options for SYN-ACKs. */
-static unsigned tcp_synack_options(struct sock *sk,
+static unsigned int tcp_synack_options(struct sock *sk,
struct request_sock *req,
- unsigned mss, struct sk_buff *skb,
+ unsigned int mss, struct sk_buff *skb,
struct tcp_out_options *opts,
struct tcp_md5sig_key **md5,
struct tcp_extend_values *xvp)
{
struct inet_request_sock *ireq = inet_rsk(req);
- unsigned remaining = MAX_TCP_OPTION_SPACE;
+ unsigned int remaining = MAX_TCP_OPTION_SPACE;
u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ?
xvp->cookie_plus :
0;
@@ -742,13 +743,13 @@ static unsigned tcp_synack_options(struct sock *sk,
/* Compute TCP options for ESTABLISHED sockets. This is not the
* final wire format yet.
*/
-static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
+static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
struct tcp_out_options *opts,
struct tcp_md5sig_key **md5)
{
struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
struct tcp_sock *tp = tcp_sk(sk);
- unsigned size = 0;
+ unsigned int size = 0;
unsigned int eff_sacks;
#ifdef CONFIG_TCP_MD5SIG
@@ -770,9 +771,9 @@ static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
if (unlikely(eff_sacks)) {
- const unsigned remaining = MAX_TCP_OPTION_SPACE - size;
+ const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
opts->num_sack_blocks =
- min_t(unsigned, eff_sacks,
+ min_t(unsigned int, eff_sacks,
(remaining - TCPOLEN_SACK_BASE_ALIGNED) /
TCPOLEN_SACK_PERBLOCK);
size += TCPOLEN_SACK_BASE_ALIGNED +
@@ -801,7 +802,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
struct tcp_sock *tp;
struct tcp_skb_cb *tcb;
struct tcp_out_options opts;
- unsigned tcp_options_size, tcp_header_size;
+ unsigned int tcp_options_size, tcp_header_size;
struct tcp_md5sig_key *md5;
struct tcphdr *th;
int err;
@@ -1096,6 +1097,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
eat = min_t(int, len, skb_headlen(skb));
if (eat) {
__skb_pull(skb, eat);
+ skb->avail_size -= eat;
len -= eat;
if (!len)
return;
@@ -1149,7 +1151,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
}
/* Calculate MSS. Not accounting for SACKs here. */
-int tcp_mtu_to_mss(const struct sock *sk, int pmtu)
+int tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1160,6 +1162,14 @@ int tcp_mtu_to_mss(const struct sock *sk, int pmtu)
*/
mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
+ /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
+ if (icsk->icsk_af_ops->net_frag_header_len) {
+ const struct dst_entry *dst = __sk_dst_get(sk);
+
+ if (dst && dst_allfrag(dst))
+ mss_now -= icsk->icsk_af_ops->net_frag_header_len;
+ }
+
/* Clamp it (mss_clamp does not include tcp options) */
if (mss_now > tp->rx_opt.mss_clamp)
mss_now = tp->rx_opt.mss_clamp;
@@ -1178,7 +1188,7 @@ int tcp_mtu_to_mss(const struct sock *sk, int pmtu)
}
/* Inverse of above */
-int tcp_mss_to_mtu(const struct sock *sk, int mss)
+int tcp_mss_to_mtu(struct sock *sk, int mss)
{
const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1189,6 +1199,13 @@ int tcp_mss_to_mtu(const struct sock *sk, int mss)
icsk->icsk_ext_hdr_len +
icsk->icsk_af_ops->net_header_len;
+ /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
+ if (icsk->icsk_af_ops->net_frag_header_len) {
+ const struct dst_entry *dst = __sk_dst_get(sk);
+
+ if (dst && dst_allfrag(dst))
+ mtu += icsk->icsk_af_ops->net_frag_header_len;
+ }
return mtu;
}
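
The two hunks above keep tcp_mtu_to_mss() and tcp_mss_to_mtu() exact inverses even when RTAX_FEATURE_ALLFRAG forces an IPv6 fragment header onto every packet. A worked example with assumed header sizes (40-byte IPv6 header, 20-byte TCP header, 8-byte fragment header; TCP option space ignored):

#include <stdio.h>

enum { IP6_HDR = 40, TCP_HDR = 20, FRAG_HDR = 8 };

static int mtu_to_mss(int pmtu, int allfrag)
{
    int mss = pmtu - IP6_HDR - TCP_HDR;
    if (allfrag)
        mss -= FRAG_HDR; /* every packet will carry a frag_hdr */
    return mss;
}

static int mss_to_mtu(int mss, int allfrag)
{
    int mtu = mss + TCP_HDR + IP6_HDR;
    if (allfrag)
        mtu += FRAG_HDR;
    return mtu;
}

int main(void)
{
    printf("%d\n", mtu_to_mss(1500, 0));                /* 1440 */
    printf("%d\n", mtu_to_mss(1500, 1));                /* 1432 */
    printf("%d\n", mss_to_mtu(mtu_to_mss(1500, 1), 1)); /* 1500: inverses */
    return 0;
}
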
@@ -1258,7 +1275,7 @@ unsigned int tcp_current_mss(struct sock *sk)
const struct tcp_sock *tp = tcp_sk(sk);
const struct dst_entry *dst = __sk_dst_get(sk);
u32 mss_now;
- unsigned header_len;
+ unsigned int header_len;
struct tcp_out_options opts;
struct tcp_md5sig_key *md5;
@@ -1374,33 +1391,33 @@ static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
}
/* Minshall's variant of the Nagle send check. */
-static inline int tcp_minshall_check(const struct tcp_sock *tp)
+static inline bool tcp_minshall_check(const struct tcp_sock *tp)
{
return after(tp->snd_sml, tp->snd_una) &&
!after(tp->snd_sml, tp->snd_nxt);
}
-/* Return 0, if packet can be sent now without violation Nagle's rules:
+/* Return false if the packet can be sent now without violating Nagle's rules:
* 1. It is full sized.
* 2. Or it contains FIN. (already checked by caller)
* 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
* 4. Or TCP_CORK is not set, and all sent packets are ACKed.
* With Minshall's modification: all sent small packets are ACKed.
*/
-static inline int tcp_nagle_check(const struct tcp_sock *tp,
+static inline bool tcp_nagle_check(const struct tcp_sock *tp,
const struct sk_buff *skb,
- unsigned mss_now, int nonagle)
+ unsigned int mss_now, int nonagle)
{
return skb->len < mss_now &&
((nonagle & TCP_NAGLE_CORK) ||
(!nonagle && tp->packets_out && tcp_minshall_check(tp)));
}
-/* Return non-zero if the Nagle test allows this packet to be
+/* Return true if the Nagle test allows this packet to be
* sent now.
*/
-static inline int tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
- unsigned int cur_mss, int nonagle)
+static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
+ unsigned int cur_mss, int nonagle)
{
/* Nagle rule does not apply to frames, which sit in the middle of the
* write_queue (they have no chances to get new data).
@@ -1409,24 +1426,25 @@ static inline int tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff
* argument based upon the location of SKB in the send queue.
*/
if (nonagle & TCP_NAGLE_PUSH)
- return 1;
+ return true;
/* Don't use the nagle rule for urgent data (or for the final FIN).
* Nagle can be ignored during F-RTO too (see RFC4138).
*/
if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
- return 1;
+ return true;
if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
- return 1;
+ return true;
- return 0;
+ return false;
}
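
Taken together, tcp_minshall_check(), tcp_nagle_check() and tcp_nagle_test() reduce to a single question: may this sub-MSS segment go out now? A hypothetical flattening of that decision (flags and fields are stand-ins; the F-RTO, FIN and urgent-data special cases are omitted):

#include <stdbool.h>
#include <stdio.h>

/* Returns true when Nagle holds the segment back. */
static bool nagle_blocks(int len, int mss, bool cork, bool nodelay,
                         bool small_unacked)
{
    if (len >= mss)
        return false;      /* full-sized: always OK to send */
    if (cork)
        return true;       /* TCP_CORK holds all small segments */
    if (nodelay)
        return false;      /* TCP_NODELAY disables Nagle entirely */
    return small_unacked;  /* Minshall: one small packet in flight at most */
}

int main(void)
{
    printf("%d\n", nagle_blocks(100, 1460, false, false, true)); /* 1: held */
    printf("%d\n", nagle_blocks(100, 1460, false, true, true));  /* 0: sent */
    return 0;
}
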
/* Does at least the first segment of SKB fit into the send window? */
-static inline int tcp_snd_wnd_test(const struct tcp_sock *tp, const struct sk_buff *skb,
- unsigned int cur_mss)
+static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
+ const struct sk_buff *skb,
+ unsigned int cur_mss)
{
u32 end_seq = TCP_SKB_CB(skb)->end_seq;
@@ -1459,7 +1477,7 @@ static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
}
/* Test if sending is allowed right now. */
-int tcp_may_send_now(struct sock *sk)
+bool tcp_may_send_now(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb = tcp_send_head(sk);
@@ -1529,7 +1547,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
*
* This algorithm is from John Heffner.
*/
-static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
+static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1589,11 +1607,11 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
/* Ok, it looks like it is advisable to defer. */
tp->tso_deferred = 1 | (jiffies << 1);
- return 1;
+ return true;
send_now:
tp->tso_deferred = 0;
- return 0;
+ return false;
}
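The "1 | (jiffies << 1)" store above is a small packing trick: shifting jiffies left and OR-ing in the low bit guarantees the stored word is never zero, so zero can unambiguously mean "not deferred" while the upper bits still recover a (top-bit-truncated) timestamp. As a sketch:

static unsigned long defer_mark(unsigned long now)
{
	return 1UL | (now << 1);	/* nonzero even when now == 0 */
}

static unsigned long defer_when(unsigned long mark)
{
	return mark >> 1;		/* time the deferral started */
}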
/* Create a new MTU probe if we are ready.
@@ -1735,11 +1753,11 @@ static int tcp_mtu_probe(struct sock *sk)
* snd_up-64k-mss .. snd_up cannot be large. However, taking into
* account rare use of URG, this is not a big flaw.
*
- * Returns 1, if no segments are in flight and we have queued segments, but
- * cannot send anything now because of SWS or another problem.
+ * Returns true if no segments are in flight and we have queued segments,
+ * but cannot send anything now because of SWS or another problem.
*/
-static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
- int push_one, gfp_t gfp)
+static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
+ int push_one, gfp_t gfp)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
@@ -1753,7 +1771,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
/* Do MTU probing. */
result = tcp_mtu_probe(sk);
if (!result) {
- return 0;
+ return false;
} else if (result > 0) {
sent_pkts = 1;
}
@@ -1812,7 +1830,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
if (likely(sent_pkts)) {
tcp_cwnd_validate(sk);
- return 0;
+ return false;
}
return !tp->packets_out && tcp_send_head(sk);
}
@@ -2011,22 +2029,22 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
}
/* Check if coalescing SKBs is legal. */
-static int tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
+static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
{
if (tcp_skb_pcount(skb) > 1)
- return 0;
+ return false;
/* TODO: SACK collapsing could be used to remove this condition */
if (skb_shinfo(skb)->nr_frags != 0)
- return 0;
+ return false;
if (skb_cloned(skb))
- return 0;
+ return false;
if (skb == tcp_send_head(sk))
- return 0;
+ return false;
/* Some heuristics for collapsing over SACK'd could be invented */
if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
- return 0;
+ return false;
- return 1;
+ return true;
}
/* Collapse packets in the retransmit queue to create
@@ -2037,7 +2055,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb = to, *tmp;
- int first = 1;
+ bool first = true;
if (!sysctl_tcp_retrans_collapse)
return;
@@ -2051,7 +2069,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
space -= skb->len;
if (first) {
- first = 0;
+ first = false;
continue;
}
@@ -2060,7 +2078,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
/* Punt if not enough space exists in the first SKB for
* the data in the second
*/
- if (skb->len > skb_tailroom(to))
+ if (skb->len > skb_availroom(to))
break;
if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
@@ -2166,8 +2184,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
#if FASTRETRANS_DEBUG > 0
if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
- if (net_ratelimit())
- printk(KERN_DEBUG "retrans_out leaked.\n");
+ net_dbg_ratelimited("retrans_out leaked\n");
}
#endif
if (!tp->retrans_out)
@@ -2192,18 +2209,18 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
/* Check if forward retransmits are possible in the current
* window/congestion state.
*/
-static int tcp_can_forward_retransmit(struct sock *sk)
+static bool tcp_can_forward_retransmit(struct sock *sk)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_sock *tp = tcp_sk(sk);
/* Forward retransmissions are possible only during Recovery. */
if (icsk->icsk_ca_state != TCP_CA_Recovery)
- return 0;
+ return false;
/* No forward retransmissions in Reno are possible. */
if (tcp_is_reno(tp))
- return 0;
+ return false;
/* Yeah, we have to make a difficult choice between forward transmission
* and retransmission... Both ways have their merits...
@@ -2214,9 +2231,9 @@ static int tcp_can_forward_retransmit(struct sock *sk)
*/
if (tcp_may_send_now(sk))
- return 0;
+ return false;
- return 1;
+ return true;
}
/* This gets called after a retransmit timeout, and the initially
@@ -2401,7 +2418,7 @@ int tcp_send_synack(struct sock *sk)
skb = tcp_write_queue_head(sk);
if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
- printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
+ pr_debug("%s: wrong queue state\n", __func__);
return -EFAULT;
}
if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
@@ -2561,7 +2578,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
EXPORT_SYMBOL(tcp_make_synack);
/* Do all connect socket setups that can be done AF independent. */
-static void tcp_connect_init(struct sock *sk)
+void tcp_connect_init(struct sock *sk)
{
const struct dst_entry *dst = __sk_dst_get(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@ -2616,9 +2633,12 @@ static void tcp_connect_init(struct sock *sk)
tp->snd_una = tp->write_seq;
tp->snd_sml = tp->write_seq;
tp->snd_up = tp->write_seq;
- tp->rcv_nxt = 0;
- tp->rcv_wup = 0;
- tp->copied_seq = 0;
+ tp->snd_nxt = tp->write_seq;
+
+ if (likely(!tp->repair))
+ tp->rcv_nxt = 0;
+ tp->rcv_wup = tp->rcv_nxt;
+ tp->copied_seq = tp->rcv_nxt;
inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
inet_csk(sk)->icsk_retransmits = 0;
@@ -2641,7 +2661,6 @@ int tcp_connect(struct sock *sk)
/* Reserve space for headers. */
skb_reserve(buff, MAX_TCP_HEADER);
- tp->snd_nxt = tp->write_seq;
tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
TCP_ECN_send_syn(sk, buff);
@@ -2790,6 +2809,15 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
}
+void tcp_send_window_probe(struct sock *sk)
+{
+ if (sk->sk_state == TCP_ESTABLISHED) {
+ tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
+ tcp_sk(sk)->snd_nxt = tcp_sk(sk)->write_seq;
+ tcp_xmit_probe_skb(sk, 0);
+ }
+}
+
/* Initiate keepalive or window probe from timer. */
int tcp_write_wakeup(struct sock *sk)
{
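The tcp_output.c changes above that are not plain int-to-bool conversions (tcp_connect_init() losing its static, rcv_nxt surviving when tp->repair is set, the new tcp_send_window_probe()) belong to the TCP connection-repair work used by checkpoint/restore tools. A hypothetical userspace sketch of entering repair mode; the TCP_REPAIR constant and its value are assumptions taken from the linux/tcp.h of this era, not something this diff shows:

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_REPAIR
#define TCP_REPAIR 19	/* assumed value; absent from older libc headers */
#endif

/* In repair mode connect() installs state without emitting a SYN,
 * which is why tcp_connect_init() must not wipe a restored rcv_nxt. */
static int enter_repair_mode(int fd)
{
	int on = 1;

	return setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &on, sizeof(on));
}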
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index a981cdc0a6e9..4526fe68e60e 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -91,7 +91,7 @@ static inline int tcp_probe_avail(void)
* Note: arguments must match tcp_rcv_established()!
*/
static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
- struct tcphdr *th, unsigned len)
+ struct tcphdr *th, unsigned int len)
{
const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_sock *inet = inet_sk(sk);
@@ -138,7 +138,7 @@ static struct jprobe tcp_jprobe = {
.entry = jtcp_rcv_established,
};
-static int tcpprobe_open(struct inode * inode, struct file * file)
+static int tcpprobe_open(struct inode *inode, struct file *file)
{
/* Reset (empty) log */
spin_lock_bh(&tcp_probe.lock);
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 34d4a02c2f16..e911e6c523ec 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -319,6 +319,11 @@ void tcp_retransmit_timer(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
+ if (tp->early_retrans_delayed) {
+ tcp_resume_early_retransmit(sk);
+ return;
+ }
+
if (!tp->packets_out)
goto out;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index fe141052a1be..eaca73644e79 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -107,6 +107,7 @@
#include <net/checksum.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
+#include <linux/static_key.h>
#include "udp_impl.h"
struct udp_table udp_table __read_mostly;
@@ -206,7 +207,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
if (!snum) {
int low, high, remaining;
- unsigned rand;
+ unsigned int rand;
unsigned short first, last;
DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
@@ -846,7 +847,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
* Get and verify the address.
*/
if (msg->msg_name) {
- struct sockaddr_in * usin = (struct sockaddr_in *)msg->msg_name;
+ struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
if (msg->msg_namelen < sizeof(*usin))
return -EINVAL;
if (usin->sin_family != AF_INET) {
@@ -1379,6 +1380,14 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
}
+static struct static_key udp_encap_needed __read_mostly;
+void udp_encap_enable(void)
+{
+ if (!static_key_enabled(&udp_encap_needed))
+ static_key_slow_inc(&udp_encap_needed);
+}
+EXPORT_SYMBOL(udp_encap_enable);
+
/* returns:
* -1: error
* 0: success
@@ -1400,7 +1409,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
goto drop;
nf_reset(skb);
- if (up->encap_type) {
+ if (static_key_false(&udp_encap_needed) && up->encap_type) {
int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
/*
@@ -1470,7 +1479,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
goto drop;
- if (sk_rcvqueues_full(sk, skb))
+ if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
goto drop;
rc = 0;
@@ -1479,7 +1488,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
rc = __udp_queue_rcv_skb(sk, skb);
- else if (sk_add_backlog(sk, skb)) {
+ else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
bh_unlock_sock(sk);
goto drop;
}
@@ -1760,6 +1769,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
/* FALLTHROUGH */
case UDP_ENCAP_L2TPINUDP:
up->encap_type = val;
+ udp_encap_enable();
break;
default:
err = -ENOPROTOOPT;
@@ -2163,9 +2173,15 @@ void udp4_proc_exit(void)
static __initdata unsigned long uhash_entries;
static int __init set_uhash_entries(char *str)
{
+ ssize_t ret;
+
if (!str)
return 0;
- uhash_entries = simple_strtoul(str, &str, 0);
+
+ ret = kstrtoul(str, 0, &uhash_entries);
+ if (ret)
+ return 0;
+
if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
uhash_entries = UDP_HTABLE_SIZE_MIN;
return 1;
@@ -2176,26 +2192,16 @@ void __init udp_table_init(struct udp_table *table, const char *name)
{
unsigned int i;
- if (!CONFIG_BASE_SMALL)
- table->hash = alloc_large_system_hash(name,
- 2 * sizeof(struct udp_hslot),
- uhash_entries,
- 21, /* one slot per 2 MB */
- 0,
- &table->log,
- &table->mask,
- 64 * 1024);
- /*
- * Make sure hash table has the minimum size
- */
- if (CONFIG_BASE_SMALL || table->mask < UDP_HTABLE_SIZE_MIN - 1) {
- table->hash = kmalloc(UDP_HTABLE_SIZE_MIN *
- 2 * sizeof(struct udp_hslot), GFP_KERNEL);
- if (!table->hash)
- panic(name);
- table->log = ilog2(UDP_HTABLE_SIZE_MIN);
- table->mask = UDP_HTABLE_SIZE_MIN - 1;
- }
+ table->hash = alloc_large_system_hash(name,
+ 2 * sizeof(struct udp_hslot),
+ uhash_entries,
+ 21, /* one slot per 2 MB */
+ 0,
+ &table->log,
+ &table->mask,
+ UDP_HTABLE_SIZE_MIN,
+ 64 * 1024);
+
table->hash2 = table->hash + (table->mask + 1);
for (i = 0; i <= table->mask; i++) {
INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i);
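The udp_encap_needed key above is the 3.4-era static-branch pattern: static_key_false() compiles to straight-line code until the first encapsulating socket bumps the key, so ordinary UDP receive pays nothing for the encapsulation test. A minimal sketch of the same pattern under those assumptions:

#include <linux/static_key.h>

static struct static_key feature_needed __read_mostly;	/* starts disabled */

/* slow path, run once when the feature is first configured */
static void feature_enable(void)
{
	if (!static_key_enabled(&feature_needed))
		static_key_slow_inc(&feature_needed);
}

/* hot path: the branch is patched in only after feature_enable() */
static bool feature_active(bool per_socket_flag)
{
	return static_key_false(&feature_needed) && per_socket_flag;
}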
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index 8a949f19deb6..a7f86a3cd502 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -146,9 +146,17 @@ static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
return udp_dump_one(&udp_table, in_skb, nlh, req);
}
+static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
+ void *info)
+{
+ r->idiag_rqueue = sk_rmem_alloc_get(sk);
+ r->idiag_wqueue = sk_wmem_alloc_get(sk);
+}
+
static const struct inet_diag_handler udp_diag_handler = {
.dump = udp_diag_dump,
.dump_one = udp_diag_dump_one,
+ .idiag_get_info = udp_diag_get_info,
.idiag_type = IPPROTO_UDP,
};
@@ -167,6 +175,7 @@ static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *
static const struct inet_diag_handler udplite_diag_handler = {
.dump = udplite_diag_dump,
.dump_one = udplite_diag_dump_one,
+ .idiag_get_info = udp_diag_get_info,
.idiag_type = IPPROTO_UDPLITE,
};
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index aaad650d47d9..5a681e298b90 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -25,7 +25,7 @@ extern int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len, int noblock, int flags, int *addr_len);
extern int udp_sendpage(struct sock *sk, struct page *page, int offset,
size_t size, int flags);
-extern int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb);
+extern int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
extern void udp_destroy_sock(struct sock *sk);
#ifdef CONFIG_PROC_FS
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index a0b4c5da8d43..0d3426cb5c4f 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -152,7 +152,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
case IPPROTO_AH:
if (pskb_may_pull(skb, xprth + 8 - skb->data)) {
- __be32 *ah_hdr = (__be32*)xprth;
+ __be32 *ah_hdr = (__be32 *)xprth;
fl4->fl4_ipsec_spi = ah_hdr[1];
}
@@ -298,8 +298,8 @@ void __init xfrm4_init(int rt_max_size)
xfrm4_state_init();
xfrm4_policy_init();
#ifdef CONFIG_SYSCTL
- sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path,
- xfrm4_policy_table);
+ sysctl_hdr = register_net_sysctl(&init_net, "net/ipv4",
+ xfrm4_policy_table);
#endif
}
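This hunk is part of a tree-wide sysctl conversion also visible in addrconf.c below: register_net_sysctl_table() took an array of ctl_path components, while register_net_sysctl() takes the mount point as a plain string. Side by side, as a sketch:

/* old style, removed by this series */
static struct ctl_table_header *old_way(struct net *net, struct ctl_table *t)
{
	struct ctl_path path[] = {
		{ .procname = "net" },
		{ .procname = "ipv4" },
		{ },
	};

	return register_net_sysctl_table(net, path, t);
}

/* new style */
static struct ctl_table_header *new_way(struct net *net, struct ctl_table *t)
{
	return register_net_sysctl(net, "net/ipv4", t);
}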
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 36d7437ac054..5728695b5449 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -69,7 +69,7 @@ config IPV6_OPTIMISTIC_DAD
config INET6_AH
tristate "IPv6: AH transformation"
- select XFRM
+ select XFRM_ALGO
select CRYPTO
select CRYPTO_HMAC
select CRYPTO_MD5
@@ -81,7 +81,7 @@ config INET6_AH
config INET6_ESP
tristate "IPv6: ESP transformation"
- select XFRM
+ select XFRM_ALGO
select CRYPTO
select CRYPTO_AUTHENC
select CRYPTO_HMAC
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 6a3bb6077e19..8f6411c97189 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -38,6 +38,8 @@
* status etc.
*/
+#define pr_fmt(fmt) "IPv6: " fmt
+
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -66,6 +68,7 @@
#include <net/sock.h>
#include <net/snmp.h>
+#include <net/af_ieee802154.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/ndisc.h>
@@ -149,7 +152,7 @@ static void addrconf_type_change(struct net_device *dev,
unsigned long event);
static int addrconf_ifdown(struct net_device *dev, int how);
-static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags);
+static void addrconf_dad_start(struct inet6_ifaddr *ifp);
static void addrconf_dad_timer(unsigned long data);
static void addrconf_dad_completed(struct inet6_ifaddr *ifp);
static void addrconf_dad_run(struct inet6_dev *idev);
@@ -326,20 +329,19 @@ void in6_dev_finish_destroy(struct inet6_dev *idev)
WARN_ON(idev->mc_list != NULL);
#ifdef NET_REFCNT_DEBUG
- printk(KERN_DEBUG "in6_dev_finish_destroy: %s\n", dev ? dev->name : "NIL");
+ pr_debug("%s: %s\n", __func__, dev ? dev->name : "NIL");
#endif
dev_put(dev);
if (!idev->dead) {
- pr_warning("Freeing alive inet6 device %p\n", idev);
+ pr_warn("Freeing alive inet6 device %p\n", idev);
return;
}
snmp6_free_dev(idev);
kfree_rcu(idev, rcu);
}
-
EXPORT_SYMBOL(in6_dev_finish_destroy);
-static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
+static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
{
struct inet6_dev *ndev;
@@ -372,7 +374,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
if (snmp6_alloc_dev(ndev) < 0) {
ADBG((KERN_WARNING
- "%s(): cannot allocate memory for statistics; dev=%s.\n",
+ "%s: cannot allocate memory for statistics; dev=%s.\n",
__func__, dev->name));
neigh_parms_release(&nd_tbl, ndev->nd_parms);
dev_put(dev);
@@ -382,7 +384,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
if (snmp6_register_dev(ndev) < 0) {
ADBG((KERN_WARNING
- "%s(): cannot create /proc/net/dev_snmp6/%s\n",
+ "%s: cannot create /proc/net/dev_snmp6/%s\n",
__func__, dev->name));
neigh_parms_release(&nd_tbl, ndev->nd_parms);
ndev->dead = 1;
@@ -400,9 +402,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
- printk(KERN_INFO
- "%s: Disabled Multicast RS\n",
- dev->name);
+ pr_info("%s: Disabled Multicast RS\n", dev->name);
ndev->cnf.rtr_solicits = 0;
}
#endif
@@ -441,7 +441,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
return ndev;
}
-static struct inet6_dev * ipv6_find_idev(struct net_device *dev)
+static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
{
struct inet6_dev *idev;
@@ -542,7 +542,7 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
WARN_ON(!hlist_unhashed(&ifp->addr_lst));
#ifdef NET_REFCNT_DEBUG
- printk(KERN_DEBUG "inet6_ifa_finish_destroy\n");
+ pr_debug("%s\n", __func__);
#endif
in6_dev_put(ifp->idev);
@@ -551,7 +551,7 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
pr_notice("Timer is still running, when freeing ifa=%p\n", ifp);
if (ifp->state != INET6_IFADDR_STATE_DEAD) {
- pr_warning("Freeing alive inet6 address %p\n", ifp);
+ pr_warn("Freeing alive inet6 address %p\n", ifp);
return;
}
dst_release(&ifp->rt->dst);
@@ -803,8 +803,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
ip6_del_rt(rt);
rt = NULL;
} else if (!(rt->rt6i_flags & RTF_EXPIRES)) {
- rt->dst.expires = expires;
- rt->rt6i_flags |= RTF_EXPIRES;
+ rt6_set_expires(rt, expires);
}
}
dst_release(&rt->dst);
@@ -842,8 +841,7 @@ retry:
in6_dev_hold(idev);
if (idev->cnf.use_tempaddr <= 0) {
write_unlock(&idev->lock);
- printk(KERN_INFO
- "ipv6_create_tempaddr(): use_tempaddr is disabled.\n");
+ pr_info("%s: use_tempaddr is disabled\n", __func__);
in6_dev_put(idev);
ret = -1;
goto out;
@@ -853,8 +851,8 @@ retry:
idev->cnf.use_tempaddr = -1; /*XXX*/
spin_unlock_bh(&ifp->lock);
write_unlock(&idev->lock);
- printk(KERN_WARNING
- "ipv6_create_tempaddr(): regeneration time exceeded. disabled temporary address support.\n");
+ pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
+ __func__);
in6_dev_put(idev);
ret = -1;
goto out;
@@ -864,8 +862,8 @@ retry:
if (__ipv6_try_regen_rndid(idev, tmpaddr) < 0) {
spin_unlock_bh(&ifp->lock);
write_unlock(&idev->lock);
- printk(KERN_WARNING
- "ipv6_create_tempaddr(): regeneration of randomized interface id failed.\n");
+ pr_warn("%s: regeneration of randomized interface id failed\n",
+ __func__);
in6_ifa_put(ifp);
in6_dev_put(idev);
ret = -1;
@@ -915,8 +913,7 @@ retry:
if (!ift || IS_ERR(ift)) {
in6_ifa_put(ifp);
in6_dev_put(idev);
- printk(KERN_INFO
- "ipv6_create_tempaddr(): retry temporary address regeneration.\n");
+ pr_info("%s: retry temporary address regeneration\n", __func__);
tmpaddr = &addr;
write_lock(&idev->lock);
goto retry;
@@ -930,7 +927,7 @@ retry:
ift->tstamp = tmp_tstamp;
spin_unlock_bh(&ift->lock);
- addrconf_dad_start(ift, 0);
+ addrconf_dad_start(ift);
in6_ifa_put(ift);
in6_dev_put(idev);
out:
@@ -1333,7 +1330,6 @@ int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
rcu_read_unlock();
return onlink;
}
-
EXPORT_SYMBOL(ipv6_chk_prefix);
struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
@@ -1417,9 +1413,8 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
return;
}
- if (net_ratelimit())
- printk(KERN_INFO "%s: IPv6 duplicate address %pI6c detected!\n",
- ifp->idev->dev->name, &ifp->addr);
+ net_info_ratelimited("%s: IPv6 duplicate address %pI6c detected!\n",
+ ifp->idev->dev->name, &ifp->addr);
if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6) {
struct in6_addr addr;
@@ -1432,7 +1427,7 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
/* DAD failed for link-local based on MAC address */
idev->cnf.disable_ipv6 = 1;
- printk(KERN_INFO "%s: IPv6 being disabled!\n",
+ pr_info("%s: IPv6 being disabled!\n",
ifp->idev->dev->name);
}
}
@@ -1517,13 +1512,21 @@ static int addrconf_ifid_eui48(u8 *eui, struct net_device *dev)
return 0;
}
+static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev)
+{
+ if (dev->addr_len != IEEE802154_ADDR_LEN)
+ return -1;
+ memcpy(eui, dev->dev_addr, 8);
+ return 0;
+}
+
static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev)
{
/* XXX: inherit EUI-64 from other interface -- yoshfuji */
if (dev->addr_len != ARCNET_ALEN)
return -1;
memset(eui, 0, 7);
- eui[7] = *(u8*)dev->dev_addr;
+ eui[7] = *(u8 *)dev->dev_addr;
return 0;
}
@@ -1570,7 +1573,6 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
switch (dev->type) {
case ARPHRD_ETHER:
case ARPHRD_FDDI:
- case ARPHRD_IEEE802_TR:
return addrconf_ifid_eui48(eui, dev);
case ARPHRD_ARCNET:
return addrconf_ifid_arcnet(eui, dev);
@@ -1580,6 +1582,8 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
return addrconf_ifid_sit(eui, dev);
case ARPHRD_IPGRE:
return addrconf_ifid_gre(eui, dev);
+ case ARPHRD_IEEE802154:
+ return addrconf_ifid_eui64(eui, dev);
}
return -1;
}
@@ -1653,9 +1657,8 @@ static void ipv6_regen_rndid(unsigned long data)
idev->cnf.regen_max_retry * idev->cnf.dad_transmits * idev->nd_parms->retrans_time -
idev->cnf.max_desync_factor * HZ;
if (time_before(expires, jiffies)) {
- printk(KERN_WARNING
- "ipv6_regen_rndid(): too short regeneration interval; timer disabled for %s.\n",
- idev->dev->name);
+ pr_warn("%s: too short regeneration interval; timer disabled for %s\n",
+ __func__, idev->dev->name);
goto out;
}
@@ -1668,7 +1671,8 @@ out:
in6_dev_put(idev);
}
-static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr) {
+static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
+{
int ret = 0;
if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0)
@@ -1838,16 +1842,15 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
prefered_lft = ntohl(pinfo->prefered);
if (prefered_lft > valid_lft) {
- if (net_ratelimit())
- printk(KERN_WARNING "addrconf: prefix option has invalid lifetime\n");
+ net_warn_ratelimited("addrconf: prefix option has invalid lifetime\n");
return;
}
in6_dev = in6_dev_get(dev);
if (in6_dev == NULL) {
- if (net_ratelimit())
- printk(KERN_DEBUG "addrconf: device %s not configured\n", dev->name);
+ net_dbg_ratelimited("addrconf: device %s not configured\n",
+ dev->name);
return;
}
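The net_warn_ratelimited()/net_dbg_ratelimited() conversions here and in tcp_output.c above replace the open-coded "if (net_ratelimit()) printk(...)" idiom. The helpers expand to roughly the following; this is an approximation, as the real definitions route through a net_ratelimited_function() wrapper:

#define net_warn_ratelimited(fmt, ...)			\
do {							\
	if (net_ratelimit())				\
		pr_warn(fmt, ##__VA_ARGS__);		\
} while (0)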
@@ -1887,11 +1890,9 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
rt = NULL;
} else if (addrconf_finite_timeout(rt_expires)) {
/* not infinity */
- rt->dst.expires = jiffies + rt_expires;
- rt->rt6i_flags |= RTF_EXPIRES;
+ rt6_set_expires(rt, jiffies + rt_expires);
} else {
- rt->rt6i_flags &= ~RTF_EXPIRES;
- rt->dst.expires = 0;
+ rt6_clean_expires(rt);
}
} else if (valid_lft) {
clock_t expires = 0;
@@ -1911,7 +1912,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
/* Try to figure out our local address for this prefix */
if (pinfo->autoconf && in6_dev->cnf.autoconf) {
- struct inet6_ifaddr * ifp;
+ struct inet6_ifaddr *ifp;
struct in6_addr addr;
int create = 0, update_lft = 0;
@@ -1924,9 +1925,8 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
}
goto ok;
}
- if (net_ratelimit())
- printk(KERN_DEBUG "IPv6 addrconf: prefix with wrong length %d\n",
- pinfo->prefix_len);
+ net_dbg_ratelimited("IPv6 addrconf: prefix with wrong length %d\n",
+ pinfo->prefix_len);
in6_dev_put(in6_dev);
return;
@@ -1960,7 +1960,7 @@ ok:
update_lft = create = 1;
ifp->cstamp = jiffies;
- addrconf_dad_start(ifp, RTF_ADDRCONF|RTF_PREFIX_RT);
+ addrconf_dad_start(ifp);
}
if (ifp) {
@@ -2239,7 +2239,7 @@ static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *p
* that the Optimistic flag should not be set for
* manually configured addresses
*/
- addrconf_dad_start(ifp, 0);
+ addrconf_dad_start(ifp);
in6_ifa_put(ifp);
addrconf_verify(0);
return 0;
@@ -2365,9 +2365,9 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
}
for_each_netdev(net, dev) {
- struct in_device * in_dev = __in_dev_get_rtnl(dev);
+ struct in_device *in_dev = __in_dev_get_rtnl(dev);
if (in_dev && (dev->flags & IFF_UP)) {
- struct in_ifaddr * ifa;
+ struct in_ifaddr *ifa;
int flag = scope;
@@ -2404,7 +2404,7 @@ static void init_loopback(struct net_device *dev)
ASSERT_RTNL();
if ((idev = ipv6_find_idev(dev)) == NULL) {
- printk(KERN_DEBUG "init loopback: add_dev failed\n");
+ pr_debug("%s: add_dev failed\n", __func__);
return;
}
@@ -2413,7 +2413,7 @@ static void init_loopback(struct net_device *dev)
static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr)
{
- struct inet6_ifaddr * ifp;
+ struct inet6_ifaddr *ifp;
u32 addr_flags = IFA_F_PERMANENT;
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
@@ -2426,7 +2426,7 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr
ifp = ipv6_add_addr(idev, addr, 64, IFA_LINK, addr_flags);
if (!IS_ERR(ifp)) {
addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0);
- addrconf_dad_start(ifp, 0);
+ addrconf_dad_start(ifp);
in6_ifa_put(ifp);
}
}
@@ -2434,15 +2434,15 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr
static void addrconf_dev_config(struct net_device *dev)
{
struct in6_addr addr;
- struct inet6_dev * idev;
+ struct inet6_dev *idev;
ASSERT_RTNL();
if ((dev->type != ARPHRD_ETHER) &&
(dev->type != ARPHRD_FDDI) &&
- (dev->type != ARPHRD_IEEE802_TR) &&
(dev->type != ARPHRD_ARCNET) &&
- (dev->type != ARPHRD_INFINIBAND)) {
+ (dev->type != ARPHRD_INFINIBAND) &&
+ (dev->type != ARPHRD_IEEE802154)) {
/* Alas, we support only Ethernet autoconfiguration. */
return;
}
@@ -2472,7 +2472,7 @@ static void addrconf_sit_config(struct net_device *dev)
*/
if ((idev = ipv6_find_idev(dev)) == NULL) {
- printk(KERN_DEBUG "init sit: add_dev failed\n");
+ pr_debug("%s: add_dev failed\n", __func__);
return;
}
@@ -2502,12 +2502,12 @@ static void addrconf_gre_config(struct net_device *dev)
struct inet6_dev *idev;
struct in6_addr addr;
- pr_info("ipv6: addrconf_gre_config(%s)\n", dev->name);
+ pr_info("%s(%s)\n", __func__, dev->name);
ASSERT_RTNL();
if ((idev = ipv6_find_idev(dev)) == NULL) {
- printk(KERN_DEBUG "init gre: add_dev failed\n");
+ pr_debug("%s: add_dev failed\n", __func__);
return;
}
@@ -2547,7 +2547,7 @@ static void ip6_tnl_add_linklocal(struct inet6_dev *idev)
if (!ipv6_inherit_linklocal(idev, link_dev))
return;
}
- printk(KERN_DEBUG "init ip6-ip6: add_linklocal failed\n");
+ pr_debug("init ip6-ip6: add_linklocal failed\n");
}
/*
@@ -2563,14 +2563,14 @@ static void addrconf_ip6_tnl_config(struct net_device *dev)
idev = addrconf_add_dev(dev);
if (IS_ERR(idev)) {
- printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n");
+ pr_debug("init ip6-ip6: add_dev failed\n");
return;
}
ip6_tnl_add_linklocal(idev);
}
static int addrconf_notify(struct notifier_block *this, unsigned long event,
- void * data)
+ void *data)
{
struct net_device *dev = (struct net_device *) data;
struct inet6_dev *idev = __in6_dev_get(dev);
@@ -2594,9 +2594,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
if (event == NETDEV_UP) {
if (!addrconf_qdisc_ok(dev)) {
/* device is not ready yet. */
- printk(KERN_INFO
- "ADDRCONF(NETDEV_UP): %s: "
- "link is not ready\n",
+ pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
dev->name);
break;
}
@@ -2621,10 +2619,8 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
idev->if_flags |= IF_READY;
}
- printk(KERN_INFO
- "ADDRCONF(NETDEV_CHANGE): %s: "
- "link becomes ready\n",
- dev->name);
+ pr_info("ADDRCONF(NETDEV_CHANGE): %s: link becomes ready\n",
+ dev->name);
run_pending = 1;
}
@@ -2895,8 +2891,7 @@ static void addrconf_rs_timer(unsigned long data)
* Note: we do not support deprecated "all on-link"
* assumption any longer.
*/
- printk(KERN_DEBUG "%s: no IPv6 routers present\n",
- idev->dev->name);
+ pr_debug("%s: no IPv6 routers present\n", idev->dev->name);
}
out:
@@ -2921,7 +2916,7 @@ static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
addrconf_mod_timer(ifp, AC_DAD, rand_num);
}
-static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
+static void addrconf_dad_start(struct inet6_ifaddr *ifp)
{
struct inet6_dev *idev = ifp->idev;
struct net_device *dev = idev->dev;
@@ -3794,7 +3789,7 @@ static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
return inet6_dump_addr(skb, cb, type);
}
-static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh,
+static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
void *arg)
{
struct net *net = sock_net(in_skb->sk);
@@ -3989,14 +3984,14 @@ static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev)
struct nlattr *nla;
struct ifla_cacheinfo ci;
- NLA_PUT_U32(skb, IFLA_INET6_FLAGS, idev->if_flags);
-
+ if (nla_put_u32(skb, IFLA_INET6_FLAGS, idev->if_flags))
+ goto nla_put_failure;
ci.max_reasm_len = IPV6_MAXPLEN;
ci.tstamp = cstamp_delta(idev->tstamp);
ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
ci.retrans_time = jiffies_to_msecs(idev->nd_parms->retrans_time);
- NLA_PUT(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci);
-
+ if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
+ goto nla_put_failure;
nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
if (nla == NULL)
goto nla_put_failure;
@@ -4061,15 +4056,13 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
hdr->ifi_flags = dev_get_flags(dev);
hdr->ifi_change = 0;
- NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
-
- if (dev->addr_len)
- NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
-
- NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
- if (dev->ifindex != dev->iflink)
- NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);
-
+ if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
+ (dev->addr_len &&
+ nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
+ nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
+ (dev->ifindex != dev->iflink &&
+ nla_put_u32(skb, IFLA_LINK, dev->iflink)))
+ goto nla_put_failure;
protoinfo = nla_nest_start(skb, IFLA_PROTINFO);
if (protoinfo == NULL)
goto nla_put_failure;
@@ -4182,12 +4175,12 @@ static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
if (pinfo->autoconf)
pmsg->prefix_flags |= IF_PREFIX_AUTOCONF;
- NLA_PUT(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix);
-
+ if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
+ goto nla_put_failure;
ci.preferred_time = ntohl(pinfo->prefered);
ci.valid_time = ntohl(pinfo->valid);
- NLA_PUT(skb, PREFIX_CACHEINFO, sizeof(ci), &ci);
-
+ if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci))
+ goto nla_put_failure;
return nlmsg_end(skb, nlh);
nla_put_failure:
@@ -4371,7 +4364,6 @@ static struct addrconf_sysctl_table
{
struct ctl_table_header *sysctl_header;
ctl_table addrconf_vars[DEVCONF_MAX+1];
- char *dev_name;
} addrconf_sysctl __read_mostly = {
.sysctl_header = NULL,
.addrconf_vars = {
@@ -4600,17 +4592,7 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name,
{
int i;
struct addrconf_sysctl_table *t;
-
-#define ADDRCONF_CTL_PATH_DEV 3
-
- struct ctl_path addrconf_ctl_path[] = {
- { .procname = "net", },
- { .procname = "ipv6", },
- { .procname = "conf", },
- { /* to be set */ },
- { },
- };
-
+ char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
t = kmemdup(&addrconf_sysctl, sizeof(*t), GFP_KERNEL);
if (t == NULL)
@@ -4622,27 +4604,15 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name,
t->addrconf_vars[i].extra2 = net;
}
- /*
- * Make a copy of dev_name, because '.procname' is regarded as const
- * by sysctl and we wouldn't want anyone to change it under our feet
- * (see SIOCSIFNAME).
- */
- t->dev_name = kstrdup(dev_name, GFP_KERNEL);
- if (!t->dev_name)
- goto free;
-
- addrconf_ctl_path[ADDRCONF_CTL_PATH_DEV].procname = t->dev_name;
+ snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name);
- t->sysctl_header = register_net_sysctl_table(net, addrconf_ctl_path,
- t->addrconf_vars);
+ t->sysctl_header = register_net_sysctl(net, path, t->addrconf_vars);
if (t->sysctl_header == NULL)
- goto free_procname;
+ goto free;
p->sysctl = t;
return 0;
-free_procname:
- kfree(t->dev_name);
free:
kfree(t);
out:
@@ -4659,7 +4629,6 @@ static void __addrconf_sysctl_unregister(struct ipv6_devconf *p)
t = p->sysctl;
p->sysctl = NULL;
unregister_net_sysctl_table(t->sysctl_header);
- kfree(t->dev_name);
kfree(t);
}
@@ -4778,8 +4747,8 @@ int __init addrconf_init(void)
err = ipv6_addr_label_init();
if (err < 0) {
- printk(KERN_CRIT "IPv6 Addrconf:"
- " cannot initialize default policy table: %d.\n", err);
+ pr_crit("%s: cannot initialize default policy table: %d\n",
+ __func__, err);
goto out;
}
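Most printk cleanups in this file lean on the pr_fmt hook defined at the top of the file: every pr_info()/pr_warn()/pr_debug() in the compilation unit gets the "IPv6: " prefix automatically, so it no longer needs repeating in each message. For illustration:

#define pr_fmt(fmt) "IPv6: " fmt
#include <linux/printk.h>

static void example(const char *ifname)
{
	pr_info("%s: link becomes ready\n", ifname);
	/* emits: "IPv6: eth0: link becomes ready" */
}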
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index 399287e595d7..d051e5f4bf34 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -8,9 +8,9 @@
#define IPV6_ADDR_SCOPE_TYPE(scope) ((scope) << 16)
-static inline unsigned ipv6_addr_scope2type(unsigned scope)
+static inline unsigned int ipv6_addr_scope2type(unsigned int scope)
{
- switch(scope) {
+ switch (scope) {
case IPV6_ADDR_SCOPE_NODELOCAL:
return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_NODELOCAL) |
IPV6_ADDR_LOOPBACK);
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index 2d8ddba9ee58..eb6a63632d3c 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -129,7 +129,7 @@ static void ip6addrlbl_free_rcu(struct rcu_head *h)
ip6addrlbl_free(container_of(h, struct ip6addrlbl_entry, rcu));
}
-static inline int ip6addrlbl_hold(struct ip6addrlbl_entry *p)
+static bool ip6addrlbl_hold(struct ip6addrlbl_entry *p)
{
return atomic_inc_not_zero(&p->refcnt);
}
@@ -141,20 +141,20 @@ static inline void ip6addrlbl_put(struct ip6addrlbl_entry *p)
}
/* Find label */
-static int __ip6addrlbl_match(struct net *net,
- struct ip6addrlbl_entry *p,
- const struct in6_addr *addr,
- int addrtype, int ifindex)
+static bool __ip6addrlbl_match(struct net *net,
+ const struct ip6addrlbl_entry *p,
+ const struct in6_addr *addr,
+ int addrtype, int ifindex)
{
if (!net_eq(ip6addrlbl_net(p), net))
- return 0;
+ return false;
if (p->ifindex && p->ifindex != ifindex)
- return 0;
+ return false;
if (p->addrtype && p->addrtype != addrtype)
- return 0;
+ return false;
if (!ipv6_prefix_equal(addr, &p->prefix, p->prefixlen))
- return 0;
- return 1;
+ return false;
+ return true;
}
static struct ip6addrlbl_entry *__ipv6_addr_label(struct net *net,
@@ -350,7 +350,7 @@ static int __net_init ip6addrlbl_net_init(struct net *net)
int err = 0;
int i;
- ADDRLABEL(KERN_DEBUG "%s()\n", __func__);
+ ADDRLABEL(KERN_DEBUG "%s\n", __func__);
for (i = 0; i < ARRAY_SIZE(ip6addrlbl_init_table); i++) {
int ret = ip6addrlbl_add(net,
@@ -456,8 +456,8 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh,
return err;
}
-static inline void ip6addrlbl_putmsg(struct nlmsghdr *nlh,
- int prefixlen, int ifindex, u32 lseq)
+static void ip6addrlbl_putmsg(struct nlmsghdr *nlh,
+ int prefixlen, int ifindex, u32 lseq)
{
struct ifaddrlblmsg *ifal = nlmsg_data(nlh);
ifal->ifal_family = AF_INET6;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 8ed1b930e75f..e22e6d88bac6 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -18,6 +18,7 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) "IPv6: " fmt
#include <linux/module.h>
#include <linux/capability.h>
@@ -77,7 +78,7 @@ struct ipv6_params ipv6_defaults = {
.autoconf = 1,
};
-static int disable_ipv6_mod = 0;
+static int disable_ipv6_mod;
module_param_named(disable, disable_ipv6_mod, int, 0444);
MODULE_PARM_DESC(disable, "Disable IPv6 module such that it is non-functional");
@@ -180,7 +181,7 @@ lookup_protocol:
err = 0;
sk->sk_no_check = answer_no_check;
if (INET_PROTOSW_REUSE & answer_flags)
- sk->sk_reuse = 1;
+ sk->sk_reuse = SK_CAN_REUSE;
inet = inet_sk(sk);
inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;
@@ -256,7 +257,7 @@ out_rcu_unlock:
/* bind for INET6 API */
int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
- struct sockaddr_in6 *addr=(struct sockaddr_in6 *)uaddr;
+ struct sockaddr_in6 *addr = (struct sockaddr_in6 *)uaddr;
struct sock *sk = sock->sk;
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
@@ -390,7 +391,6 @@ out_unlock:
rcu_read_unlock();
goto out;
}
-
EXPORT_SYMBOL(inet6_bind);
int inet6_release(struct socket *sock)
@@ -408,7 +408,6 @@ int inet6_release(struct socket *sock)
return inet_release(sock);
}
-
EXPORT_SYMBOL(inet6_release);
void inet6_destroy_sock(struct sock *sk)
@@ -419,10 +418,12 @@ void inet6_destroy_sock(struct sock *sk)
/* Release rx options */
- if ((skb = xchg(&np->pktoptions, NULL)) != NULL)
+ skb = xchg(&np->pktoptions, NULL);
+ if (skb != NULL)
kfree_skb(skb);
- if ((skb = xchg(&np->rxpmtu, NULL)) != NULL)
+ skb = xchg(&np->rxpmtu, NULL);
+ if (skb != NULL)
kfree_skb(skb);
/* Free flowlabels */
@@ -430,10 +431,10 @@ void inet6_destroy_sock(struct sock *sk)
/* Free tx options */
- if ((opt = xchg(&np->opt, NULL)) != NULL)
+ opt = xchg(&np->opt, NULL);
+ if (opt != NULL)
sock_kfree_s(sk, opt, opt->tot_len);
}
-
EXPORT_SYMBOL_GPL(inet6_destroy_sock);
/*
@@ -443,7 +444,7 @@ EXPORT_SYMBOL_GPL(inet6_destroy_sock);
int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
- struct sockaddr_in6 *sin=(struct sockaddr_in6 *)uaddr;
+ struct sockaddr_in6 *sin = (struct sockaddr_in6 *)uaddr;
struct sock *sk = sock->sk;
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
@@ -474,7 +475,6 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
*uaddr_len = sizeof(*sin);
return 0;
}
-
EXPORT_SYMBOL(inet6_getname);
int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
@@ -482,8 +482,7 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
- switch(cmd)
- {
+ switch (cmd) {
case SIOCGSTAMP:
return sock_get_timestamp(sk, (struct timeval __user *)arg);
@@ -509,7 +508,6 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
/*NOTREACHED*/
return 0;
}
-
EXPORT_SYMBOL(inet6_ioctl);
const struct proto_ops inet6_stream_ops = {
@@ -615,25 +613,21 @@ out:
return ret;
out_permanent:
- printk(KERN_ERR "Attempt to override permanent protocol %d.\n",
- protocol);
+ pr_err("Attempt to override permanent protocol %d\n", protocol);
goto out;
out_illegal:
- printk(KERN_ERR
- "Ignoring attempt to register invalid socket type %d.\n",
+ pr_err("Ignoring attempt to register invalid socket type %d\n",
p->type);
goto out;
}
-
EXPORT_SYMBOL(inet6_register_protosw);
void
inet6_unregister_protosw(struct inet_protosw *p)
{
if (INET_PROTOSW_PERMANENT & p->flags) {
- printk(KERN_ERR
- "Attempt to unregister permanent protocol %d.\n",
+ pr_err("Attempt to unregister permanent protocol %d\n",
p->protocol);
} else {
spin_lock_bh(&inetsw6_lock);
@@ -643,7 +637,6 @@ inet6_unregister_protosw(struct inet_protosw *p)
synchronize_net();
}
}
-
EXPORT_SYMBOL(inet6_unregister_protosw);
int inet6_sk_rebuild_header(struct sock *sk)
@@ -683,13 +676,12 @@ int inet6_sk_rebuild_header(struct sock *sk)
return 0;
}
-
EXPORT_SYMBOL_GPL(inet6_sk_rebuild_header);
-int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb)
+bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb)
{
- struct ipv6_pinfo *np = inet6_sk(sk);
- struct inet6_skb_parm *opt = IP6CB(skb);
+ const struct ipv6_pinfo *np = inet6_sk(sk);
+ const struct inet6_skb_parm *opt = IP6CB(skb);
if (np->rxopt.all) {
if ((opt->hop && (np->rxopt.bits.hopopts ||
@@ -701,11 +693,10 @@ int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb)
np->rxopt.bits.osrcrt)) ||
((opt->dst1 || opt->dst0) &&
(np->rxopt.bits.dstopts || np->rxopt.bits.odstopts)))
- return 1;
+ return true;
}
- return 0;
+ return false;
}
-
EXPORT_SYMBOL_GPL(ipv6_opt_accepted);
static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
@@ -1070,13 +1061,11 @@ static int __init inet6_init(void)
BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > sizeof(dummy_skb->cb));
/* Register the socket-side information for inet6_create. */
- for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
+ for (r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
INIT_LIST_HEAD(r);
if (disable_ipv6_mod) {
- printk(KERN_INFO
- "IPv6: Loaded, but administratively disabled, "
- "reboot required to enable\n");
+ pr_info("Loaded, but administratively disabled, reboot required to enable\n");
goto out;
}
@@ -1111,11 +1100,6 @@ static int __init inet6_init(void)
if (err)
goto out_sock_register_fail;
-#ifdef CONFIG_SYSCTL
- err = ipv6_static_sysctl_register();
- if (err)
- goto static_sysctl_fail;
-#endif
tcpv6_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem;
/*
@@ -1242,10 +1226,6 @@ ipmr_fail:
icmp_fail:
unregister_pernet_subsys(&inet6_net_ops);
register_pernet_fail:
-#ifdef CONFIG_SYSCTL
- ipv6_static_sysctl_unregister();
-static_sysctl_fail:
-#endif
sock_unregister(PF_INET6);
rtnl_unregister_all(PF_INET6);
out_sock_register_fail:
@@ -1272,9 +1252,6 @@ static void __exit inet6_exit(void)
/* Disallow any further netlink messages */
rtnl_unregister_all(PF_INET6);
-#ifdef CONFIG_SYSCTL
- ipv6_sysctl_unregister();
-#endif
udpv6_exit();
udplitev6_exit();
tcpv6_exit();
@@ -1302,9 +1279,6 @@ static void __exit inet6_exit(void)
rawv6_exit();
unregister_pernet_subsys(&inet6_net_ops);
-#ifdef CONFIG_SYSCTL
- ipv6_static_sysctl_unregister();
-#endif
proto_unregister(&rawv6_prot);
proto_unregister(&udplitev6_prot);
proto_unregister(&udpv6_prot);
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 2ae79dbeec2f..f1a4a2c28ed3 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -24,6 +24,8 @@
* This file is derived from net/ipv4/ah.c.
*/
+#define pr_fmt(fmt) "IPv6: " fmt
+
#include <crypto/hash.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -111,7 +113,7 @@ static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
__alignof__(struct scatterlist));
}
-static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
+static bool zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
{
u8 *opt = (u8 *)opthdr;
int len = ipv6_optlen(opthdr);
@@ -125,7 +127,7 @@ static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
switch (opt[off]) {
- case IPV6_TLV_PAD0:
+ case IPV6_TLV_PAD1:
optlen = 1;
break;
default:
@@ -143,10 +145,10 @@ static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
len -= optlen;
}
if (len == 0)
- return 1;
+ return true;
bad:
- return 0;
+ return false;
}
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
@@ -169,7 +171,7 @@ static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *des
switch (opt[off]) {
- case IPV6_TLV_PAD0:
+ case IPV6_TLV_PAD1:
optlen = 1;
break;
default:
@@ -189,8 +191,8 @@ static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *des
hao = (struct ipv6_destopt_hao *)&opt[off];
if (hao->length != sizeof(hao->addr)) {
- if (net_ratelimit())
- printk(KERN_WARNING "destopt hao: invalid header length: %u\n", hao->length);
+ net_warn_ratelimited("destopt hao: invalid header length: %u\n",
+ hao->length);
goto bad;
}
final_addr = hao->addr;
@@ -659,9 +661,9 @@ static int ah6_init_state(struct xfrm_state *x)
if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
crypto_ahash_digestsize(ahash)) {
- printk(KERN_INFO "AH: %s digestsize %u != %hu\n",
- x->aalg->alg_name, crypto_ahash_digestsize(ahash),
- aalg_desc->uinfo.auth.icv_fullbits/8);
+ pr_info("AH: %s digestsize %u != %hu\n",
+ x->aalg->alg_name, crypto_ahash_digestsize(ahash),
+ aalg_desc->uinfo.auth.icv_fullbits/8);
goto error;
}
@@ -727,12 +729,12 @@ static const struct inet6_protocol ah6_protocol = {
static int __init ah6_init(void)
{
if (xfrm_register_type(&ah6_type, AF_INET6) < 0) {
- printk(KERN_INFO "ipv6 ah init: can't add xfrm type\n");
+ pr_info("%s: can't add xfrm type\n", __func__);
return -EAGAIN;
}
if (inet6_add_protocol(&ah6_protocol, IPPROTO_AH) < 0) {
- printk(KERN_INFO "ipv6 ah init: can't add protocol\n");
+ pr_info("%s: can't add protocol\n", __func__);
xfrm_unregister_type(&ah6_type, AF_INET6);
return -EAGAIN;
}
@@ -743,10 +745,10 @@ static int __init ah6_init(void)
static void __exit ah6_fini(void)
{
if (inet6_del_protocol(&ah6_protocol, IPPROTO_AH) < 0)
- printk(KERN_INFO "ipv6 ah close: can't remove protocol\n");
+ pr_info("%s: can't remove protocol\n", __func__);
if (xfrm_unregister_type(&ah6_type, AF_INET6) < 0)
- printk(KERN_INFO "ipv6 ah close: can't remove xfrm type\n");
+ pr_info("%s: can't remove xfrm type\n", __func__);
}
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index db00d27ffb16..cdf02be5f191 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -342,7 +342,7 @@ static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr)
* check if the interface has this anycast address
* called with rcu_read_lock()
*/
-static int ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *addr)
+static bool ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *addr)
{
struct inet6_dev *idev;
struct ifacaddr6 *aca;
@@ -356,16 +356,16 @@ static int ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *add
read_unlock_bh(&idev->lock);
return aca != NULL;
}
- return 0;
+ return false;
}
/*
* check if given interface (or any, if dev==0) has this anycast address
*/
-int ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
- const struct in6_addr *addr)
+bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
+ const struct in6_addr *addr)
{
- int found = 0;
+ bool found = false;
rcu_read_lock();
if (dev)
@@ -373,7 +373,7 @@ int ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
else
for_each_netdev_rcu(net, dev)
if (ipv6_chk_acast_dev(dev, addr)) {
- found = 1;
+ found = true;
break;
}
rcu_read_unlock();
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 76832c8dc89d..be2b67d631e5 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -22,6 +22,7 @@
#include <linux/ipv6.h>
#include <linux/route.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/ipv6.h>
#include <net/ndisc.h>
@@ -33,9 +34,9 @@
#include <linux/errqueue.h>
#include <asm/uaccess.h>
-static inline int ipv6_mapped_addr_any(const struct in6_addr *a)
+static bool ipv6_mapped_addr_any(const struct in6_addr *a)
{
- return (ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0));
+ return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
}
int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
@@ -98,7 +99,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sin.sin_port = usin->sin6_port;
err = ip4_datagram_connect(sk,
- (struct sockaddr*) &sin,
+ (struct sockaddr *) &sin,
sizeof(sin));
ipv4_connected:
@@ -202,6 +203,7 @@ out:
fl6_sock_release(flowlabel);
return err;
}
+EXPORT_SYMBOL_GPL(ip6_datagram_connect);
void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
__be16 port, u32 info, u8 *payload)
@@ -414,6 +416,7 @@ out_free_skb:
out:
return err;
}
+EXPORT_SYMBOL_GPL(ipv6_recv_error);
/*
* Handle IPV6_RECVPATHMTU
@@ -515,10 +518,10 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
u8 nexthdr = ipv6_hdr(skb)->nexthdr;
while (off <= opt->lastopt) {
- unsigned len;
+ unsigned int len;
u8 *ptr = nh + off;
- switch(nexthdr) {
+ switch (nexthdr) {
case IPPROTO_DSTOPTS:
nexthdr = ptr[0];
len = (ptr[1] + 1) << 3;
@@ -827,9 +830,8 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
int tc;
err = -EINVAL;
- if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) {
+ if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
goto exit_f;
- }
tc = *(int *)CMSG_DATA(cmsg);
if (tc < -1 || tc > 0xff)
@@ -846,9 +848,8 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
int df;
err = -EINVAL;
- if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) {
+ if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
goto exit_f;
- }
df = *(int *)CMSG_DATA(cmsg);
if (df < 0 || df > 1)
@@ -870,3 +871,4 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
exit_f:
return err;
}
+EXPORT_SYMBOL_GPL(datagram_send_ctl);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 1ac7938dd9ec..db1521fcda5b 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -24,6 +24,8 @@
* This file is derived from net/ipv4/esp.c
*/
+#define pr_fmt(fmt) "IPv6: " fmt
+
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
@@ -411,19 +413,15 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
struct esp_data *esp = x->data;
u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
u32 align = max_t(u32, blksize, esp->padlen);
- u32 rem;
-
- mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
- rem = mtu & (align - 1);
- mtu &= ~(align - 1);
+ unsigned int net_adj;
- if (x->props.mode != XFRM_MODE_TUNNEL) {
- u32 padsize = ((blksize - 1) & 7) + 1;
- mtu -= blksize - padsize;
- mtu += min_t(u32, blksize - padsize, rem);
- }
+ if (x->props.mode != XFRM_MODE_TUNNEL)
+ net_adj = sizeof(struct ipv6hdr);
+ else
+ net_adj = 0;
- return mtu - 2;
+ return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
+ net_adj) & ~(align - 1)) + (net_adj - 2);
}
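The rewritten esp6_get_mtu() picks the largest payload whose encrypted packet still fits the link: subtract the ESP overhead and, in transport mode, the IPv6 header that sits outside the aligned region, round down to the cipher's block alignment, then hand back net_adj minus the two trailer bytes (pad length and next header). As a standalone sketch, assuming align is a power of two:

static unsigned int esp_payload_mtu(unsigned int mtu, unsigned int overhead,
				    unsigned int net_adj, unsigned int align)
{
	return ((mtu - overhead - net_adj) & ~(align - 1)) + (net_adj - 2);
}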
static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
@@ -442,8 +440,8 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
esph->spi, IPPROTO_ESP, AF_INET6);
if (!x)
return;
- printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%pI6\n",
- ntohl(esph->spi), &iph->daddr);
+ pr_debug("pmtu discovery on SA ESP/%08x/%pI6\n",
+ ntohl(esph->spi), &iph->daddr);
xfrm_state_put(x);
}
@@ -651,11 +649,11 @@ static const struct inet6_protocol esp6_protocol = {
static int __init esp6_init(void)
{
if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
- printk(KERN_INFO "ipv6 esp init: can't add xfrm type\n");
+ pr_info("%s: can't add xfrm type\n", __func__);
return -EAGAIN;
}
if (inet6_add_protocol(&esp6_protocol, IPPROTO_ESP) < 0) {
- printk(KERN_INFO "ipv6 esp init: can't add protocol\n");
+ pr_info("%s: can't add protocol\n", __func__);
xfrm_unregister_type(&esp6_type, AF_INET6);
return -EAGAIN;
}
@@ -666,9 +664,9 @@ static int __init esp6_init(void)
static void __exit esp6_fini(void)
{
if (inet6_del_protocol(&esp6_protocol, IPPROTO_ESP) < 0)
- printk(KERN_INFO "ipv6 esp close: can't remove protocol\n");
+ pr_info("%s: can't remove protocol\n", __func__);
if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0)
- printk(KERN_INFO "ipv6 esp close: can't remove xfrm type\n");
+ pr_info("%s: can't remove xfrm type\n", __func__);
}
module_init(esp6_init);
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 3d641b6e9b09..6447dc49429f 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -75,7 +75,7 @@ int ipv6_find_tlv(struct sk_buff *skb, int offset, int type)
return offset;
switch (opttype) {
- case IPV6_TLV_PAD0:
+ case IPV6_TLV_PAD1:
optlen = 1;
break;
default:
@@ -96,14 +96,14 @@ EXPORT_SYMBOL_GPL(ipv6_find_tlv);
/*
* Parsing tlv encoded headers.
*
- * Parsing function "func" returns 1, if parsing succeed
- * and 0, if it failed.
+ * Parsing function "func" returns true if parsing succeeded
+ * and false if it failed.
* It MUST NOT touch skb->h.
*/
struct tlvtype_proc {
int type;
- int (*func)(struct sk_buff *skb, int offset);
+ bool (*func)(struct sk_buff *skb, int offset);
};
/*********************
@@ -112,11 +112,11 @@ struct tlvtype_proc {
/* An unknown option is detected, decide what to do */
-static int ip6_tlvopt_unknown(struct sk_buff *skb, int optoff)
+static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff)
{
switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) {
case 0: /* ignore */
- return 1;
+ return true;
case 1: /* drop packet */
break;
@@ -129,21 +129,22 @@ static int ip6_tlvopt_unknown(struct sk_buff *skb, int optoff)
break;
case 2: /* send ICMP PARM PROB regardless and drop packet */
icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
- return 0;
+ return false;
}
kfree_skb(skb);
- return 0;
+ return false;
}
/* Parse tlv encoded option header (hop-by-hop or destination) */
-static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb)
+static bool ip6_parse_tlv(const struct tlvtype_proc *procs, struct sk_buff *skb)
{
- struct tlvtype_proc *curr;
+ const struct tlvtype_proc *curr;
const unsigned char *nh = skb_network_header(skb);
int off = skb_network_header_len(skb);
int len = (skb_transport_header(skb)[1] + 1) << 3;
+ int padlen = 0;
if (skb_transport_offset(skb) + len > skb_headlen(skb))
goto bad;
@@ -153,13 +154,33 @@ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb)
while (len > 0) {
int optlen = nh[off + 1] + 2;
+ int i;
switch (nh[off]) {
- case IPV6_TLV_PAD0:
+ case IPV6_TLV_PAD1:
optlen = 1;
+ padlen++;
+ if (padlen > 7)
+ goto bad;
break;
case IPV6_TLV_PADN:
+ /* RFC 2460 states that the purpose of PadN is
+ * to align the containing header to multiples
+ * of 8. 7 is therefore the highest valid value.
+ * See also RFC 4942, Section 2.1.9.5.
+ */
+ padlen += optlen;
+ if (padlen > 7)
+ goto bad;
+ /* RFC 4942 recommends that receiving hosts
+ * actively verify that the PadN payload
+ * contains only zeroes.
+ */
+ for (i = 2; i < optlen; i++) {
+ if (nh[off + i] != 0)
+ goto bad;
+ }
break;
default: /* Other TLV code so scan list */
@@ -170,25 +191,33 @@ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb)
/* type specific length/alignment
checks will be performed in the
func(). */
- if (curr->func(skb, off) == 0)
- return 0;
+ if (curr->func(skb, off) == false)
+ return false;
break;
}
}
if (curr->type < 0) {
if (ip6_tlvopt_unknown(skb, off) == 0)
- return 0;
+ return false;
}
+ padlen = 0;
break;
}
off += optlen;
len -= optlen;
}
+ /* This case will not be caught by the above check since its padding
+ * length is smaller than 7:
+ * 1 byte NH + 1 byte Length + 6 bytes Padding
+ */
+ if ((padlen == 6) && ((off - skb_network_header_len(skb)) == 8))
+ goto bad;
+
if (len == 0)
- return 1;
+ return true;
bad:
kfree_skb(skb);
- return 0;
+ return false;
}
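The hardening above implements RFC 4942, Section 2.1.9.5: padding exists only to reach 8-byte alignment, so more than 7 consecutive bytes of it is never legitimate, a PadN body must be all zeroes (anything else would make a covert channel), and a header consisting of nothing but 6 bytes of padding is rejected as well. (The IPV6_TLV_PAD0 to IPV6_TLV_PAD1 rename throughout simply adopts RFC 2460's name for the one-byte option.) A compact sketch of the per-option padding check:

#include <stdbool.h>
#include <stdint.h>

/* validate one Pad1/PadN option; *padlen accumulates consecutive padding */
static bool padding_ok(const uint8_t *opt, int optlen, int *padlen)
{
	*padlen += optlen;
	if (*padlen > 7)	/* alignment is to 8, so >7 is bogus */
		return false;
	for (int i = 2; i < optlen; i++)
		if (opt[i] != 0)	/* PadN body must be zero */
			return false;
	return true;
}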
/*****************************
@@ -196,7 +225,7 @@ bad:
*****************************/
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
-static int ipv6_dest_hao(struct sk_buff *skb, int optoff)
+static bool ipv6_dest_hao(struct sk_buff *skb, int optoff)
{
struct ipv6_destopt_hao *hao;
struct inet6_skb_parm *opt = IP6CB(skb);
@@ -250,15 +279,15 @@ static int ipv6_dest_hao(struct sk_buff *skb, int optoff)
if (skb->tstamp.tv64 == 0)
__net_timestamp(skb);
- return 1;
+ return true;
discard:
kfree_skb(skb);
- return 0;
+ return false;
}
#endif
-static struct tlvtype_proc tlvprocdestopt_lst[] = {
+static const struct tlvtype_proc tlvprocdestopt_lst[] = {
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
{
.type = IPV6_TLV_HAO,
@@ -563,23 +592,23 @@ static inline struct net *ipv6_skb_net(struct sk_buff *skb)
/* Router Alert as of RFC 2711 */
-static int ipv6_hop_ra(struct sk_buff *skb, int optoff)
+static bool ipv6_hop_ra(struct sk_buff *skb, int optoff)
{
const unsigned char *nh = skb_network_header(skb);
if (nh[optoff + 1] == 2) {
IP6CB(skb)->ra = optoff;
- return 1;
+ return true;
}
LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_ra: wrong RA length %d\n",
nh[optoff + 1]);
kfree_skb(skb);
- return 0;
+ return false;
}
/* Jumbo payload */
-static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
+static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
{
const unsigned char *nh = skb_network_header(skb);
struct net *net = ipv6_skb_net(skb);
@@ -598,13 +627,13 @@ static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
- return 0;
+ return false;
}
if (ipv6_hdr(skb)->payload_len) {
IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
- return 0;
+ return false;
}
if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
@@ -616,14 +645,14 @@ static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
goto drop;
- return 1;
+ return true;
drop:
kfree_skb(skb);
- return 0;
+ return false;
}
-static struct tlvtype_proc tlvprochopopt_lst[] = {
+static const struct tlvtype_proc tlvprochopopt_lst[] = {
{
.type = IPV6_TLV_ROUTERALERT,
.func = ipv6_hop_ra,
@@ -722,7 +751,6 @@ void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
if (opt->hopopt)
ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt);
}
-
EXPORT_SYMBOL(ipv6_push_nfrag_opts);
void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto)
@@ -738,20 +766,19 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC);
if (opt2) {
- long dif = (char*)opt2 - (char*)opt;
+ long dif = (char *)opt2 - (char *)opt;
memcpy(opt2, opt, opt->tot_len);
if (opt2->hopopt)
- *((char**)&opt2->hopopt) += dif;
+ *((char **)&opt2->hopopt) += dif;
if (opt2->dst0opt)
- *((char**)&opt2->dst0opt) += dif;
+ *((char **)&opt2->dst0opt) += dif;
if (opt2->dst1opt)
- *((char**)&opt2->dst1opt) += dif;
+ *((char **)&opt2->dst1opt) += dif;
if (opt2->srcrt)
- *((char**)&opt2->srcrt) += dif;
+ *((char **)&opt2->srcrt) += dif;
}
return opt2;
}
-
EXPORT_SYMBOL_GPL(ipv6_dup_options);
static int ipv6_renew_option(void *ohdr,
@@ -869,6 +896,7 @@ struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
return opt;
}
+EXPORT_SYMBOL_GPL(ipv6_fixup_options);
/**
* fl6_update_dst - update flowi destination address with info given
@@ -892,5 +920,4 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
return orig;
}
-
EXPORT_SYMBOL_GPL(fl6_update_dst);
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
index 72957f4a7c6c..f73d59a14131 100644
--- a/net/ipv6/exthdrs_core.c
+++ b/net/ipv6/exthdrs_core.c
@@ -9,7 +9,7 @@
* find out if nexthdr is a well-known extension header or a protocol
*/
-int ipv6_ext_hdr(u8 nexthdr)
+bool ipv6_ext_hdr(u8 nexthdr)
{
/*
* find out if nexthdr is an extension header or a protocol
@@ -21,6 +21,7 @@ int ipv6_ext_hdr(u8 nexthdr)
(nexthdr == NEXTHDR_NONE) ||
(nexthdr == NEXTHDR_DEST);
}
+EXPORT_SYMBOL(ipv6_ext_hdr);
/*
* Skip any extension headers. This is used by the ICMP module.
@@ -109,6 +110,4 @@ int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp,
*nexthdrp = nexthdr;
return start;
}
-
-EXPORT_SYMBOL(ipv6_ext_hdr);
EXPORT_SYMBOL(ipv6_skip_exthdr);
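The exthdrs_core.c hunk converts ipv6_ext_hdr() to bool and moves its export next to the definition. The predicate itself is a plain membership test over the well-known extension-header protocol numbers; a self-contained sketch (the enum values below are the standard IPv6 next-header assignments):

#include <stdbool.h>
#include <stdio.h>

/* the standard IPv6 next-header numbers the predicate tests against */
enum {
        NEXTHDR_HOP      = 0,
        NEXTHDR_ROUTING  = 43,
        NEXTHDR_FRAGMENT = 44,
        NEXTHDR_AUTH     = 51,
        NEXTHDR_NONE     = 59,
        NEXTHDR_DEST     = 60,
};

static bool ipv6_ext_hdr(unsigned char nexthdr)
{
        return nexthdr == NEXTHDR_HOP      ||
               nexthdr == NEXTHDR_ROUTING  ||
               nexthdr == NEXTHDR_FRAGMENT ||
               nexthdr == NEXTHDR_AUTH     ||
               nexthdr == NEXTHDR_NONE     ||
               nexthdr == NEXTHDR_DEST;
}

int main(void)
{
        printf("%d %d\n", ipv6_ext_hdr(0) /* hop-by-hop */,
               ipv6_ext_hdr(6) /* TCP: a payload, not an extension */);
        return 0;
}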
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index b6c573152067..0ff1cfd55bc4 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -22,8 +22,7 @@
#include <net/ip6_route.h>
#include <net/netlink.h>
-struct fib6_rule
-{
+struct fib6_rule {
struct fib_rule common;
struct rt6key src;
struct rt6key dst;
@@ -215,14 +214,13 @@ static int fib6_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
frh->src_len = rule6->src.plen;
frh->tos = rule6->tclass;
- if (rule6->dst.plen)
- NLA_PUT(skb, FRA_DST, sizeof(struct in6_addr),
- &rule6->dst.addr);
-
- if (rule6->src.plen)
- NLA_PUT(skb, FRA_SRC, sizeof(struct in6_addr),
- &rule6->src.addr);
-
+ if ((rule6->dst.plen &&
+ nla_put(skb, FRA_DST, sizeof(struct in6_addr),
+ &rule6->dst.addr)) ||
+ (rule6->src.plen &&
+ nla_put(skb, FRA_SRC, sizeof(struct in6_addr),
+ &rule6->src.addr)))
+ goto nla_put_failure;
return 0;
nla_put_failure:
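The fib6_rules.c hunk is part of the tree-wide move away from the NLA_PUT() macro, which hid an implicit "goto nla_put_failure" inside itself. The replacement spells the control flow out: each nla_put() returns nonzero when the attribute does not fit in the message, and the calls are chained with || so one failure label handles cleanup. A userspace model of the shape (struct msg and put_bytes() stand in for the netlink message and nla_put(); the buffer size is illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct msg { char buf[8]; size_t used; };

static bool put_bytes(struct msg *m, const void *p, size_t n)
{
        if (m->used + n > sizeof(m->buf))
                return true;            /* failure, as nla_put() != 0 */
        memcpy(m->buf + m->used, p, n);
        m->used += n;
        return false;
}

/* same shape as the converted fib6_rule_fill(): chain the puts with ||
 * and keep a single failure label for cleanup */
static int fill(struct msg *m, const char *dst, const char *src)
{
        if ((dst && put_bytes(m, dst, 4)) ||
            (src && put_bytes(m, src, 4)))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -1;                      /* caller cancels the message */
}

int main(void)
{
        struct msg m = { .used = 0 };

        printf("%d\n", fill(&m, "dst", "src"));  /* 0: both attributes fit */
        printf("%d\n", fill(&m, "dst", "src"));  /* -1: message buffer full */
        return 0;
}

The same conversion recurs later in this patch, in ip6mr_fill_mroute() and ndisc_ra_useropt().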
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 27ac95a63429..091a2971c7b7 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -29,6 +29,8 @@
* Kazunori MIYAZAWA @USAGI: change output process to use ip6_append_data
*/
+#define pr_fmt(fmt) "IPv6: " fmt
+
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
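The '#define pr_fmt(fmt) "IPv6: " fmt' added ahead of the includes makes every subsequent pr_err()/pr_warn()/pr_debug() in the file carry a consistent prefix, which is why the literal "IPv6: " strings disappear from individual messages elsewhere in this patch. A minimal userspace model of the mechanism (pr_err() here is a stand-in; in the kernel, linux/printk.h supplies the default pr_fmt and the pr_* wrappers):

#include <stdio.h>

#define pr_fmt(fmt) "IPv6: " fmt
/* stand-in for the kernel's pr_err(); uses the GNU ##__VA_ARGS__ extension */
#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
        pr_err("Failed to register ICMP6 protocol\n");
        /* prints: IPv6: Failed to register ICMP6 protocol */
        return 0;
}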
@@ -129,7 +131,7 @@ void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
* --ANK (980726)
*/
-static int is_ineligible(struct sk_buff *skb)
+static bool is_ineligible(const struct sk_buff *skb)
{
int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
int len = skb->len - ptr;
@@ -137,11 +139,11 @@ static int is_ineligible(struct sk_buff *skb)
__be16 frag_off;
if (len < 0)
- return 1;
+ return true;
ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr, &frag_off);
if (ptr < 0)
- return 0;
+ return false;
if (nexthdr == IPPROTO_ICMPV6) {
u8 _type, *tp;
tp = skb_header_pointer(skb,
@@ -149,9 +151,9 @@ static int is_ineligible(struct sk_buff *skb)
sizeof(_type), &_type);
if (tp == NULL ||
!(*tp & ICMPV6_INFOMSG_MASK))
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/*
@@ -206,14 +208,14 @@ static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
* highest-order two bits set to 10
*/
-static __inline__ int opt_unrec(struct sk_buff *skb, __u32 offset)
+static bool opt_unrec(struct sk_buff *skb, __u32 offset)
{
u8 _optval, *op;
offset += skb_network_offset(skb);
op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
if (op == NULL)
- return 1;
+ return true;
return (*op & 0xC0) == 0x80;
}
@@ -498,7 +500,7 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
err = ip6_append_data(sk, icmpv6_getfrag, &msg,
len + sizeof(struct icmp6hdr),
sizeof(struct icmp6hdr), hlimit,
- np->tclass, NULL, &fl6, (struct rt6_info*)dst,
+ np->tclass, NULL, &fl6, (struct rt6_info *)dst,
MSG_DONTWAIT, np->dontfrag);
if (err) {
ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
@@ -579,7 +581,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl6,
- (struct rt6_info*)dst, MSG_DONTWAIT,
+ (struct rt6_info *)dst, MSG_DONTWAIT,
np->dontfrag);
if (err) {
@@ -820,9 +822,7 @@ static int __net_init icmpv6_sk_init(struct net *net)
err = inet_ctl_sock_create(&sk, PF_INET6,
SOCK_RAW, IPPROTO_ICMPV6, net);
if (err < 0) {
- printk(KERN_ERR
- "Failed to initialize the ICMP6 control socket "
- "(err %d).\n",
+ pr_err("Failed to initialize the ICMP6 control socket (err %d)\n",
err);
goto fail;
}
@@ -881,7 +881,7 @@ int __init icmpv6_init(void)
return 0;
fail:
- printk(KERN_ERR "Failed to register ICMP6 protocol\n");
+ pr_err("Failed to register ICMP6 protocol\n");
unregister_pernet_subsys(&icmpv6_sk_ops);
return err;
}
@@ -950,7 +950,6 @@ int icmpv6_err_convert(u8 type, u8 code, int *err)
return fatal;
}
-
EXPORT_SYMBOL(icmpv6_err_convert);
#ifdef CONFIG_SYSCTL
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 02dd203d9eac..e6cee5292a0b 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -28,7 +28,7 @@
#include <net/inet6_connection_sock.h>
int inet6_csk_bind_conflict(const struct sock *sk,
- const struct inet_bind_bucket *tb)
+ const struct inet_bind_bucket *tb, bool relax)
{
const struct sock *sk2;
const struct hlist_node *node;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 5b27fbcae346..0c220a416626 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -18,6 +18,9 @@
* routing table.
* Ville Nuorvala: Fixed routing subtrees.
*/
+
+#define pr_fmt(fmt) "IPv6: " fmt
+
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/net.h>
@@ -38,7 +41,7 @@
#define RT6_DEBUG 2
#if RT6_DEBUG >= 3
-#define RT6_TRACE(x...) printk(KERN_DEBUG x)
+#define RT6_TRACE(x...) pr_debug(x)
#else
#define RT6_TRACE(x...) do { ; } while (0)
#endif
@@ -451,12 +454,10 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
!ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) {
if (!allow_create) {
if (replace_required) {
- pr_warn("IPv6: Can't replace route, "
- "no match found\n");
+ pr_warn("Can't replace route, no match found\n");
return ERR_PTR(-ENOENT);
}
- pr_warn("IPv6: NLM_F_CREATE should be set "
- "when creating new route\n");
+ pr_warn("NLM_F_CREATE should be set when creating new route\n");
}
goto insert_above;
}
@@ -499,11 +500,10 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
* That would keep IPv6 consistent with IPv4
*/
if (replace_required) {
- pr_warn("IPv6: Can't replace route, no match found\n");
+ pr_warn("Can't replace route, no match found\n");
return ERR_PTR(-ENOENT);
}
- pr_warn("IPv6: NLM_F_CREATE should be set "
- "when creating new route\n");
+ pr_warn("NLM_F_CREATE should be set when creating new route\n");
}
/*
* We walked to the bottom of tree.
@@ -673,11 +673,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
&rt->rt6i_gateway)) {
if (!(iter->rt6i_flags & RTF_EXPIRES))
return -EEXIST;
- iter->dst.expires = rt->dst.expires;
- if (!(rt->rt6i_flags & RTF_EXPIRES)) {
- iter->rt6i_flags &= ~RTF_EXPIRES;
- iter->dst.expires = 0;
- }
+ if (!(rt->rt6i_flags & RTF_EXPIRES))
+ rt6_clean_expires(iter);
+ else
+ rt6_set_expires(iter, rt->dst.expires);
return -EEXIST;
}
}
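The fib6_add_rt2node() hunk replaces open-coded updates of dst.expires and RTF_EXPIRES with rt6_set_expires()/rt6_clean_expires(), so the flag and the timestamp can no longer drift apart at individual call sites. A compilable sketch of that accessor pattern (the stub structs are stand-ins for struct rt6_info, and the helper bodies are an assumption inferred from how the patch uses them, not copied from the kernel):

#include <stdio.h>

#define RTF_EXPIRES 0x00400000  /* matches the UAPI route flag value */

/* stand-in structs so the sketch compiles outside the kernel */
struct dst_stub { unsigned long expires; };
struct rt6_stub { struct dst_stub dst; unsigned int rt6i_flags; };

/* assumed helper bodies: the point is that the flag and the timestamp
 * are always updated together, in one place */
static void rt6_set_expires(struct rt6_stub *rt, unsigned long expires)
{
        rt->dst.expires = expires;
        rt->rt6i_flags |= RTF_EXPIRES;
}

static void rt6_clean_expires(struct rt6_stub *rt)
{
        rt->rt6i_flags &= ~RTF_EXPIRES;
        rt->dst.expires = 0;
}

int main(void)
{
        struct rt6_stub rt = { { 0 }, 0 };

        rt6_set_expires(&rt, 1000);
        printf("expires=%lu flagged=%d\n",
               rt.dst.expires, !!(rt.rt6i_flags & RTF_EXPIRES));
        rt6_clean_expires(&rt);
        printf("expires=%lu flagged=%d\n",
               rt.dst.expires, !!(rt.rt6i_flags & RTF_EXPIRES));
        return 0;
}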
@@ -697,7 +696,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
*/
if (!replace) {
if (!add)
- pr_warn("IPv6: NLM_F_CREATE should be set when creating new route\n");
+ pr_warn("NLM_F_CREATE should be set when creating new route\n");
add:
rt->dst.rt6_next = iter;
@@ -716,7 +715,7 @@ add:
if (!found) {
if (add)
goto add;
- pr_warn("IPv6: NLM_F_REPLACE set, but no existing node found!\n");
+ pr_warn("NLM_F_REPLACE set, but no existing node found!\n");
return -ENOENT;
}
*ins = rt;
@@ -769,7 +768,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
replace_required = 1;
}
if (!allow_create && !replace_required)
- pr_warn("IPv6: RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n");
+ pr_warn("RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n");
fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr),
rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst),
@@ -1421,7 +1420,8 @@ static int fib6_clean_node(struct fib6_walker_t *w)
res = fib6_del(rt, &info);
if (res) {
#if RT6_DEBUG >= 2
- printk(KERN_DEBUG "fib6_clean_node: del failed: rt=%p@%p err=%d\n", rt, rt->rt6i_node, res);
+ pr_debug("%s: del failed: rt=%p@%p err=%d\n",
+ __func__, rt, rt->rt6i_node, res);
#endif
continue;
}
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index b7867a1215b1..9772fbd8a3f5 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -294,6 +294,7 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions * opt_space,
opt_space->opt_flen = fopt->opt_flen;
return opt_space;
}
+EXPORT_SYMBOL_GPL(fl6_merge_options);
static unsigned long check_linger(unsigned long ttl)
{
@@ -432,32 +433,32 @@ static int mem_check(struct sock *sk)
return 0;
}
-static int ipv6_hdr_cmp(struct ipv6_opt_hdr *h1, struct ipv6_opt_hdr *h2)
+static bool ipv6_hdr_cmp(struct ipv6_opt_hdr *h1, struct ipv6_opt_hdr *h2)
{
if (h1 == h2)
- return 0;
+ return false;
if (h1 == NULL || h2 == NULL)
- return 1;
+ return true;
if (h1->hdrlen != h2->hdrlen)
- return 1;
+ return true;
return memcmp(h1+1, h2+1, ((h1->hdrlen+1)<<3) - sizeof(*h1));
}
-static int ipv6_opt_cmp(struct ipv6_txoptions *o1, struct ipv6_txoptions *o2)
+static bool ipv6_opt_cmp(struct ipv6_txoptions *o1, struct ipv6_txoptions *o2)
{
if (o1 == o2)
- return 0;
+ return false;
if (o1 == NULL || o2 == NULL)
- return 1;
+ return true;
if (o1->opt_nflen != o2->opt_nflen)
- return 1;
+ return true;
if (ipv6_hdr_cmp(o1->hopopt, o2->hopopt))
- return 1;
+ return true;
if (ipv6_hdr_cmp(o1->dst0opt, o2->dst0opt))
- return 1;
+ return true;
if (ipv6_hdr_cmp((struct ipv6_opt_hdr *)o1->srcrt, (struct ipv6_opt_hdr *)o2->srcrt))
- return 1;
- return 0;
+ return true;
+ return false;
}
static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
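Note the polarity after the bool conversion: ipv6_hdr_cmp() and ipv6_opt_cmp() return true when the operands *differ*, matching memcmp()-style truthiness rather than an "is equal" predicate, which is also why the final memcmp() line needs no change. A tiny standalone illustration (names and values are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* returns true when the two buffers differ, as the converted
 * ipv6_hdr_cmp() does; memcmp()'s nonzero result converts to true */
static bool bufs_differ(const void *a, const void *b, size_t n)
{
        return memcmp(a, b, n);
}

int main(void)
{
        printf("%d %d\n", bufs_differ("abc", "abc", 3),
               bufs_differ("abc", "abd", 3));   /* 0 1 */
        return 0;
}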
@@ -705,9 +706,9 @@ static int ip6fl_seq_show(struct seq_file *seq, void *v)
struct ip6_flowlabel *fl = v;
seq_printf(seq,
"%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
- (unsigned)ntohl(fl->label),
+ (unsigned int)ntohl(fl->label),
fl->share,
- (unsigned)fl->owner,
+ (int)fl->owner,
atomic_read(&fl->users),
fl->linger/HZ,
(long)(fl->expires - jiffies)/HZ,
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 1ca5d45a12e8..21a15dfe4a9e 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -170,7 +170,8 @@ static int ip6_input_finish(struct sk_buff *skb)
{
const struct inet6_protocol *ipprot;
unsigned int nhoff;
- int nexthdr, raw;
+ int nexthdr;
+ bool raw;
u8 hash;
struct inet6_dev *idev;
struct net *net = dev_net(skb_dst(skb)->dev);
@@ -251,7 +252,7 @@ int ip6_input(struct sk_buff *skb)
int ip6_mc_input(struct sk_buff *skb)
{
const struct ipv6hdr *hdr;
- int deliver;
+ bool deliver;
IP6_UPD_PO_STATS_BH(dev_net(skb_dst(skb)->dev),
ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INMCAST,
@@ -287,7 +288,7 @@ int ip6_mc_input(struct sk_buff *skb)
* is for MLD (0x0000).
*/
if ((ptr[2] | ptr[3]) == 0) {
- deliver = 0;
+ deliver = false;
if (!ipv6_ext_hdr(nexthdr)) {
/* BUG */
@@ -312,7 +313,7 @@ int ip6_mc_input(struct sk_buff *skb)
case ICMPV6_MGM_REPORT:
case ICMPV6_MGM_REDUCTION:
case ICMPV6_MLD2_REPORT:
- deliver = 1;
+ deliver = true;
break;
}
goto out;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index b7ca46161cb9..17b8c67998bb 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -210,7 +210,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
kfree_skb(skb);
return -ENOBUFS;
}
- kfree_skb(skb);
+ consume_skb(skb);
skb = skb2;
skb_set_owner_w(skb, sk);
}
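The kfree_skb() to consume_skb() switch above is semantic, not cosmetic: both release the buffer, but consume_skb() marks the normal end of a packet's life, so drop-monitoring tools do not count the successfully reallocated original as a loss; kfree_skb() stays on the error paths. A userspace model of that free-on-error versus free-on-success discipline (counters stand in for the kernel's tracepoints; all names are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long drops, consumed;

static void drop_buf(void *p)    { drops++;    free(p); }
static void consume_buf(void *p) { consumed++; free(p); }

/* grow a buffer, modeling skb_realloc_headroom() followed by the
 * release of the original skb */
static char *grow(char *old, size_t oldlen, size_t newlen)
{
        char *nb = calloc(1, newlen);
        if (!nb) {
                drop_buf(old);          /* error: kfree_skb() equivalent */
                return NULL;
        }
        memcpy(nb + (newlen - oldlen), old, oldlen);
        consume_buf(old);               /* success: consume_skb() */
        return nb;
}

int main(void)
{
        char *b = grow(strdup("data"), 5, 64);

        printf("drops=%lu consumed=%lu\n", drops, consumed); /* 0 1 */
        free(b);
        return 0;
}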
@@ -252,8 +252,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
dst->dev, dst_output);
}
- if (net_ratelimit())
- printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
+ net_dbg_ratelimited("IPv6: sending pkt_too_big to self\n");
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
@@ -644,7 +643,10 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
/* We must not fragment if the socket is set to force MTU discovery
* or if the skb it not generated by a local socket.
*/
- if (!skb->local_df && skb->len > mtu) {
+ if (unlikely(!skb->local_df && skb->len > mtu)) {
+ if (skb->sk && dst_allfrag(skb_dst(skb)))
+ sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
+
skb->dev = skb_dst(skb)->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
@@ -789,6 +791,10 @@ slow_path_clean:
}
slow_path:
+ if ((skb->ip_summed == CHECKSUM_PARTIAL) &&
+ skb_checksum_help(skb))
+ goto fail;
+
left = skb->len - hlen; /* Space per frame */
ptr = hlen; /* Where to start from */
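The new slow-path check resolves CHECKSUM_PARTIAL by calling skb_checksum_help() before the packet is cut up: the 16-bit ones'-complement transport checksum covers the whole payload, so a checksum the hardware was going to fill in later cannot be completed once the payload is split across fragments. For illustration, a minimal RFC 1071-style sum over a contiguous buffer (standalone code, not the kernel's csum implementation):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint16_t csum(const uint8_t *p, size_t n)
{
        uint32_t sum = 0;

        while (n > 1) {
                sum += (uint32_t)(p[0] << 8 | p[1]);
                p += 2;
                n -= 2;
        }
        if (n)                          /* odd trailing byte */
                sum += (uint32_t)p[0] << 8;
        while (sum >> 16)               /* fold the carries back in */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

int main(void)
{
        uint8_t payload[] = { 0xde, 0xad, 0xbe, 0xef, 0x01 };

        printf("0x%04x\n", csum(payload, sizeof(payload)));  /* 0x6162 */
        return 0;
}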
@@ -889,7 +895,7 @@ slow_path:
}
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_FRAGOKS);
- kfree_skb(skb);
+ consume_skb(skb);
return err;
fail:
@@ -1181,6 +1187,29 @@ static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}
+static void ip6_append_data_mtu(int *mtu,
+ int *maxfraglen,
+ unsigned int fragheaderlen,
+ struct sk_buff *skb,
+ struct rt6_info *rt)
+{
+ if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
+ if (skb == NULL) {
+ /* first fragment, reserve header_len */
+ *mtu = *mtu - rt->dst.header_len;
+
+ } else {
+ /*
+ * this fragment is not the first one; the
+ * header space is regarded as data space.
+ */
+ *mtu = dst_mtu(rt->dst.path);
+ }
+ *maxfraglen = ((*mtu - fragheaderlen) & ~7)
+ + fragheaderlen - sizeof(struct frag_hdr);
+ }
+}
+
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
int offset, int len, int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
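ip6_append_data_mtu() above recomputes mtu and maxfraglen when building over an xfrm tunnel dst: the first fragment reserves the tunnel's header_len, while later fragments fall back to the path MTU. The maxfraglen expression rounds the usable payload down to a multiple of 8 and reserves the 8-byte Fragment header. A worked example with illustrative numbers (assuming a 1500-byte MTU and a bare 40-byte IPv6 header with no extension headers):

#include <stdio.h>

int main(void)
{
        unsigned int mtu = 1500;
        unsigned int fragheaderlen = 40;        /* IPv6 header only */
        unsigned int frag_hdr_len = 8;          /* sizeof(struct frag_hdr) */

        /* round the usable payload down to a multiple of 8, then
         * reserve room for the Fragment header in every fragment */
        unsigned int maxfraglen =
                ((mtu - fragheaderlen) & ~7u) + fragheaderlen - frag_hdr_len;

        printf("maxfraglen = %u\n", maxfraglen);        /* 1488 */
        return 0;
}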
@@ -1190,7 +1219,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_cork *cork;
- struct sk_buff *skb;
+ struct sk_buff *skb, *skb_prev = NULL;
unsigned int maxfraglen, fragheaderlen;
int exthdrlen;
int dst_exthdrlen;
@@ -1199,7 +1228,6 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
int copy;
int err;
int offset = 0;
- int csummode = CHECKSUM_NONE;
__u8 tx_flags = 0;
if (flags&MSG_PROBE)
@@ -1248,8 +1276,12 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
inet->cork.fl.u.ip6 = *fl6;
np->cork.hop_limit = hlimit;
np->cork.tclass = tclass;
- mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
- rt->dst.dev->mtu : dst_mtu(&rt->dst);
+ if (rt->dst.flags & DST_XFRM_TUNNEL)
+ mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
+ rt->dst.dev->mtu : dst_mtu(&rt->dst);
+ else
+ mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
+ rt->dst.dev->mtu : dst_mtu(rt->dst.path);
if (np->frag_size < mtu) {
if (np->frag_size)
mtu = np->frag_size;
@@ -1345,25 +1377,27 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
unsigned int fraglen;
unsigned int fraggap;
unsigned int alloclen;
- struct sk_buff *skb_prev;
alloc_new_skb:
- skb_prev = skb;
-
/* There's no room in the current skb */
- if (skb_prev)
- fraggap = skb_prev->len - maxfraglen;
+ if (skb)
+ fraggap = skb->len - maxfraglen;
else
fraggap = 0;
+ /* update mtu and maxfraglen if necessary */
+ if (skb == NULL || skb_prev == NULL)
+ ip6_append_data_mtu(&mtu, &maxfraglen,
+ fragheaderlen, skb, rt);
+
+ skb_prev = skb;
/*
* If remaining data exceeds the mtu,
* we know we need more fragment(s).
*/
datalen = length + fraggap;
- if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
- datalen = maxfraglen - fragheaderlen;
- fraglen = datalen + fragheaderlen;
+ if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
+ datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
if ((flags & MSG_MORE) &&
!(rt->dst.dev->features&NETIF_F_SG))
alloclen = mtu;
@@ -1372,13 +1406,16 @@ alloc_new_skb:
alloclen += dst_exthdrlen;
- /*
- * The last fragment gets additional space at tail.
- * Note: we overallocate on fragments with MSG_MODE
- * because we have no idea if we're the last one.
- */
- if (datalen == length + fraggap)
- alloclen += rt->dst.trailer_len;
+ if (datalen != length + fraggap) {
+ /*
+ * this is not the last fragment; the trailer
+ * space is regarded as data space.
+ */
+ datalen += rt->dst.trailer_len;
+ }
+
+ alloclen += rt->dst.trailer_len;
+ fraglen = datalen + fragheaderlen;
/*
* We just reserve space for fragment header.
@@ -1412,7 +1449,7 @@ alloc_new_skb:
/*
* Fill in the control structures
*/
- skb->ip_summed = csummode;
+ skb->ip_summed = CHECKSUM_NONE;
skb->csum = 0;
/* reserve for fragmentation and ipsec header */
skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
@@ -1455,7 +1492,6 @@ alloc_new_skb:
transhdrlen = 0;
exthdrlen = 0;
dst_exthdrlen = 0;
- csummode = CHECKSUM_NONE;
/*
* Put the packet on the pending queue
@@ -1535,6 +1571,7 @@ error:
IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
return err;
}
+EXPORT_SYMBOL_GPL(ip6_append_data);
static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
{
@@ -1638,6 +1675,7 @@ error:
IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
goto out;
}
+EXPORT_SYMBOL_GPL(ip6_push_pending_frames);
void ip6_flush_pending_frames(struct sock *sk)
{
@@ -1652,3 +1690,4 @@ void ip6_flush_pending_frames(struct sock *sk)
ip6_cork_release(inet_sk(sk), inet6_sk(sk));
}
+EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index aa21da6a09cd..c9015fad8d65 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -18,6 +18,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
@@ -60,7 +62,7 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS_NETDEV("ip6tnl0");
#ifdef IP6_TNL_DEBUG
-#define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__)
+#define IP6_TNL_TRACE(x...) pr_debug("%s:" x "\n", __func__)
#else
#define IP6_TNL_TRACE(x...) do {;} while(0)
#endif
@@ -198,7 +200,7 @@ ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct ip6_tnl_parm *p)
{
const struct in6_addr *remote = &p->raddr;
const struct in6_addr *local = &p->laddr;
- unsigned h = 0;
+ unsigned int h = 0;
int prio = 0;
if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
@@ -460,19 +462,14 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
struct ipv6_tlv_tnl_enc_lim *tel;
__u32 mtu;
case ICMPV6_DEST_UNREACH:
- if (net_ratelimit())
- printk(KERN_WARNING
- "%s: Path to destination invalid "
- "or inactive!\n", t->parms.name);
+ net_warn_ratelimited("%s: Path to destination invalid or inactive!\n",
+ t->parms.name);
rel_msg = 1;
break;
case ICMPV6_TIME_EXCEED:
if ((*code) == ICMPV6_EXC_HOPLIMIT) {
- if (net_ratelimit())
- printk(KERN_WARNING
- "%s: Too small hop limit or "
- "routing loop in tunnel!\n",
- t->parms.name);
+ net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
+ t->parms.name);
rel_msg = 1;
}
break;
@@ -484,17 +481,13 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
if (teli && teli == *info - 2) {
tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
if (tel->encap_limit == 0) {
- if (net_ratelimit())
- printk(KERN_WARNING
- "%s: Too small encapsulation "
- "limit or routing loop in "
- "tunnel!\n", t->parms.name);
+ net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
+ t->parms.name);
rel_msg = 1;
}
- } else if (net_ratelimit()) {
- printk(KERN_WARNING
- "%s: Recipient unable to parse tunneled "
- "packet!\n ", t->parms.name);
+ } else {
+ net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
+ t->parms.name);
}
break;
case ICMPV6_PKT_TOOBIG:
@@ -825,7 +818,7 @@ static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
* 0 else
**/
-static inline int
+static inline bool
ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
{
return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
@@ -845,15 +838,12 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
ldev = dev_get_by_index_rcu(net, p->link);
if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0)))
- printk(KERN_WARNING
- "%s xmit: Local address not yet configured!\n",
- p->name);
+ pr_warn("%s xmit: Local address not yet configured!\n",
+ p->name);
else if (!ipv6_addr_is_multicast(&p->raddr) &&
unlikely(ipv6_chk_addr(net, &p->raddr, NULL, 0)))
- printk(KERN_WARNING
- "%s xmit: Routing loop! "
- "Remote address found on this node!\n",
- p->name);
+ pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
+ p->name);
else
ret = 1;
rcu_read_unlock();
@@ -919,10 +909,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
if (tdev == dev) {
stats->collisions++;
- if (net_ratelimit())
- printk(KERN_WARNING
- "%s: Local routing loop detected!\n",
- t->parms.name);
+ net_warn_ratelimited("%s: Local routing loop detected!\n",
+ t->parms.name);
goto tx_err_dst_release;
}
mtu = dst_mtu(dst) - sizeof (*ipv6h);
@@ -954,7 +942,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
if (skb->sk)
skb_set_owner_w(new_skb, skb->sk);
- kfree_skb(skb);
+ consume_skb(skb);
skb = new_skb;
}
skb_dst_drop(skb);
@@ -1553,13 +1541,13 @@ static int __init ip6_tunnel_init(void)
err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
if (err < 0) {
- printk(KERN_ERR "ip6_tunnel init: can't register ip4ip6\n");
+ pr_err("%s: can't register ip4ip6\n", __func__);
goto out_ip4ip6;
}
err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
if (err < 0) {
- printk(KERN_ERR "ip6_tunnel init: can't register ip6ip6\n");
+ pr_err("%s: can't register ip6ip6\n", __func__);
goto out_ip6ip6;
}
@@ -1580,10 +1568,10 @@ out_pernet:
static void __exit ip6_tunnel_cleanup(void)
{
if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
- printk(KERN_INFO "ip6_tunnel close: can't deregister ip4ip6\n");
+ pr_info("%s: can't deregister ip4ip6\n", __func__);
if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
- printk(KERN_INFO "ip6_tunnel close: can't deregister ip6ip6\n");
+ pr_info("%s: can't deregister ip6ip6\n", __func__);
unregister_pernet_device(&ip6_tnl_net_ops);
}
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 8110362e0af5..b15dc08643a4 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1147,8 +1147,7 @@ static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
*/
ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
if (ret < 0) {
- if (net_ratelimit())
- printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");
+ net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
kfree_skb(skb);
}
@@ -1351,7 +1350,7 @@ int __init ip6_mr_init(void)
goto reg_notif_fail;
#ifdef CONFIG_IPV6_PIMSM_V2
if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
- printk(KERN_ERR "ip6_mr_init: can't add PIM protocol\n");
+ pr_err("%s: can't add PIM protocol\n", __func__);
err = -EAGAIN;
goto add_proto_fail;
}
@@ -2215,14 +2214,15 @@ static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
rtm->rtm_src_len = 128;
rtm->rtm_tos = 0;
rtm->rtm_table = mrt->id;
- NLA_PUT_U32(skb, RTA_TABLE, mrt->id);
+ if (nla_put_u32(skb, RTA_TABLE, mrt->id))
+ goto nla_put_failure;
rtm->rtm_scope = RT_SCOPE_UNIVERSE;
rtm->rtm_protocol = RTPROT_UNSPEC;
rtm->rtm_flags = 0;
- NLA_PUT(skb, RTA_SRC, 16, &c->mf6c_origin);
- NLA_PUT(skb, RTA_DST, 16, &c->mf6c_mcastgrp);
-
+ if (nla_put(skb, RTA_SRC, 16, &c->mf6c_origin) ||
+ nla_put(skb, RTA_DST, 16, &c->mf6c_mcastgrp))
+ goto nla_put_failure;
if (__ip6mr_fill_mroute(mrt, skb, c, rtm) < 0)
goto nla_put_failure;
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index bba658d9a03c..5cb75bfe45b1 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -30,6 +30,9 @@
* The decompression of IP datagram MUST be done after the reassembly,
* AH/ESP processing.
*/
+
+#define pr_fmt(fmt) "IPv6: " fmt
+
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
@@ -69,8 +72,8 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (!x)
return;
- printk(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/%pI6\n",
- spi, &iph->daddr);
+ pr_debug("pmtu discovery on SA IPCOMP/%08x/%pI6\n",
+ spi, &iph->daddr);
xfrm_state_put(x);
}
@@ -190,11 +193,11 @@ static const struct inet6_protocol ipcomp6_protocol =
static int __init ipcomp6_init(void)
{
if (xfrm_register_type(&ipcomp6_type, AF_INET6) < 0) {
- printk(KERN_INFO "ipcomp6 init: can't add xfrm type\n");
+ pr_info("%s: can't add xfrm type\n", __func__);
return -EAGAIN;
}
if (inet6_add_protocol(&ipcomp6_protocol, IPPROTO_COMP) < 0) {
- printk(KERN_INFO "ipcomp6 init: can't add protocol\n");
+ pr_info("%s: can't add protocol\n", __func__);
xfrm_unregister_type(&ipcomp6_type, AF_INET6);
return -EAGAIN;
}
@@ -204,9 +207,9 @@ static int __init ipcomp6_init(void)
static void __exit ipcomp6_fini(void)
{
if (inet6_del_protocol(&ipcomp6_protocol, IPPROTO_COMP) < 0)
- printk(KERN_INFO "ipv6 ipcomp close: can't remove protocol\n");
+ pr_info("%s: can't remove protocol\n", __func__);
if (xfrm_unregister_type(&ipcomp6_type, AF_INET6) < 0)
- printk(KERN_INFO "ipv6 ipcomp close: can't remove xfrm type\n");
+ pr_info("%s: can't remove xfrm type\n", __func__);
}
module_init(ipcomp6_init);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 63dd1f89ed7d..ba6d13d1f1e1 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -678,7 +678,6 @@ done:
}
case MCAST_MSFILTER:
{
- extern int sysctl_mld_max_msf;
struct group_filter *gsf;
if (optlen < GROUP_FILTER_SIZE(0))
@@ -943,7 +942,7 @@ static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt,
}
static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen, unsigned flags)
+ char __user *optval, int __user *optlen, unsigned int flags)
{
struct ipv6_pinfo *np = inet6_sk(sk);
int len;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index b2869cab2092..6d0f5dc8e3a6 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -606,13 +606,13 @@ done:
return err;
}
-int inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
- const struct in6_addr *src_addr)
+bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
+ const struct in6_addr *src_addr)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct ipv6_mc_socklist *mc;
struct ip6_sf_socklist *psl;
- int rv = 1;
+ bool rv = true;
rcu_read_lock();
for_each_pmc_rcu(np, mc) {
@@ -621,7 +621,7 @@ int inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
}
if (!mc) {
rcu_read_unlock();
- return 1;
+ return true;
}
read_lock(&mc->sflock);
psl = mc->sflist;
@@ -635,9 +635,9 @@ int inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
break;
}
if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
- rv = 0;
+ rv = false;
if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
- rv = 0;
+ rv = false;
}
read_unlock(&mc->sflock);
rcu_read_unlock();
@@ -931,15 +931,15 @@ int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
/*
* identify MLD packets for MLD filter exceptions
*/
-int ipv6_is_mld(struct sk_buff *skb, int nexthdr)
+bool ipv6_is_mld(struct sk_buff *skb, int nexthdr)
{
struct icmp6hdr *pic;
if (nexthdr != IPPROTO_ICMPV6)
- return 0;
+ return false;
if (!pskb_may_pull(skb, sizeof(struct icmp6hdr)))
- return 0;
+ return false;
pic = icmp6_hdr(skb);
@@ -948,22 +948,22 @@ int ipv6_is_mld(struct sk_buff *skb, int nexthdr)
case ICMPV6_MGM_REPORT:
case ICMPV6_MGM_REDUCTION:
case ICMPV6_MLD2_REPORT:
- return 1;
+ return true;
default:
break;
}
- return 0;
+ return false;
}
/*
* check if the interface/address pair is valid
*/
-int ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
- const struct in6_addr *src_addr)
+bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
+ const struct in6_addr *src_addr)
{
struct inet6_dev *idev;
struct ifmcaddr6 *mc;
- int rv = 0;
+ bool rv = false;
rcu_read_lock();
idev = __in6_dev_get(dev);
@@ -990,7 +990,7 @@ int ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
rv = mc->mca_sfcount[MCAST_EXCLUDE] !=0;
spin_unlock_bh(&mc->mca_lock);
} else
- rv = 1; /* don't filter unspecified source */
+ rv = true; /* don't filter unspecified source */
}
read_unlock_bh(&idev->lock);
}
@@ -1046,8 +1046,8 @@ static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
}
/* mark EXCLUDE-mode sources */
-static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
- const struct in6_addr *srcs)
+static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
+ const struct in6_addr *srcs)
{
struct ip6_sf_list *psf;
int i, scount;
@@ -1061,7 +1061,7 @@ static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
if (psf->sf_count[MCAST_INCLUDE] ||
pmc->mca_sfcount[MCAST_EXCLUDE] !=
psf->sf_count[MCAST_EXCLUDE])
- continue;
+ break;
if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
scount++;
break;
@@ -1070,12 +1070,12 @@ static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
}
pmc->mca_flags &= ~MAF_GSQUERY;
if (scount == nsrcs) /* all sources excluded */
- return 0;
- return 1;
+ return false;
+ return true;
}
-static int mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
- const struct in6_addr *srcs)
+static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
+ const struct in6_addr *srcs)
{
struct ip6_sf_list *psf;
int i, scount;
@@ -1099,10 +1099,10 @@ static int mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
}
if (!scount) {
pmc->mca_flags &= ~MAF_GSQUERY;
- return 0;
+ return false;
}
pmc->mca_flags |= MAF_GSQUERY;
- return 1;
+ return true;
}
/* called with rcu_read_lock() */
@@ -1276,17 +1276,17 @@ int igmp6_event_report(struct sk_buff *skb)
return 0;
}
-static int is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
- int gdeleted, int sdeleted)
+static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
+ int gdeleted, int sdeleted)
{
switch (type) {
case MLD2_MODE_IS_INCLUDE:
case MLD2_MODE_IS_EXCLUDE:
if (gdeleted || sdeleted)
- return 0;
+ return false;
if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
if (pmc->mca_sfmode == MCAST_INCLUDE)
- return 1;
+ return true;
/* don't include if this source is excluded
* in all filters
*/
@@ -1295,29 +1295,29 @@ static int is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
return pmc->mca_sfcount[MCAST_EXCLUDE] ==
psf->sf_count[MCAST_EXCLUDE];
}
- return 0;
+ return false;
case MLD2_CHANGE_TO_INCLUDE:
if (gdeleted || sdeleted)
- return 0;
+ return false;
return psf->sf_count[MCAST_INCLUDE] != 0;
case MLD2_CHANGE_TO_EXCLUDE:
if (gdeleted || sdeleted)
- return 0;
+ return false;
if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
psf->sf_count[MCAST_INCLUDE])
- return 0;
+ return false;
return pmc->mca_sfcount[MCAST_EXCLUDE] ==
psf->sf_count[MCAST_EXCLUDE];
case MLD2_ALLOW_NEW_SOURCES:
if (gdeleted || !psf->sf_crcount)
- return 0;
+ return false;
return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
case MLD2_BLOCK_OLD_SOURCES:
if (pmc->mca_sfmode == MCAST_INCLUDE)
return gdeleted || (psf->sf_crcount && sdeleted);
return psf->sf_crcount && !gdeleted && !sdeleted;
}
- return 0;
+ return false;
}
static int
@@ -2627,8 +2627,7 @@ static int __net_init igmp6_net_init(struct net *net)
err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6,
SOCK_RAW, IPPROTO_ICMPV6, net);
if (err < 0) {
- printk(KERN_ERR
- "Failed to initialize the IGMP6 control socket (err %d).\n",
+ pr_err("Failed to initialize the IGMP6 control socket (err %d)\n",
err);
goto out;
}
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index 7e1e0fbfef21..5b087c31d87b 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -22,6 +22,8 @@
* Masahide NAKAMURA @USAGI
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/time.h>
@@ -44,7 +46,7 @@ static inline void *mip6_padn(__u8 *data, __u8 padlen)
if (!data)
return NULL;
if (padlen == 1) {
- data[0] = IPV6_TLV_PAD0;
+ data[0] = IPV6_TLV_PAD1;
} else if (padlen > 1) {
data[0] = IPV6_TLV_PADN;
data[1] = padlen - 2;
@@ -307,13 +309,12 @@ static int mip6_destopt_offset(struct xfrm_state *x, struct sk_buff *skb,
static int mip6_destopt_init_state(struct xfrm_state *x)
{
if (x->id.spi) {
- printk(KERN_INFO "%s: spi is not 0: %u\n", __func__,
- x->id.spi);
+ pr_info("%s: spi is not 0: %u\n", __func__, x->id.spi);
return -EINVAL;
}
if (x->props.mode != XFRM_MODE_ROUTEOPTIMIZATION) {
- printk(KERN_INFO "%s: state's mode is not %u: %u\n",
- __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode);
+ pr_info("%s: state's mode is not %u: %u\n",
+ __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode);
return -EINVAL;
}
@@ -443,13 +444,12 @@ static int mip6_rthdr_offset(struct xfrm_state *x, struct sk_buff *skb,
static int mip6_rthdr_init_state(struct xfrm_state *x)
{
if (x->id.spi) {
- printk(KERN_INFO "%s: spi is not 0: %u\n", __func__,
- x->id.spi);
+ pr_info("%s: spi is not 0: %u\n", __func__, x->id.spi);
return -EINVAL;
}
if (x->props.mode != XFRM_MODE_ROUTEOPTIMIZATION) {
- printk(KERN_INFO "%s: state's mode is not %u: %u\n",
- __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode);
+ pr_info("%s: state's mode is not %u: %u\n",
+ __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode);
return -EINVAL;
}
@@ -481,18 +481,18 @@ static const struct xfrm_type mip6_rthdr_type =
static int __init mip6_init(void)
{
- printk(KERN_INFO "Mobile IPv6\n");
+ pr_info("Mobile IPv6\n");
if (xfrm_register_type(&mip6_destopt_type, AF_INET6) < 0) {
- printk(KERN_INFO "%s: can't add xfrm type(destopt)\n", __func__);
+ pr_info("%s: can't add xfrm type(destopt)\n", __func__);
goto mip6_destopt_xfrm_fail;
}
if (xfrm_register_type(&mip6_rthdr_type, AF_INET6) < 0) {
- printk(KERN_INFO "%s: can't add xfrm type(rthdr)\n", __func__);
+ pr_info("%s: can't add xfrm type(rthdr)\n", __func__);
goto mip6_rthdr_xfrm_fail;
}
if (rawv6_mh_filter_register(mip6_mh_filter) < 0) {
- printk(KERN_INFO "%s: can't add rawv6 mh filter\n", __func__);
+ pr_info("%s: can't add rawv6 mh filter\n", __func__);
goto mip6_rawv6_mh_fail;
}
@@ -510,11 +510,11 @@ static int __init mip6_init(void)
static void __exit mip6_fini(void)
{
if (rawv6_mh_filter_unregister(mip6_mh_filter) < 0)
- printk(KERN_INFO "%s: can't remove rawv6 mh filter\n", __func__);
+ pr_info("%s: can't remove rawv6 mh filter\n", __func__);
if (xfrm_unregister_type(&mip6_rthdr_type, AF_INET6) < 0)
- printk(KERN_INFO "%s: can't remove xfrm type(rthdr)\n", __func__);
+ pr_info("%s: can't remove xfrm type(rthdr)\n", __func__);
if (xfrm_unregister_type(&mip6_destopt_type, AF_INET6) < 0)
- printk(KERN_INFO "%s: can't remove xfrm type(destopt)\n", __func__);
+ pr_info("%s: can't remove xfrm type(destopt)\n", __func__);
}
module_init(mip6_init);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 3dcdb81ec3e8..54f62d3b8dd6 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -15,6 +15,7 @@
/*
* Changes:
*
+ * Alexey I. Froloff : RFC6106 (DNSSL) support
* Pierre Ynard : export userland ND options
* through netlink (RDNSS support)
* Lars Fenneberg : fixed MTU setting on receipt
@@ -26,27 +27,7 @@
* YOSHIFUJI Hideaki @USAGI : Verify ND options properly
*/
-/* Set to 3 to get tracing... */
-#define ND_DEBUG 1
-
-#define ND_PRINTK(fmt, args...) do { if (net_ratelimit()) { printk(fmt, ## args); } } while(0)
-#define ND_NOPRINTK(x...) do { ; } while(0)
-#define ND_PRINTK0 ND_PRINTK
-#define ND_PRINTK1 ND_NOPRINTK
-#define ND_PRINTK2 ND_NOPRINTK
-#define ND_PRINTK3 ND_NOPRINTK
-#if ND_DEBUG >= 1
-#undef ND_PRINTK1
-#define ND_PRINTK1 ND_PRINTK
-#endif
-#if ND_DEBUG >= 2
-#undef ND_PRINTK2
-#define ND_PRINTK2 ND_PRINTK
-#endif
-#if ND_DEBUG >= 3
-#undef ND_PRINTK3
-#define ND_PRINTK3 ND_PRINTK
-#endif
+#define pr_fmt(fmt) "ICMPv6: " fmt
#include <linux/module.h>
#include <linux/errno.h>
@@ -91,6 +72,15 @@
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
+/* Set to 3 to get tracing... */
+#define ND_DEBUG 1
+
+#define ND_PRINTK(val, level, fmt, ...) \
+do { \
+ if (val <= ND_DEBUG) \
+ net_##level##_ratelimited(fmt, ##__VA_ARGS__); \
+} while (0)
+
static u32 ndisc_hash(const void *pkey,
const struct net_device *dev,
__u32 *hash_rnd);
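The rewritten ND_PRINTK() collapses the old ND_PRINTK0..3 family into one macro: the severity name is token-pasted into the helper name, so ND_PRINTK(2, warn, ...) expands to a net_warn_ratelimited() call guarded by the compile-time ND_DEBUG level. A userspace model (the *_ratelimited stand-ins here just print; the kernel versions also rate-limit):

#include <stdio.h>

#define ND_DEBUG 1

/* stand-ins for the kernel's net_*_ratelimited() helpers */
#define net_warn_ratelimited(fmt, ...) fprintf(stderr, "warn: " fmt, ##__VA_ARGS__)
#define net_dbg_ratelimited(fmt, ...)  fprintf(stderr, "dbg: "  fmt, ##__VA_ARGS__)

/* the patch's macro: ## pastes the severity into the helper name */
#define ND_PRINTK(val, level, fmt, ...)                         \
do {                                                            \
        if (val <= ND_DEBUG)                                    \
                net_##level##_ratelimited(fmt, ##__VA_ARGS__);  \
} while (0)

int main(void)
{
        ND_PRINTK(1, warn, "emitted at level %d\n", 1);
        ND_PRINTK(3, dbg, "suppressed: 3 > ND_DEBUG\n");
        return 0;
}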
@@ -228,7 +218,8 @@ static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur,
static inline int ndisc_is_useropt(struct nd_opt_hdr *opt)
{
- return opt->nd_opt_type == ND_OPT_RDNSS;
+ return opt->nd_opt_type == ND_OPT_RDNSS ||
+ opt->nd_opt_type == ND_OPT_DNSSL;
}
static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur,
@@ -263,10 +254,9 @@ static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
case ND_OPT_MTU:
case ND_OPT_REDIRECT_HDR:
if (ndopts->nd_opt_array[nd_opt->nd_opt_type]) {
- ND_PRINTK2(KERN_WARNING
- "%s(): duplicated ND6 option found: type=%d\n",
- __func__,
- nd_opt->nd_opt_type);
+ ND_PRINTK(2, warn,
+ "%s: duplicated ND6 option found: type=%d\n",
+ __func__, nd_opt->nd_opt_type);
} else {
ndopts->nd_opt_array[nd_opt->nd_opt_type] = nd_opt;
}
@@ -294,10 +284,11 @@ static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
* to accommodate future extension to the
* protocol.
*/
- ND_PRINTK2(KERN_NOTICE
- "%s(): ignored unsupported option; type=%d, len=%d\n",
- __func__,
- nd_opt->nd_opt_type, nd_opt->nd_opt_len);
+ ND_PRINTK(2, notice,
+ "%s: ignored unsupported option; type=%d, len=%d\n",
+ __func__,
+ nd_opt->nd_opt_type,
+ nd_opt->nd_opt_len);
}
}
opt_len -= l;
@@ -325,9 +316,6 @@ int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev,
case ARPHRD_FDDI:
ipv6_eth_mc_map(addr, buf);
return 0;
- case ARPHRD_IEEE802_TR:
- ipv6_tr_mc_map(addr,buf);
- return 0;
case ARPHRD_ARCNET:
ipv6_arcnet_mc_map(addr, buf);
return 0;
@@ -360,7 +348,7 @@ static int ndisc_constructor(struct neighbour *neigh)
struct net_device *dev = neigh->dev;
struct inet6_dev *in6_dev;
struct neigh_parms *parms;
- int is_multicast = ipv6_addr_is_multicast(addr);
+ bool is_multicast = ipv6_addr_is_multicast(addr);
in6_dev = in6_dev_get(dev);
if (in6_dev == NULL) {
@@ -456,9 +444,8 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
len + hlen + tlen),
1, &err);
if (!skb) {
- ND_PRINTK0(KERN_ERR
- "ICMPv6 ND: %s() failed to allocate an skb, err=%d.\n",
- __func__, err);
+ ND_PRINTK(0, err, "ND: %s failed to allocate an skb, err=%d\n",
+ __func__, err);
return NULL;
}
@@ -694,8 +681,9 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
if ((probes -= neigh->parms->ucast_probes) < 0) {
if (!(neigh->nud_state & NUD_VALID)) {
- ND_PRINTK1(KERN_DEBUG "%s(): trying to ucast probe in NUD_INVALID: %pI6\n",
- __func__, target);
+ ND_PRINTK(1, dbg,
+ "%s: trying to ucast probe in NUD_INVALID: %pI6\n",
+ __func__, target);
}
ndisc_send_ns(dev, neigh, target, target, saddr);
} else if ((probes -= neigh->parms->app_probes) < 0) {
@@ -737,12 +725,11 @@ static void ndisc_recv_ns(struct sk_buff *skb)
struct inet6_dev *idev = NULL;
struct neighbour *neigh;
int dad = ipv6_addr_any(saddr);
- int inc;
+ bool inc;
int is_router = -1;
if (ipv6_addr_is_multicast(&msg->target)) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 NS: multicast target address");
+ ND_PRINTK(2, warn, "NS: multicast target address\n");
return;
}
@@ -755,22 +742,20 @@ static void ndisc_recv_ns(struct sk_buff *skb)
daddr->s6_addr32[1] == htonl(0x00000000) &&
daddr->s6_addr32[2] == htonl(0x00000001) &&
daddr->s6_addr [12] == 0xff )) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 NS: bad DAD packet (wrong destination)\n");
+ ND_PRINTK(2, warn, "NS: bad DAD packet (wrong destination)\n");
return;
}
if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 NS: invalid ND options\n");
+ ND_PRINTK(2, warn, "NS: invalid ND options\n");
return;
}
if (ndopts.nd_opts_src_lladdr) {
lladdr = ndisc_opt_addr_data(ndopts.nd_opts_src_lladdr, dev);
if (!lladdr) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 NS: invalid link-layer address length\n");
+ ND_PRINTK(2, warn,
+ "NS: invalid link-layer address length\n");
return;
}
@@ -780,8 +765,8 @@ static void ndisc_recv_ns(struct sk_buff *skb)
* in the message.
*/
if (dad) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 NS: bad DAD packet (link-layer address option)\n");
+ ND_PRINTK(2, warn,
+ "NS: bad DAD packet (link-layer address option)\n");
return;
}
}
@@ -793,20 +778,6 @@ static void ndisc_recv_ns(struct sk_buff *skb)
if (ifp->flags & (IFA_F_TENTATIVE|IFA_F_OPTIMISTIC)) {
if (dad) {
- if (dev->type == ARPHRD_IEEE802_TR) {
- const unsigned char *sadr;
- sadr = skb_mac_header(skb);
- if (((sadr[8] ^ dev->dev_addr[0]) & 0x7f) == 0 &&
- sadr[9] == dev->dev_addr[1] &&
- sadr[10] == dev->dev_addr[2] &&
- sadr[11] == dev->dev_addr[3] &&
- sadr[12] == dev->dev_addr[4] &&
- sadr[13] == dev->dev_addr[5]) {
- /* looped-back to us */
- goto out;
- }
- }
-
/*
* We are colliding with another node
* who is doing DAD
@@ -913,34 +884,30 @@ static void ndisc_recv_na(struct sk_buff *skb)
struct neighbour *neigh;
if (skb->len < sizeof(struct nd_msg)) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 NA: packet too short\n");
+ ND_PRINTK(2, warn, "NA: packet too short\n");
return;
}
if (ipv6_addr_is_multicast(&msg->target)) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 NA: target address is multicast.\n");
+ ND_PRINTK(2, warn, "NA: target address is multicast\n");
return;
}
if (ipv6_addr_is_multicast(daddr) &&
msg->icmph.icmp6_solicited) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 NA: solicited NA is multicasted.\n");
+ ND_PRINTK(2, warn, "NA: solicited NA is multicasted\n");
return;
}
if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 NS: invalid ND option\n");
+ ND_PRINTK(2, warn, "NS: invalid ND option\n");
return;
}
if (ndopts.nd_opts_tgt_lladdr) {
lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr, dev);
if (!lladdr) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 NA: invalid link-layer address length\n");
+ ND_PRINTK(2, warn,
+ "NA: invalid link-layer address length\n");
return;
}
}
@@ -961,9 +928,9 @@ static void ndisc_recv_na(struct sk_buff *skb)
unsolicited advertisement.
*/
if (skb->pkt_type != PACKET_LOOPBACK)
- ND_PRINTK1(KERN_WARNING
- "ICMPv6 NA: someone advertises our address %pI6 on %s!\n",
- &ifp->addr, ifp->idev->dev->name);
+ ND_PRINTK(1, warn,
+ "NA: someone advertises our address %pI6 on %s!\n",
+ &ifp->addr, ifp->idev->dev->name);
in6_ifa_put(ifp);
return;
}
@@ -1025,8 +992,7 @@ static void ndisc_recv_rs(struct sk_buff *skb)
idev = __in6_dev_get(skb->dev);
if (!idev) {
- if (net_ratelimit())
- ND_PRINTK1("ICMP6 RS: can't find in6 device\n");
+ ND_PRINTK(1, err, "RS: can't find in6 device\n");
return;
}
@@ -1043,8 +1009,7 @@ static void ndisc_recv_rs(struct sk_buff *skb)
/* Parse ND options */
if (!ndisc_parse_options(rs_msg->opt, ndoptlen, &ndopts)) {
- if (net_ratelimit())
- ND_PRINTK2("ICMP6 NS: invalid ND option, ignored\n");
+ ND_PRINTK(2, notice, "NS: invalid ND option, ignored\n");
goto out;
}
@@ -1099,8 +1064,9 @@ static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt)
memcpy(ndmsg + 1, opt, opt->nd_opt_len << 3);
- NLA_PUT(skb, NDUSEROPT_SRCADDR, sizeof(struct in6_addr),
- &ipv6_hdr(ra)->saddr);
+ if (nla_put(skb, NDUSEROPT_SRCADDR, sizeof(struct in6_addr),
+ &ipv6_hdr(ra)->saddr))
+ goto nla_put_failure;
nlmsg_end(skb, nlh);
rtnl_notify(skb, net, 0, RTNLGRP_ND_USEROPT, NULL, GFP_ATOMIC);
@@ -1141,20 +1107,17 @@ static void ndisc_router_discovery(struct sk_buff *skb)
optlen = (skb->tail - skb->transport_header) - sizeof(struct ra_msg);
if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 RA: source address is not link-local.\n");
+ ND_PRINTK(2, warn, "RA: source address is not link-local\n");
return;
}
if (optlen < 0) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 RA: packet too short\n");
+ ND_PRINTK(2, warn, "RA: packet too short\n");
return;
}
#ifdef CONFIG_IPV6_NDISC_NODETYPE
if (skb->ndisc_nodetype == NDISC_NODETYPE_HOST) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 RA: from host or unauthorized router\n");
+ ND_PRINTK(2, warn, "RA: from host or unauthorized router\n");
return;
}
#endif
@@ -1165,15 +1128,13 @@ static void ndisc_router_discovery(struct sk_buff *skb)
in6_dev = __in6_dev_get(skb->dev);
if (in6_dev == NULL) {
- ND_PRINTK0(KERN_ERR
- "ICMPv6 RA: can't find inet6 device for %s.\n",
- skb->dev->name);
+ ND_PRINTK(0, err, "RA: can't find inet6 device for %s\n",
+ skb->dev->name);
return;
}
if (!ndisc_parse_options(opt, optlen, &ndopts)) {
- ND_PRINTK2(KERN_WARNING
- "ICMP6 RA: invalid ND options\n");
+ ND_PRINTK(2, warn, "RA: invalid ND options\n");
return;
}
@@ -1226,9 +1187,9 @@ static void ndisc_router_discovery(struct sk_buff *skb)
if (rt) {
neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr);
if (!neigh) {
- ND_PRINTK0(KERN_ERR
- "ICMPv6 RA: %s() got default router without neighbour.\n",
- __func__);
+ ND_PRINTK(0, err,
+ "RA: %s got default router without neighbour\n",
+ __func__);
dst_release(&rt->dst);
return;
}
@@ -1239,22 +1200,21 @@ static void ndisc_router_discovery(struct sk_buff *skb)
}
if (rt == NULL && lifetime) {
- ND_PRINTK3(KERN_DEBUG
- "ICMPv6 RA: adding default router.\n");
+ ND_PRINTK(3, dbg, "RA: adding default router\n");
rt = rt6_add_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev, pref);
if (rt == NULL) {
- ND_PRINTK0(KERN_ERR
- "ICMPv6 RA: %s() failed to add default route.\n",
- __func__);
+ ND_PRINTK(0, err,
+ "RA: %s failed to add default route\n",
+ __func__);
return;
}
neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr);
if (neigh == NULL) {
- ND_PRINTK0(KERN_ERR
- "ICMPv6 RA: %s() got default router without neighbour.\n",
- __func__);
+ ND_PRINTK(0, err,
+ "RA: %s got default router without neighbour\n",
+ __func__);
dst_release(&rt->dst);
return;
}
@@ -1264,8 +1224,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
}
if (rt)
- rt->dst.expires = jiffies + (HZ * lifetime);
-
+ rt6_set_expires(rt, jiffies + (HZ * lifetime));
if (ra_msg->icmph.icmp6_hop_limit) {
in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
if (rt)
@@ -1323,8 +1282,8 @@ skip_linkparms:
lladdr = ndisc_opt_addr_data(ndopts.nd_opts_src_lladdr,
skb->dev);
if (!lladdr) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 RA: invalid link-layer address length\n");
+ ND_PRINTK(2, warn,
+ "RA: invalid link-layer address length\n");
goto out;
}
}
@@ -1388,9 +1347,7 @@ skip_routeinfo:
mtu = ntohl(n);
if (mtu < IPV6_MIN_MTU || mtu > skb->dev->mtu) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 RA: invalid mtu: %d\n",
- mtu);
+ ND_PRINTK(2, warn, "RA: invalid mtu: %d\n", mtu);
} else if (in6_dev->cnf.mtu6 != mtu) {
in6_dev->cnf.mtu6 = mtu;
@@ -1411,8 +1368,7 @@ skip_routeinfo:
}
if (ndopts.nd_opts_tgt_lladdr || ndopts.nd_opts_rh) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 RA: invalid RA options");
+ ND_PRINTK(2, warn, "RA: invalid RA options\n");
}
out:
if (rt)
@@ -1437,15 +1393,15 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
switch (skb->ndisc_nodetype) {
case NDISC_NODETYPE_HOST:
case NDISC_NODETYPE_NODEFAULT:
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 Redirect: from host or unauthorized router\n");
+ ND_PRINTK(2, warn,
+ "Redirect: from host or unauthorized router\n");
return;
}
#endif
if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 Redirect: source address is not link-local.\n");
+ ND_PRINTK(2, warn,
+ "Redirect: source address is not link-local\n");
return;
}
@@ -1453,8 +1409,7 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
optlen -= sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr);
if (optlen < 0) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 Redirect: packet too short\n");
+ ND_PRINTK(2, warn, "Redirect: packet too short\n");
return;
}
@@ -1463,8 +1418,8 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
dest = target + 1;
if (ipv6_addr_is_multicast(dest)) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 Redirect: destination address is multicast.\n");
+ ND_PRINTK(2, warn,
+ "Redirect: destination address is multicast\n");
return;
}
@@ -1472,8 +1427,8 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
on_link = 1;
} else if (ipv6_addr_type(target) !=
(IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 Redirect: target address is not link-local unicast.\n");
+ ND_PRINTK(2, warn,
+ "Redirect: target address is not link-local unicast\n");
return;
}
@@ -1489,16 +1444,15 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
*/
if (!ndisc_parse_options((u8*)(dest + 1), optlen, &ndopts)) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 Redirect: invalid ND options\n");
+ ND_PRINTK(2, warn, "Redirect: invalid ND options\n");
return;
}
if (ndopts.nd_opts_tgt_lladdr) {
lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
skb->dev);
if (!lladdr) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 Redirect: invalid link-layer address length\n");
+ ND_PRINTK(2, warn,
+ "Redirect: invalid link-layer address length\n");
return;
}
}
@@ -1533,16 +1487,15 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
u8 ha_buf[MAX_ADDR_LEN], *ha = NULL;
if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 Redirect: no link-local address on %s\n",
- dev->name);
+ ND_PRINTK(2, warn, "Redirect: no link-local address on %s\n",
+ dev->name);
return;
}
if (!ipv6_addr_equal(&ipv6_hdr(skb)->daddr, target) &&
ipv6_addr_type(target) != (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 Redirect: target address is not link-local unicast.\n");
+ ND_PRINTK(2, warn,
+ "Redirect: target address is not link-local unicast\n");
return;
}
@@ -1561,8 +1514,8 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
rt = (struct rt6_info *) dst;
if (rt->rt6i_flags & RTF_GATEWAY) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 Redirect: destination is not a neighbour.\n");
+ ND_PRINTK(2, warn,
+ "Redirect: destination is not a neighbour\n");
goto release;
}
if (!rt->rt6i_peer)
@@ -1573,8 +1526,8 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
if (dev->addr_len) {
struct neighbour *neigh = dst_neigh_lookup(skb_dst(skb), target);
if (!neigh) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 Redirect: no neigh for target address\n");
+ ND_PRINTK(2, warn,
+ "Redirect: no neigh for target address\n");
goto release;
}
@@ -1602,9 +1555,9 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
len + hlen + tlen),
1, &err);
if (buff == NULL) {
- ND_PRINTK0(KERN_ERR
- "ICMPv6 Redirect: %s() failed to allocate an skb, err=%d.\n",
- __func__, err);
+ ND_PRINTK(0, err,
+ "Redirect: %s failed to allocate an skb, err=%d\n",
+ __func__, err);
goto release;
}
@@ -1689,16 +1642,14 @@ int ndisc_rcv(struct sk_buff *skb)
__skb_push(skb, skb->data - skb_transport_header(skb));
if (ipv6_hdr(skb)->hop_limit != 255) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 NDISC: invalid hop-limit: %d\n",
- ipv6_hdr(skb)->hop_limit);
+ ND_PRINTK(2, warn, "NDISC: invalid hop-limit: %d\n",
+ ipv6_hdr(skb)->hop_limit);
return 0;
}
if (msg->icmph.icmp6_code != 0) {
- ND_PRINTK2(KERN_WARNING
- "ICMPv6 NDISC: invalid ICMPv6 code: %d\n",
- msg->icmph.icmp6_code);
+ ND_PRINTK(2, warn, "NDISC: invalid ICMPv6 code: %d\n",
+ msg->icmph.icmp6_code);
return 0;
}
@@ -1765,11 +1716,7 @@ static void ndisc_warn_deprecated_sysctl(struct ctl_table *ctl,
static int warned;
if (strcmp(warncomm, current->comm) && warned < 5) {
strcpy(warncomm, current->comm);
- printk(KERN_WARNING
- "process `%s' is using deprecated sysctl (%s) "
- "net.ipv6.neigh.%s.%s; "
- "Use net.ipv6.neigh.%s.%s_ms "
- "instead.\n",
+ pr_warn("process `%s' is using deprecated sysctl (%s) net.ipv6.neigh.%s.%s - use net.ipv6.neigh.%s.%s_ms instead\n",
warncomm, func,
dev_name, ctl->procname,
dev_name, ctl->procname);
@@ -1823,9 +1770,9 @@ static int __net_init ndisc_net_init(struct net *net)
err = inet_ctl_sock_create(&sk, PF_INET6,
SOCK_RAW, IPPROTO_ICMPV6, net);
if (err < 0) {
- ND_PRINTK0(KERN_ERR
- "ICMPv6 NDISC: Failed to initialize the control socket (err %d).\n",
- err);
+ ND_PRINTK(0, err,
+ "NDISC: Failed to initialize the control socket (err %d)\n",
+ err);
return err;
}
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index d33cddd16fbb..10135342799e 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -25,28 +25,6 @@ config NF_CONNTRACK_IPV6
To compile it as a module, choose M here. If unsure, say N.
-config IP6_NF_QUEUE
- tristate "IP6 Userspace queueing via NETLINK (OBSOLETE)"
- depends on INET && IPV6 && NETFILTER
- depends on NETFILTER_ADVANCED
- ---help---
-
- This option adds a queue handler to the kernel for IPv6
- packets which enables users to receive the filtered packets
- with QUEUE target using libipq.
-
- This option enables the old IPv6-only "ip6_queue" implementation
- which has been obsoleted by the new "nfnetlink_queue" code (see
- CONFIG_NETFILTER_NETLINK_QUEUE).
-
- (C) Fernando Anton 2001
- IPv64 Project - Work based in IPv64 draft by Arturo Azcorra.
- Universidad Carlos III de Madrid
- Universidad Politecnica de Alcala de Henares
- email: <fanton@it.uc3m.es>.
-
- To compile it as a module, choose M here. If unsure, say N.
-
config IP6_NF_IPTABLES
tristate "IP6 tables support (required for filtering)"
depends on INET && IPV6
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index d4dfd0a21097..534d3f216f7b 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -6,7 +6,6 @@
obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o
obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
-obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o
obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
deleted file mode 100644
index a34c9e4c792c..000000000000
--- a/net/ipv6/netfilter/ip6_queue.c
+++ /dev/null
@@ -1,641 +0,0 @@
-/*
- * This is a module which is used for queueing IPv6 packets and
- * communicating with userspace via netlink.
- *
- * (C) 2001 Fernando Anton, this code is GPL.
- * IPv64 Project - Work based in IPv64 draft by Arturo Azcorra.
- * Universidad Carlos III de Madrid - Leganes (Madrid) - Spain
- * Universidad Politecnica de Alcala de Henares - Alcala de H. (Madrid) - Spain
- * email: fanton@it.uc3m.es
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <linux/ipv6.h>
-#include <linux/notifier.h>
-#include <linux/netdevice.h>
-#include <linux/netfilter.h>
-#include <linux/netlink.h>
-#include <linux/spinlock.h>
-#include <linux/sysctl.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <net/net_namespace.h>
-#include <net/sock.h>
-#include <net/ipv6.h>
-#include <net/ip6_route.h>
-#include <net/netfilter/nf_queue.h>
-#include <linux/netfilter_ipv4/ip_queue.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv6/ip6_tables.h>
-
-#define IPQ_QMAX_DEFAULT 1024
-#define IPQ_PROC_FS_NAME "ip6_queue"
-#define NET_IPQ_QMAX_NAME "ip6_queue_maxlen"
-
-typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long);
-
-static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
-static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
-static DEFINE_SPINLOCK(queue_lock);
-static int peer_pid __read_mostly;
-static unsigned int copy_range __read_mostly;
-static unsigned int queue_total;
-static unsigned int queue_dropped = 0;
-static unsigned int queue_user_dropped = 0;
-static struct sock *ipqnl __read_mostly;
-static LIST_HEAD(queue_list);
-static DEFINE_MUTEX(ipqnl_mutex);
-
-static inline void
-__ipq_enqueue_entry(struct nf_queue_entry *entry)
-{
- list_add_tail(&entry->list, &queue_list);
- queue_total++;
-}
-
-static inline int
-__ipq_set_mode(unsigned char mode, unsigned int range)
-{
- int status = 0;
-
- switch(mode) {
- case IPQ_COPY_NONE:
- case IPQ_COPY_META:
- copy_mode = mode;
- copy_range = 0;
- break;
-
- case IPQ_COPY_PACKET:
- if (range > 0xFFFF)
- range = 0xFFFF;
- copy_range = range;
- copy_mode = mode;
- break;
-
- default:
- status = -EINVAL;
-
- }
- return status;
-}
-
-static void __ipq_flush(ipq_cmpfn cmpfn, unsigned long data);
-
-static inline void
-__ipq_reset(void)
-{
- peer_pid = 0;
- net_disable_timestamp();
- __ipq_set_mode(IPQ_COPY_NONE, 0);
- __ipq_flush(NULL, 0);
-}
-
-static struct nf_queue_entry *
-ipq_find_dequeue_entry(unsigned long id)
-{
- struct nf_queue_entry *entry = NULL, *i;
-
- spin_lock_bh(&queue_lock);
-
- list_for_each_entry(i, &queue_list, list) {
- if ((unsigned long)i == id) {
- entry = i;
- break;
- }
- }
-
- if (entry) {
- list_del(&entry->list);
- queue_total--;
- }
-
- spin_unlock_bh(&queue_lock);
- return entry;
-}
-
-static void
-__ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
-{
- struct nf_queue_entry *entry, *next;
-
- list_for_each_entry_safe(entry, next, &queue_list, list) {
- if (!cmpfn || cmpfn(entry, data)) {
- list_del(&entry->list);
- queue_total--;
- nf_reinject(entry, NF_DROP);
- }
- }
-}
-
-static void
-ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
-{
- spin_lock_bh(&queue_lock);
- __ipq_flush(cmpfn, data);
- spin_unlock_bh(&queue_lock);
-}
-
-static struct sk_buff *
-ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
-{
- sk_buff_data_t old_tail;
- size_t size = 0;
- size_t data_len = 0;
- struct sk_buff *skb;
- struct ipq_packet_msg *pmsg;
- struct nlmsghdr *nlh;
- struct timeval tv;
-
- switch (ACCESS_ONCE(copy_mode)) {
- case IPQ_COPY_META:
- case IPQ_COPY_NONE:
- size = NLMSG_SPACE(sizeof(*pmsg));
- break;
-
- case IPQ_COPY_PACKET:
- if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
- (*errp = skb_checksum_help(entry->skb)))
- return NULL;
-
- data_len = ACCESS_ONCE(copy_range);
- if (data_len == 0 || data_len > entry->skb->len)
- data_len = entry->skb->len;
-
- size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
- break;
-
- default:
- *errp = -EINVAL;
- return NULL;
- }
-
- skb = alloc_skb(size, GFP_ATOMIC);
- if (!skb)
- goto nlmsg_failure;
-
- old_tail = skb->tail;
- nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh));
- pmsg = NLMSG_DATA(nlh);
- memset(pmsg, 0, sizeof(*pmsg));
-
- pmsg->packet_id = (unsigned long )entry;
- pmsg->data_len = data_len;
- tv = ktime_to_timeval(entry->skb->tstamp);
- pmsg->timestamp_sec = tv.tv_sec;
- pmsg->timestamp_usec = tv.tv_usec;
- pmsg->mark = entry->skb->mark;
- pmsg->hook = entry->hook;
- pmsg->hw_protocol = entry->skb->protocol;
-
- if (entry->indev)
- strcpy(pmsg->indev_name, entry->indev->name);
- else
- pmsg->indev_name[0] = '\0';
-
- if (entry->outdev)
- strcpy(pmsg->outdev_name, entry->outdev->name);
- else
- pmsg->outdev_name[0] = '\0';
-
- if (entry->indev && entry->skb->dev &&
- entry->skb->mac_header != entry->skb->network_header) {
- pmsg->hw_type = entry->skb->dev->type;
- pmsg->hw_addrlen = dev_parse_header(entry->skb, pmsg->hw_addr);
- }
-
- if (data_len)
- if (skb_copy_bits(entry->skb, 0, pmsg->payload, data_len))
- BUG();
-
- nlh->nlmsg_len = skb->tail - old_tail;
- return skb;
-
-nlmsg_failure:
- kfree_skb(skb);
- *errp = -EINVAL;
- printk(KERN_ERR "ip6_queue: error creating packet message\n");
- return NULL;
-}
-
-static int
-ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
-{
- int status = -EINVAL;
- struct sk_buff *nskb;
-
- if (copy_mode == IPQ_COPY_NONE)
- return -EAGAIN;
-
- nskb = ipq_build_packet_message(entry, &status);
- if (nskb == NULL)
- return status;
-
- spin_lock_bh(&queue_lock);
-
- if (!peer_pid)
- goto err_out_free_nskb;
-
- if (queue_total >= queue_maxlen) {
- queue_dropped++;
- status = -ENOSPC;
- if (net_ratelimit())
- printk (KERN_WARNING "ip6_queue: fill at %d entries, "
- "dropping packet(s). Dropped: %d\n", queue_total,
- queue_dropped);
- goto err_out_free_nskb;
- }
-
- /* netlink_unicast will either free the nskb or attach it to a socket */
- status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT);
- if (status < 0) {
- queue_user_dropped++;
- goto err_out_unlock;
- }
-
- __ipq_enqueue_entry(entry);
-
- spin_unlock_bh(&queue_lock);
- return status;
-
-err_out_free_nskb:
- kfree_skb(nskb);
-
-err_out_unlock:
- spin_unlock_bh(&queue_lock);
- return status;
-}
-
-static int
-ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
-{
- int diff;
- struct ipv6hdr *user_iph = (struct ipv6hdr *)v->payload;
- struct sk_buff *nskb;
-
- if (v->data_len < sizeof(*user_iph))
- return 0;
- diff = v->data_len - e->skb->len;
- if (diff < 0) {
- if (pskb_trim(e->skb, v->data_len))
- return -ENOMEM;
- } else if (diff > 0) {
- if (v->data_len > 0xFFFF)
- return -EINVAL;
- if (diff > skb_tailroom(e->skb)) {
- nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
- diff, GFP_ATOMIC);
- if (!nskb) {
- printk(KERN_WARNING "ip6_queue: OOM "
- "in mangle, dropping packet\n");
- return -ENOMEM;
- }
- kfree_skb(e->skb);
- e->skb = nskb;
- }
- skb_put(e->skb, diff);
- }
- if (!skb_make_writable(e->skb, v->data_len))
- return -ENOMEM;
- skb_copy_to_linear_data(e->skb, v->payload, v->data_len);
- e->skb->ip_summed = CHECKSUM_NONE;
-
- return 0;
-}
-
-static int
-ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
-{
- struct nf_queue_entry *entry;
-
- if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN)
- return -EINVAL;
-
- entry = ipq_find_dequeue_entry(vmsg->id);
- if (entry == NULL)
- return -ENOENT;
- else {
- int verdict = vmsg->value;
-
- if (vmsg->data_len && vmsg->data_len == len)
- if (ipq_mangle_ipv6(vmsg, entry) < 0)
- verdict = NF_DROP;
-
- nf_reinject(entry, verdict);
- return 0;
- }
-}
-
-static int
-ipq_set_mode(unsigned char mode, unsigned int range)
-{
- int status;
-
- spin_lock_bh(&queue_lock);
- status = __ipq_set_mode(mode, range);
- spin_unlock_bh(&queue_lock);
- return status;
-}
-
-static int
-ipq_receive_peer(struct ipq_peer_msg *pmsg,
- unsigned char type, unsigned int len)
-{
- int status = 0;
-
- if (len < sizeof(*pmsg))
- return -EINVAL;
-
- switch (type) {
- case IPQM_MODE:
- status = ipq_set_mode(pmsg->msg.mode.value,
- pmsg->msg.mode.range);
- break;
-
- case IPQM_VERDICT:
- status = ipq_set_verdict(&pmsg->msg.verdict,
- len - sizeof(*pmsg));
- break;
- default:
- status = -EINVAL;
- }
- return status;
-}
-
-static int
-dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
-{
- if (entry->indev)
- if (entry->indev->ifindex == ifindex)
- return 1;
-
- if (entry->outdev)
- if (entry->outdev->ifindex == ifindex)
- return 1;
-#ifdef CONFIG_BRIDGE_NETFILTER
- if (entry->skb->nf_bridge) {
- if (entry->skb->nf_bridge->physindev &&
- entry->skb->nf_bridge->physindev->ifindex == ifindex)
- return 1;
- if (entry->skb->nf_bridge->physoutdev &&
- entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
- return 1;
- }
-#endif
- return 0;
-}
-
-static void
-ipq_dev_drop(int ifindex)
-{
- ipq_flush(dev_cmp, ifindex);
-}
-
-#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)
-
-static inline void
-__ipq_rcv_skb(struct sk_buff *skb)
-{
- int status, type, pid, flags;
- unsigned int nlmsglen, skblen;
- struct nlmsghdr *nlh;
- bool enable_timestamp = false;
-
- skblen = skb->len;
- if (skblen < sizeof(*nlh))
- return;
-
- nlh = nlmsg_hdr(skb);
- nlmsglen = nlh->nlmsg_len;
- if (nlmsglen < sizeof(*nlh) || skblen < nlmsglen)
- return;
-
- pid = nlh->nlmsg_pid;
- flags = nlh->nlmsg_flags;
-
- if(pid <= 0 || !(flags & NLM_F_REQUEST) || flags & NLM_F_MULTI)
- RCV_SKB_FAIL(-EINVAL);
-
- if (flags & MSG_TRUNC)
- RCV_SKB_FAIL(-ECOMM);
-
- type = nlh->nlmsg_type;
- if (type < NLMSG_NOOP || type >= IPQM_MAX)
- RCV_SKB_FAIL(-EINVAL);
-
- if (type <= IPQM_BASE)
- return;
-
- if (!capable(CAP_NET_ADMIN))
- RCV_SKB_FAIL(-EPERM);
-
- spin_lock_bh(&queue_lock);
-
- if (peer_pid) {
- if (peer_pid != pid) {
- spin_unlock_bh(&queue_lock);
- RCV_SKB_FAIL(-EBUSY);
- }
- } else {
- enable_timestamp = true;
- peer_pid = pid;
- }
-
- spin_unlock_bh(&queue_lock);
- if (enable_timestamp)
- net_enable_timestamp();
-
- status = ipq_receive_peer(NLMSG_DATA(nlh), type,
- nlmsglen - NLMSG_LENGTH(0));
- if (status < 0)
- RCV_SKB_FAIL(status);
-
- if (flags & NLM_F_ACK)
- netlink_ack(skb, nlh, 0);
-}
-
-static void
-ipq_rcv_skb(struct sk_buff *skb)
-{
- mutex_lock(&ipqnl_mutex);
- __ipq_rcv_skb(skb);
- mutex_unlock(&ipqnl_mutex);
-}
-
-static int
-ipq_rcv_dev_event(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- struct net_device *dev = ptr;
-
- if (!net_eq(dev_net(dev), &init_net))
- return NOTIFY_DONE;
-
- /* Drop any packets associated with the downed device */
- if (event == NETDEV_DOWN)
- ipq_dev_drop(dev->ifindex);
- return NOTIFY_DONE;
-}
-
-static struct notifier_block ipq_dev_notifier = {
- .notifier_call = ipq_rcv_dev_event,
-};
-
-static int
-ipq_rcv_nl_event(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- struct netlink_notify *n = ptr;
-
- if (event == NETLINK_URELEASE && n->protocol == NETLINK_IP6_FW) {
- spin_lock_bh(&queue_lock);
- if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
- __ipq_reset();
- spin_unlock_bh(&queue_lock);
- }
- return NOTIFY_DONE;
-}
-
-static struct notifier_block ipq_nl_notifier = {
- .notifier_call = ipq_rcv_nl_event,
-};
-
-#ifdef CONFIG_SYSCTL
-static struct ctl_table_header *ipq_sysctl_header;
-
-static ctl_table ipq_table[] = {
- {
- .procname = NET_IPQ_QMAX_NAME,
- .data = &queue_maxlen,
- .maxlen = sizeof(queue_maxlen),
- .mode = 0644,
- .proc_handler = proc_dointvec
- },
- { }
-};
-#endif
-
-#ifdef CONFIG_PROC_FS
-static int ip6_queue_show(struct seq_file *m, void *v)
-{
- spin_lock_bh(&queue_lock);
-
- seq_printf(m,
- "Peer PID : %d\n"
- "Copy mode : %hu\n"
- "Copy range : %u\n"
- "Queue length : %u\n"
- "Queue max. length : %u\n"
- "Queue dropped : %u\n"
- "Netfilter dropped : %u\n",
- peer_pid,
- copy_mode,
- copy_range,
- queue_total,
- queue_maxlen,
- queue_dropped,
- queue_user_dropped);
-
- spin_unlock_bh(&queue_lock);
- return 0;
-}
-
-static int ip6_queue_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ip6_queue_show, NULL);
-}
-
-static const struct file_operations ip6_queue_proc_fops = {
- .open = ip6_queue_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-#endif
-
-static const struct nf_queue_handler nfqh = {
- .name = "ip6_queue",
- .outfn = &ipq_enqueue_packet,
-};
-
-static int __init ip6_queue_init(void)
-{
- int status = -ENOMEM;
- struct proc_dir_entry *proc __maybe_unused;
-
- netlink_register_notifier(&ipq_nl_notifier);
- ipqnl = netlink_kernel_create(&init_net, NETLINK_IP6_FW, 0,
- ipq_rcv_skb, NULL, THIS_MODULE);
- if (ipqnl == NULL) {
- printk(KERN_ERR "ip6_queue: failed to create netlink socket\n");
- goto cleanup_netlink_notifier;
- }
-
-#ifdef CONFIG_PROC_FS
- proc = proc_create(IPQ_PROC_FS_NAME, 0, init_net.proc_net,
- &ip6_queue_proc_fops);
- if (!proc) {
- printk(KERN_ERR "ip6_queue: failed to create proc entry\n");
- goto cleanup_ipqnl;
- }
-#endif
- register_netdevice_notifier(&ipq_dev_notifier);
-#ifdef CONFIG_SYSCTL
- ipq_sysctl_header = register_sysctl_paths(net_ipv6_ctl_path, ipq_table);
-#endif
- status = nf_register_queue_handler(NFPROTO_IPV6, &nfqh);
- if (status < 0) {
- printk(KERN_ERR "ip6_queue: failed to register queue handler\n");
- goto cleanup_sysctl;
- }
- return status;
-
-cleanup_sysctl:
-#ifdef CONFIG_SYSCTL
- unregister_sysctl_table(ipq_sysctl_header);
-#endif
- unregister_netdevice_notifier(&ipq_dev_notifier);
- proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
-
-cleanup_ipqnl: __maybe_unused
- netlink_kernel_release(ipqnl);
- mutex_lock(&ipqnl_mutex);
- mutex_unlock(&ipqnl_mutex);
-
-cleanup_netlink_notifier:
- netlink_unregister_notifier(&ipq_nl_notifier);
- return status;
-}
-
-static void __exit ip6_queue_fini(void)
-{
- nf_unregister_queue_handlers(&nfqh);
-
- ipq_flush(NULL, 0);
-
-#ifdef CONFIG_SYSCTL
- unregister_sysctl_table(ipq_sysctl_header);
-#endif
- unregister_netdevice_notifier(&ipq_dev_notifier);
- proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
-
- netlink_kernel_release(ipqnl);
- mutex_lock(&ipqnl_mutex);
- mutex_unlock(&ipqnl_mutex);
-
- netlink_unregister_notifier(&ipq_nl_notifier);
-}
-
-MODULE_DESCRIPTION("IPv6 packet queue handler");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_IP6_FW);
-
-module_init(ip6_queue_init);
-module_exit(ip6_queue_fini);
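
With ip6_queue gone, userspace that read packets through libipq has to move to nfnetlink_queue (CONFIG_NETFILTER_NETLINK_QUEUE) and libnetfilter_queue, as the removed Kconfig help text already advised. A minimal migration sketch, assuming queue number 0 and an ip6tables -j NFQUEUE rule (not part of this patch):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter.h>		/* NF_ACCEPT */
#include <libnetfilter_queue/libnetfilter_queue.h>

/* Verdict callback: the libnetfilter_queue analogue of an ipq read loop. */
static int cb(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
	      struct nfq_data *nfa, void *data)
{
	struct nfqnl_msg_packet_hdr *ph = nfq_get_msg_packet_hdr(nfa);
	uint32_t id = ph ? ntohl(ph->packet_id) : 0;

	/* Accept everything; a real filter would inspect the payload. */
	return nfq_set_verdict(qh, id, NF_ACCEPT, 0, NULL);
}

int main(void)
{
	struct nfq_handle *h = nfq_open();
	struct nfq_q_handle *qh;
	char buf[4096];
	int fd, rv;

	if (!h)
		exit(1);
	nfq_bind_pf(h, AF_INET6);
	qh = nfq_create_queue(h, 0, &cb, NULL);	/* queue number 0 */
	if (!qh)
		exit(1);
	nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);

	fd = nfq_fd(h);
	while ((rv = recv(fd, buf, sizeof(buf), 0)) >= 0)
		nfq_handle_packet(h, buf, rv);

	nfq_destroy_queue(qh);
	nfq_close(h);
	return 0;
}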
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 94874b0bdcdc..d7cb04506c3d 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -78,19 +78,6 @@ EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
Hence the start of any table is given by get_table() below. */
-/* Check for an extension */
-int
-ip6t_ext_hdr(u8 nexthdr)
-{
- return (nexthdr == IPPROTO_HOPOPTS) ||
- (nexthdr == IPPROTO_ROUTING) ||
- (nexthdr == IPPROTO_FRAGMENT) ||
- (nexthdr == IPPROTO_ESP) ||
- (nexthdr == IPPROTO_AH) ||
- (nexthdr == IPPROTO_NONE) ||
- (nexthdr == IPPROTO_DSTOPTS);
-}
-
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
@@ -146,7 +133,7 @@ ip6_packet_match(const struct sk_buff *skb,
int protohdr;
unsigned short _frag_off;
- protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
+ protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
if (protohdr < 0) {
if (_frag_off == 0)
*hotdrop = true;
@@ -194,8 +181,7 @@ ip6_checkentry(const struct ip6t_ip6 *ipv6)
static unsigned int
ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
{
- if (net_ratelimit())
- pr_info("error: `%s'\n", (const char *)par->targinfo);
+ net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
return NF_DROP;
}
@@ -375,6 +361,7 @@ ip6t_do_table(struct sk_buff *skb,
const struct xt_entry_match *ematch;
IP_NF_ASSERT(e);
+ acpar.thoff = 0;
if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
&acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
no_match:
@@ -409,7 +396,7 @@ ip6t_do_table(struct sk_buff *skb,
if (v < 0) {
/* Pop from stack? */
if (v != XT_RETURN) {
- verdict = (unsigned)(-v) - 1;
+ verdict = (unsigned int)(-v) - 1;
break;
}
if (*stackptr <= origptr)
@@ -2291,6 +2278,10 @@ static void __exit ip6_tables_fini(void)
* if target < 0. "last header" is transport protocol header, ESP, or
* "No next header".
*
+ * Note that *offset is used as an input/output parameter, and if it is
+ * not zero, then it must be a valid offset to an inner IPv6 header. This
+ * can be used to explore inner IPv6 headers, e.g. in ICMPv6 error messages.
+ *
* If target header is found, its offset is set in *offset and return protocol
* number. Otherwise, return -1.
*
@@ -2302,17 +2293,33 @@ static void __exit ip6_tables_fini(void)
* *offset is meaningless and fragment offset is stored in *fragoff if fragoff
* isn't NULL.
*
+ * If flags is not NULL and it's a fragment, then the frag flag
+ * IP6T_FH_F_FRAG will be set. If the caller has set IP6T_FH_F_AUTH in
+ * *flags and target < 0, then this function will stop at the AH header.
*/
int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
- int target, unsigned short *fragoff)
+ int target, unsigned short *fragoff, int *flags)
{
unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
u8 nexthdr = ipv6_hdr(skb)->nexthdr;
- unsigned int len = skb->len - start;
+ unsigned int len;
if (fragoff)
*fragoff = 0;
+ if (*offset) {
+ struct ipv6hdr _ip6, *ip6;
+
+ ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6);
+ if (!ip6 || (ip6->version != 6)) {
+ printk(KERN_ERR "IPv6 header not found\n");
+ return -EBADMSG;
+ }
+ start = *offset + sizeof(struct ipv6hdr);
+ nexthdr = ip6->nexthdr;
+ }
+ len = skb->len - start;
+
while (nexthdr != target) {
struct ipv6_opt_hdr _hdr, *hp;
unsigned int hdrlen;
@@ -2329,6 +2336,9 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
if (nexthdr == NEXTHDR_FRAGMENT) {
unsigned short _frag_off;
__be16 *fp;
+
+ if (flags) /* Indicate that this is a fragment */
+ *flags |= IP6T_FH_F_FRAG;
fp = skb_header_pointer(skb,
start+offsetof(struct frag_hdr,
frag_off),
@@ -2349,9 +2359,11 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
return -ENOENT;
}
hdrlen = 8;
- } else if (nexthdr == NEXTHDR_AUTH)
+ } else if (nexthdr == NEXTHDR_AUTH) {
+ if (flags && (*flags & IP6T_FH_F_AUTH) && (target < 0))
+ break;
hdrlen = (hp->hdrlen + 2) << 2;
- else
+ } else
hdrlen = ipv6_optlen(hp);
nexthdr = hp->nexthdr;
@@ -2366,7 +2378,6 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);
-EXPORT_SYMBOL(ip6t_ext_hdr);
EXPORT_SYMBOL(ipv6_find_hdr);
module_init(ip6_tables_init);
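
Every caller of ipv6_find_hdr() gains the new fifth argument, and *offset is now read on entry. A hedged sketch of a caller using the extended interface (illustrative only; IP6T_FH_F_AUTH passed in via *flags only matters when target < 0):

static int find_inner_tcp(const struct sk_buff *skb, unsigned int inner_off)
{
	unsigned int ptr = inner_off;	/* in/out: 0, or offset of an inner IPv6 header */
	unsigned short frag_off;
	int flags = 0;			/* out: IP6T_FH_F_FRAG set if a fragment header was seen */
	int nexthdr;

	nexthdr = ipv6_find_hdr(skb, &ptr, IPPROTO_TCP, &frag_off, &flags);
	if (nexthdr < 0)
		return nexthdr;		/* -ENOENT, -EBADMSG, ... */
	if (flags & IP6T_FH_F_FRAG)
		pr_debug("TCP header in a fragmented packet, offset %u\n", ptr);
	return nexthdr;
}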
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index aad2fa41cf46..fd4fb34c51c7 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -114,8 +114,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
GFP_ATOMIC);
if (!nskb) {
- if (net_ratelimit())
- pr_debug("cannot alloc skb\n");
+ net_dbg_ratelimited("cannot alloc skb\n");
dst_release(dst);
return;
}
@@ -210,8 +209,7 @@ reject_tg6(struct sk_buff *skb, const struct xt_action_param *par)
send_reset(net, skb);
break;
default:
- if (net_ratelimit())
- pr_info("case %u not handled yet\n", reject->with);
+ net_info_ratelimited("case %u not handled yet\n", reject->with);
break;
}
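
The net_dbg_ratelimited()/net_info_ratelimited() helpers used in these conversions wrap the old "if (net_ratelimit()) pr_*(...)" idiom. As best I recall, the include/linux/net.h definitions of this era are approximately:

#define net_ratelimited_function(function, ...)			\
do {								\
	if (net_ratelimit())					\
		function(__VA_ARGS__);				\
} while (0)

#define net_info_ratelimited(fmt, ...)				\
	net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
#define net_dbg_ratelimited(fmt, ...)				\
	net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)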
diff --git a/net/ipv6/netfilter/ip6t_ah.c b/net/ipv6/netfilter/ip6t_ah.c
index 89cccc5a9c92..04099ab7d2e3 100644
--- a/net/ipv6/netfilter/ip6t_ah.c
+++ b/net/ipv6/netfilter/ip6t_ah.c
@@ -41,11 +41,11 @@ static bool ah_mt6(const struct sk_buff *skb, struct xt_action_param *par)
struct ip_auth_hdr _ah;
const struct ip_auth_hdr *ah;
const struct ip6t_ah *ahinfo = par->matchinfo;
- unsigned int ptr;
+ unsigned int ptr = 0;
unsigned int hdrlen = 0;
int err;
- err = ipv6_find_hdr(skb, &ptr, NEXTHDR_AUTH, NULL);
+ err = ipv6_find_hdr(skb, &ptr, NEXTHDR_AUTH, NULL, NULL);
if (err < 0) {
if (err != -ENOENT)
par->hotdrop = true;
diff --git a/net/ipv6/netfilter/ip6t_frag.c b/net/ipv6/netfilter/ip6t_frag.c
index eda898fda6ca..3b5735e56bfe 100644
--- a/net/ipv6/netfilter/ip6t_frag.c
+++ b/net/ipv6/netfilter/ip6t_frag.c
@@ -40,10 +40,10 @@ frag_mt6(const struct sk_buff *skb, struct xt_action_param *par)
struct frag_hdr _frag;
const struct frag_hdr *fh;
const struct ip6t_frag *fraginfo = par->matchinfo;
- unsigned int ptr;
+ unsigned int ptr = 0;
int err;
- err = ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL);
+ err = ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL, NULL);
if (err < 0) {
if (err != -ENOENT)
par->hotdrop = true;
diff --git a/net/ipv6/netfilter/ip6t_hbh.c b/net/ipv6/netfilter/ip6t_hbh.c
index 59df051eaef6..01df142bb027 100644
--- a/net/ipv6/netfilter/ip6t_hbh.c
+++ b/net/ipv6/netfilter/ip6t_hbh.c
@@ -50,7 +50,7 @@ hbh_mt6(const struct sk_buff *skb, struct xt_action_param *par)
const struct ipv6_opt_hdr *oh;
const struct ip6t_opts *optinfo = par->matchinfo;
unsigned int temp;
- unsigned int ptr;
+ unsigned int ptr = 0;
unsigned int hdrlen = 0;
bool ret = false;
u8 _opttype;
@@ -62,7 +62,7 @@ hbh_mt6(const struct sk_buff *skb, struct xt_action_param *par)
err = ipv6_find_hdr(skb, &ptr,
(par->match == &hbh_mt6_reg[0]) ?
- NEXTHDR_HOP : NEXTHDR_DEST, NULL);
+ NEXTHDR_HOP : NEXTHDR_DEST, NULL, NULL);
if (err < 0) {
if (err != -ENOENT)
par->hotdrop = true;
diff --git a/net/ipv6/netfilter/ip6t_rt.c b/net/ipv6/netfilter/ip6t_rt.c
index d8488c50a8e0..2c99b94eeca3 100644
--- a/net/ipv6/netfilter/ip6t_rt.c
+++ b/net/ipv6/netfilter/ip6t_rt.c
@@ -42,14 +42,14 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
const struct ipv6_rt_hdr *rh;
const struct ip6t_rt *rtinfo = par->matchinfo;
unsigned int temp;
- unsigned int ptr;
+ unsigned int ptr = 0;
unsigned int hdrlen = 0;
bool ret = false;
struct in6_addr _addr;
const struct in6_addr *ap;
int err;
- err = ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING, NULL);
+ err = ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING, NULL, NULL);
if (err < 0) {
if (err != -ENOENT)
par->hotdrop = true;
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 00d19173db7e..4d782405f125 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -42,8 +42,7 @@ ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
/* root is playing with raw sockets. */
if (skb->len < sizeof(struct iphdr) ||
ip_hdrlen(skb) < sizeof(struct iphdr)) {
- if (net_ratelimit())
- pr_warning("ip6t_hook: happy cracking.\n");
+ net_warn_ratelimited("ip6t_hook: happy cracking\n");
return NF_ACCEPT;
}
#endif
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 4111050a9fc5..3224ef90a21a 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -232,8 +232,7 @@ static unsigned int ipv6_conntrack_local(unsigned int hooknum,
{
/* root is playing with raw sockets. */
if (skb->len < sizeof(struct ipv6hdr)) {
- if (net_ratelimit())
- pr_notice("ipv6_conntrack_local: packet too short\n");
+ net_notice_ratelimited("ipv6_conntrack_local: packet too short\n");
return NF_ACCEPT;
}
return __ipv6_conntrack_in(dev_net(out), hooknum, skb, okfn);
@@ -278,10 +277,11 @@ static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
static int ipv6_tuple_to_nlattr(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple)
{
- NLA_PUT(skb, CTA_IP_V6_SRC, sizeof(u_int32_t) * 4,
- &tuple->src.u3.ip6);
- NLA_PUT(skb, CTA_IP_V6_DST, sizeof(u_int32_t) * 4,
- &tuple->dst.u3.ip6);
+ if (nla_put(skb, CTA_IP_V6_SRC, sizeof(u_int32_t) * 4,
+ &tuple->src.u3.ip6) ||
+ nla_put(skb, CTA_IP_V6_DST, sizeof(u_int32_t) * 4,
+ &tuple->dst.u3.ip6))
+ goto nla_put_failure;
return 0;
nla_put_failure:
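
The NLA_PUT() family of macros jumps to a local nla_put_failure label behind the caller's back; the conversion to nla_put() makes that control flow explicit and lets consecutive puts share one test. Schematically, taken from the hunk above:

/* Before: the macro hides a goto nla_put_failure on overflow. */
NLA_PUT(skb, CTA_IP_V6_SRC, sizeof(u_int32_t) * 4, &tuple->src.u3.ip6);

/* After: the error path is visible, and several puts share one branch. */
if (nla_put(skb, CTA_IP_V6_SRC, sizeof(u_int32_t) * 4, &tuple->src.u3.ip6) ||
    nla_put(skb, CTA_IP_V6_DST, sizeof(u_int32_t) * 4, &tuple->dst.u3.ip6))
	goto nla_put_failure;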
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 92cc9f2931ae..3e81904fbbcd 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -234,10 +234,10 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl,
static int icmpv6_tuple_to_nlattr(struct sk_buff *skb,
const struct nf_conntrack_tuple *t)
{
- NLA_PUT_BE16(skb, CTA_PROTO_ICMPV6_ID, t->src.u.icmp.id);
- NLA_PUT_U8(skb, CTA_PROTO_ICMPV6_TYPE, t->dst.u.icmp.type);
- NLA_PUT_U8(skb, CTA_PROTO_ICMPV6_CODE, t->dst.u.icmp.code);
-
+ if (nla_put_be16(skb, CTA_PROTO_ICMPV6_ID, t->src.u.icmp.id) ||
+ nla_put_u8(skb, CTA_PROTO_ICMPV6_TYPE, t->dst.u.icmp.type) ||
+ nla_put_u8(skb, CTA_PROTO_ICMPV6_CODE, t->dst.u.icmp.code))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -300,8 +300,8 @@ icmpv6_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
const unsigned int *timeout = data;
- NLA_PUT_BE32(skb, CTA_TIMEOUT_ICMPV6_TIMEOUT, htonl(*timeout / HZ));
-
+ if (nla_put_be32(skb, CTA_TIMEOUT_ICMPV6_TIMEOUT, htonl(*timeout / HZ)))
+ goto nla_put_failure;
return 0;
nla_put_failure:
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 38f00b0298d3..c9c78c2e666b 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -444,12 +444,11 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
return head;
out_oversize:
- if (net_ratelimit())
- printk(KERN_DEBUG "nf_ct_frag6_reasm: payload len = %d\n", payload_len);
+ net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
+ payload_len);
goto out_fail;
out_oom:
- if (net_ratelimit())
- printk(KERN_DEBUG "nf_ct_frag6_reasm: no memory for reassembly\n");
+ net_dbg_ratelimited("nf_ct_frag6_reasm: no memory for reassembly\n");
out_fail:
return NULL;
}
@@ -626,8 +625,8 @@ int nf_ct_frag6_init(void)
inet_frags_init(&nf_frags);
#ifdef CONFIG_SYSCTL
- nf_ct_frag6_sysctl_header = register_sysctl_paths(nf_net_netfilter_sysctl_path,
- nf_ct_frag6_sysctl_table);
+ nf_ct_frag6_sysctl_header = register_net_sysctl(&init_net, "net/netfilter",
+ nf_ct_frag6_sysctl_table);
if (!nf_ct_frag6_sysctl_header) {
inet_frags_fini(&nf_frags);
return -ENOMEM;
@@ -640,7 +639,7 @@ int nf_ct_frag6_init(void)
void nf_ct_frag6_cleanup(void)
{
#ifdef CONFIG_SYSCTL
- unregister_sysctl_table(nf_ct_frag6_sysctl_header);
+ unregister_net_sysctl_table(nf_ct_frag6_sysctl_header);
nf_ct_frag6_sysctl_header = NULL;
#endif
inet_frags_fini(&nf_frags);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 5bddea778840..93d69836fded 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -72,7 +72,7 @@ static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
const struct in6_addr *rmt_addr, int dif)
{
struct hlist_node *node;
- int is_multicast = ipv6_addr_is_multicast(loc_addr);
+ bool is_multicast = ipv6_addr_is_multicast(loc_addr);
sk_for_each_from(sk, node)
if (inet_sk(sk)->inet_num == num) {
@@ -153,12 +153,12 @@ EXPORT_SYMBOL(rawv6_mh_filter_unregister);
*
* Caller owns SKB so we must make clones.
*/
-static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
+static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
{
const struct in6_addr *saddr;
const struct in6_addr *daddr;
struct sock *sk;
- int delivered = 0;
+ bool delivered = false;
__u8 hash;
struct net *net;
@@ -179,7 +179,7 @@ static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
while (sk) {
int filtered;
- delivered = 1;
+ delivered = true;
switch (nexthdr) {
case IPPROTO_ICMPV6:
filtered = icmpv6_filter(sk, skb);
@@ -225,7 +225,7 @@ out:
return delivered;
}
-int raw6_local_deliver(struct sk_buff *skb, int nexthdr)
+bool raw6_local_deliver(struct sk_buff *skb, int nexthdr)
{
struct sock *raw_sk;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 9447bd69873a..4ff9af628e72 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -134,15 +134,16 @@ static unsigned int ip6_hashfn(struct inet_frag_queue *q)
return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
}
-int ip6_frag_match(struct inet_frag_queue *q, void *a)
+bool ip6_frag_match(struct inet_frag_queue *q, void *a)
{
struct frag_queue *fq;
struct ip6_create_arg *arg = a;
fq = container_of(q, struct frag_queue, q);
- return (fq->id == arg->id && fq->user == arg->user &&
- ipv6_addr_equal(&fq->saddr, arg->src) &&
- ipv6_addr_equal(&fq->daddr, arg->dst));
+ return fq->id == arg->id &&
+ fq->user == arg->user &&
+ ipv6_addr_equal(&fq->saddr, arg->src) &&
+ ipv6_addr_equal(&fq->daddr, arg->dst);
}
EXPORT_SYMBOL(ip6_frag_match);
@@ -414,6 +415,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
struct sk_buff *fp, *head = fq->q.fragments;
int payload_len;
unsigned int nhoff;
+ int sum_truesize;
fq_kill(fq);
@@ -433,7 +435,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
skb_morph(head, fq->q.fragments);
head->next = fq->q.fragments->next;
- kfree_skb(fq->q.fragments);
+ consume_skb(fq->q.fragments);
fq->q.fragments = head;
}
@@ -483,20 +485,33 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
head->mac_header += sizeof(struct frag_hdr);
head->network_header += sizeof(struct frag_hdr);
- skb_shinfo(head)->frag_list = head->next;
skb_reset_transport_header(head);
skb_push(head, head->data - skb_network_header(head));
- for (fp=head->next; fp; fp = fp->next) {
- head->data_len += fp->len;
- head->len += fp->len;
+ sum_truesize = head->truesize;
+ for (fp = head->next; fp;) {
+ bool headstolen;
+ int delta;
+ struct sk_buff *next = fp->next;
+
+ sum_truesize += fp->truesize;
if (head->ip_summed != fp->ip_summed)
head->ip_summed = CHECKSUM_NONE;
else if (head->ip_summed == CHECKSUM_COMPLETE)
head->csum = csum_add(head->csum, fp->csum);
- head->truesize += fp->truesize;
+
+ if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
+ kfree_skb_partial(fp, headstolen);
+ } else {
+ if (!skb_shinfo(head)->frag_list)
+ skb_shinfo(head)->frag_list = fp;
+ head->data_len += fp->len;
+ head->len += fp->len;
+ head->truesize += fp->truesize;
+ }
+ fp = next;
}
- atomic_sub(head->truesize, &fq->q.net->mem);
+ atomic_sub(sum_truesize, &fq->q.net->mem);
head->next = NULL;
head->dev = dev;
@@ -518,12 +533,10 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
return 1;
out_oversize:
- if (net_ratelimit())
- printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
+ net_dbg_ratelimited("ip6_frag_reasm: payload len = %d\n", payload_len);
goto out_fail;
out_oom:
- if (net_ratelimit())
- printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
+ net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n");
out_fail:
rcu_read_lock();
IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
@@ -646,7 +659,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
table[2].data = &net->ipv6.frags.timeout;
}
- hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
+ hdr = register_net_sysctl(net, "net/ipv6", table);
if (hdr == NULL)
goto err_reg;
@@ -674,7 +687,7 @@ static struct ctl_table_header *ip6_ctl_header;
static int ip6_frags_sysctl_register(void)
{
- ip6_ctl_header = register_net_sysctl_rotable(net_ipv6_ctl_path,
+ ip6_ctl_header = register_net_sysctl(&init_net, "net/ipv6",
ip6_frags_ctl_table);
return ip6_ctl_header == NULL ? -ENOMEM : 0;
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 3992e26a6039..999a982ad3fd 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -24,6 +24,8 @@
* Fixed routing subtrees.
*/
+#define pr_fmt(fmt) "IPv6: " fmt
+
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/export.h>
@@ -62,7 +64,7 @@
#include <linux/sysctl.h>
#endif
-static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
+static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
const struct in6_addr *dest);
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
@@ -82,7 +84,7 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
static struct rt6_info *rt6_add_route_info(struct net *net,
const struct in6_addr *prefix, int prefixlen,
const struct in6_addr *gwaddr, int ifindex,
- unsigned pref);
+ unsigned int pref);
static struct rt6_info *rt6_get_route_info(struct net *net,
const struct in6_addr *prefix, int prefixlen,
const struct in6_addr *gwaddr, int ifindex);
@@ -285,6 +287,10 @@ static void ip6_dst_destroy(struct dst_entry *dst)
rt->rt6i_idev = NULL;
in6_dev_put(idev);
}
+
+ if (!(rt->rt6i_flags & RTF_EXPIRES) && dst->from)
+ dst_release(dst->from);
+
if (peer) {
rt->rt6i_peer = NULL;
inet_putpeer(peer);
@@ -327,13 +333,22 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
}
}
-static __inline__ int rt6_check_expired(const struct rt6_info *rt)
+static bool rt6_check_expired(const struct rt6_info *rt)
{
- return (rt->rt6i_flags & RTF_EXPIRES) &&
- time_after(jiffies, rt->dst.expires);
+ struct rt6_info *ort = NULL;
+
+ if (rt->rt6i_flags & RTF_EXPIRES) {
+ if (time_after(jiffies, rt->dst.expires))
+ return true;
+ } else if (rt->dst.from) {
+ ort = (struct rt6_info *) rt->dst.from;
+ return (ort->rt6i_flags & RTF_EXPIRES) &&
+ time_after(jiffies, ort->dst.expires);
+ }
+ return false;
}
-static inline int rt6_need_strict(const struct in6_addr *daddr)
+static bool rt6_need_strict(const struct in6_addr *daddr)
{
return ipv6_addr_type(daddr) &
(IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
@@ -620,12 +635,11 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
(rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
if (rt) {
- if (!addrconf_finite_timeout(lifetime)) {
- rt->rt6i_flags &= ~RTF_EXPIRES;
- } else {
- rt->dst.expires = jiffies + HZ * lifetime;
- rt->rt6i_flags |= RTF_EXPIRES;
- }
+ if (!addrconf_finite_timeout(lifetime))
+ rt6_clean_expires(rt);
+ else
+ rt6_set_expires(rt, jiffies + HZ * lifetime);
+
dst_release(&rt->dst);
}
return 0;
@@ -730,7 +744,7 @@ int ip6_ins_rt(struct rt6_info *rt)
return __ip6_ins_rt(rt, &info);
}
-static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
+static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
const struct in6_addr *daddr,
const struct in6_addr *saddr)
{
@@ -782,9 +796,7 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
goto retry;
}
- if (net_ratelimit())
- printk(KERN_WARNING
- "ipv6: Neighbour table overflow.\n");
+ net_warn_ratelimited("Neighbour table overflow\n");
dst_free(&rt->dst);
return NULL;
}
@@ -954,10 +966,10 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
rt->rt6i_idev = ort->rt6i_idev;
if (rt->rt6i_idev)
in6_dev_hold(rt->rt6i_idev);
- rt->dst.expires = 0;
rt->rt6i_gateway = ort->rt6i_gateway;
- rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
+ rt->rt6i_flags = ort->rt6i_flags;
+ rt6_clean_expires(rt);
rt->rt6i_metric = 0;
memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
@@ -1019,10 +1031,9 @@ static void ip6_link_failure(struct sk_buff *skb)
rt = (struct rt6_info *) skb_dst(skb);
if (rt) {
- if (rt->rt6i_flags & RTF_CACHE) {
- dst_set_expires(&rt->dst, 0);
- rt->rt6i_flags |= RTF_EXPIRES;
- } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
+ if (rt->rt6i_flags & RTF_CACHE)
+ rt6_update_expires(rt, 0);
+ else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
rt->rt6i_node->fn_sernum = -1;
}
}
@@ -1271,7 +1282,7 @@ int ip6_route_add(struct fib6_config *cfg)
!(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
table = fib6_get_table(net, cfg->fc_table);
if (!table) {
- printk(KERN_WARNING "IPv6: NLM_F_CREATE should be specified when creating new route\n");
+ pr_warn("NLM_F_CREATE should be specified when creating new route\n");
table = fib6_new_table(net, cfg->fc_table);
}
} else {
@@ -1289,9 +1300,12 @@ int ip6_route_add(struct fib6_config *cfg)
}
rt->dst.obsolete = -1;
- rt->dst.expires = (cfg->fc_flags & RTF_EXPIRES) ?
- jiffies + clock_t_to_jiffies(cfg->fc_expires) :
- 0;
+
+ if (cfg->fc_flags & RTF_EXPIRES)
+ rt6_set_expires(rt, jiffies +
+ clock_t_to_jiffies(cfg->fc_expires));
+ else
+ rt6_clean_expires(rt);
if (cfg->fc_protocol == RTPROT_UNSPEC)
cfg->fc_protocol = RTPROT_BOOT;
@@ -1629,9 +1643,7 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
rt = ip6_route_redirect(dest, src, saddr, neigh->dev);
if (rt == net->ipv6.ip6_null_entry) {
- if (net_ratelimit())
- printk(KERN_DEBUG "rt6_redirect: source isn't a valid nexthop "
- "for redirect target\n");
+ net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
goto out;
}
@@ -1736,8 +1748,8 @@ again:
features |= RTAX_FEATURE_ALLFRAG;
dst_metric_set(&rt->dst, RTAX_FEATURES, features);
}
- dst_set_expires(&rt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
- rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES;
+ rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
+ rt->rt6i_flags |= RTF_MODIFIED;
goto out;
}
@@ -1765,9 +1777,8 @@ again:
* which is 10 mins. After 10 mins the decreased pmtu is expired
* and detecting PMTU increase will be automatically happened.
*/
- dst_set_expires(&nrt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
- nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES;
-
+ rt6_update_expires(nrt, net->ipv6.sysctl.ip6_rt_mtu_expires);
+ nrt->rt6i_flags |= RTF_DYNAMIC;
ip6_ins_rt(nrt);
}
out:
@@ -1799,7 +1810,7 @@ void rt6_pmtu_discovery(const struct in6_addr *daddr, const struct in6_addr *sad
* Misc support functions
*/
-static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
+static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
const struct in6_addr *dest)
{
struct net *net = dev_net(ort->dst.dev);
@@ -1819,10 +1830,14 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
if (rt->rt6i_idev)
in6_dev_hold(rt->rt6i_idev);
rt->dst.lastuse = jiffies;
- rt->dst.expires = 0;
rt->rt6i_gateway = ort->rt6i_gateway;
- rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
+ rt->rt6i_flags = ort->rt6i_flags;
+ if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
+ (RTF_DEFAULT | RTF_ADDRCONF))
+ rt6_set_from(rt, ort);
+ else
+ rt6_clean_expires(rt);
rt->rt6i_metric = 0;
#ifdef CONFIG_IPV6_SUBTREES
@@ -1870,7 +1885,7 @@ out:
static struct rt6_info *rt6_add_route_info(struct net *net,
const struct in6_addr *prefix, int prefixlen,
const struct in6_addr *gwaddr, int ifindex,
- unsigned pref)
+ unsigned int pref)
{
struct fib6_config cfg = {
.fc_table = RT6_TABLE_INFO,
@@ -2089,9 +2104,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
int err;
if (!rt) {
- if (net_ratelimit())
- pr_warning("IPv6: Maximum number of routes reached,"
- " consider increasing route/max_size.\n");
+ net_warn_ratelimited("Maximum number of routes reached, consider increasing route/max_size\n");
return ERR_PTR(-ENOMEM);
}
@@ -2200,10 +2213,9 @@ void rt6_ifdown(struct net *net, struct net_device *dev)
icmp6_clean_all(fib6_ifdown, &adn);
}
-struct rt6_mtu_change_arg
-{
+struct rt6_mtu_change_arg {
struct net_device *dev;
- unsigned mtu;
+ unsigned int mtu;
};
static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
@@ -2245,7 +2257,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
return 0;
}
-void rt6_mtu_change(struct net_device *dev, unsigned mtu)
+void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
{
struct rt6_mtu_change_arg arg = {
.dev = dev,
@@ -2413,7 +2425,8 @@ static int rt6_fill_node(struct net *net,
else
table = RT6_TABLE_UNSPEC;
rtm->rtm_table = table;
- NLA_PUT_U32(skb, RTA_TABLE, table);
+ if (nla_put_u32(skb, RTA_TABLE, table))
+ goto nla_put_failure;
if (rt->rt6i_flags & RTF_REJECT)
rtm->rtm_type = RTN_UNREACHABLE;
else if (rt->rt6i_flags & RTF_LOCAL)
@@ -2436,16 +2449,20 @@ static int rt6_fill_node(struct net *net,
rtm->rtm_flags |= RTM_F_CLONED;
if (dst) {
- NLA_PUT(skb, RTA_DST, 16, dst);
+ if (nla_put(skb, RTA_DST, 16, dst))
+ goto nla_put_failure;
rtm->rtm_dst_len = 128;
} else if (rtm->rtm_dst_len)
- NLA_PUT(skb, RTA_DST, 16, &rt->rt6i_dst.addr);
+ if (nla_put(skb, RTA_DST, 16, &rt->rt6i_dst.addr))
+ goto nla_put_failure;
#ifdef CONFIG_IPV6_SUBTREES
if (src) {
- NLA_PUT(skb, RTA_SRC, 16, src);
+ if (nla_put(skb, RTA_SRC, 16, src))
+ goto nla_put_failure;
rtm->rtm_src_len = 128;
- } else if (rtm->rtm_src_len)
- NLA_PUT(skb, RTA_SRC, 16, &rt->rt6i_src.addr);
+ } else if (rtm->rtm_src_len &&
+ nla_put(skb, RTA_SRC, 16, &rt->rt6i_src.addr))
+ goto nla_put_failure;
#endif
if (iif) {
#ifdef CONFIG_IPV6_MROUTE
@@ -2463,17 +2480,20 @@ static int rt6_fill_node(struct net *net,
}
} else
#endif
- NLA_PUT_U32(skb, RTA_IIF, iif);
+ if (nla_put_u32(skb, RTA_IIF, iif))
+ goto nla_put_failure;
} else if (dst) {
struct in6_addr saddr_buf;
- if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0)
- NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
+ if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
+ nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
+ goto nla_put_failure;
}
if (rt->rt6i_prefsrc.plen) {
struct in6_addr saddr_buf;
saddr_buf = rt->rt6i_prefsrc.addr;
- NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
+ if (nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
+ goto nla_put_failure;
}
if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
@@ -2489,11 +2509,11 @@ static int rt6_fill_node(struct net *net,
}
rcu_read_unlock();
- if (rt->dst.dev)
- NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
-
- NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);
-
+ if (rt->dst.dev &&
+ nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
+ goto nla_put_failure;
if (!(rt->rt6i_flags & RTF_EXPIRES))
expires = 0;
else if (rt->dst.expires - jiffies < INT_MAX)
@@ -2598,6 +2618,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb) {
+ dst_release(&rt->dst);
err = -ENOBUFS;
goto errout;
}
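
The rt6_set_expires()/rt6_clean_expires()/rt6_update_expires()/rt6_set_from() helpers used throughout these route.c hunks live in include/net/ip6_fib.h and are not part of this diff. Reconstructed from their usage here (so details may differ), the intent is that a cloned route either carries its own RTF_EXPIRES deadline or borrows its parent's through dst.from, which is why ip6_dst_destroy() above now releases dst->from:

static inline void rt6_clean_expires(struct rt6_info *rt)
{
	if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from)
		dst_release(rt->dst.from);
	rt->rt6i_flags &= ~RTF_EXPIRES;
	rt->dst.from = NULL;
}

static inline void rt6_set_expires(struct rt6_info *rt, unsigned long expires)
{
	if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from)
		dst_release(rt->dst.from);
	rt->rt6i_flags |= RTF_EXPIRES;
	rt->dst.expires = expires;
}

/* rt6_update_expires(rt, timeout) is the dst_set_expires() flavour of the
 * same idea; rt6_set_from(rt, ort) drops RTF_EXPIRES and pins the parent
 * instead:
 */
static inline void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
{
	rt->rt6i_flags &= ~RTF_EXPIRES;
	dst_hold(&from->dst);
	rt->dst.from = &from->dst;
}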
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index c4ffd1743528..60415711563f 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -17,6 +17,8 @@
* Fred Templin <fred.l.templin@boeing.com>: isatap support
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
@@ -87,35 +89,51 @@ struct sit_net {
/* often modified stats are per cpu, other are shared (netdev->stats) */
struct pcpu_tstats {
- unsigned long rx_packets;
- unsigned long rx_bytes;
- unsigned long tx_packets;
- unsigned long tx_bytes;
-} __attribute__((aligned(4*sizeof(unsigned long))));
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+};
-static struct net_device_stats *ipip6_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *ipip6_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *tot)
{
- struct pcpu_tstats sum = { 0 };
int i;
for_each_possible_cpu(i) {
const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
-
- sum.rx_packets += tstats->rx_packets;
- sum.rx_bytes += tstats->rx_bytes;
- sum.tx_packets += tstats->tx_packets;
- sum.tx_bytes += tstats->tx_bytes;
+ u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_bh(&tstats->syncp);
+ rx_packets = tstats->rx_packets;
+ tx_packets = tstats->tx_packets;
+ rx_bytes = tstats->rx_bytes;
+ tx_bytes = tstats->tx_bytes;
+ } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+
+ tot->rx_packets += rx_packets;
+ tot->tx_packets += tx_packets;
+ tot->rx_bytes += rx_bytes;
+ tot->tx_bytes += tx_bytes;
}
- dev->stats.rx_packets = sum.rx_packets;
- dev->stats.rx_bytes = sum.rx_bytes;
- dev->stats.tx_packets = sum.tx_packets;
- dev->stats.tx_bytes = sum.tx_bytes;
- return &dev->stats;
+
+ tot->rx_errors = dev->stats.rx_errors;
+ tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
+ tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
+ tot->tx_dropped = dev->stats.tx_dropped;
+ tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
+ tot->tx_errors = dev->stats.tx_errors;
+
+ return tot;
}
+
/*
* Must be invoked with rcu_read_lock
*/
-static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net,
+static struct ip_tunnel *ipip6_tunnel_lookup(struct net *net,
struct net_device *dev, __be32 remote, __be32 local)
{
unsigned int h0 = HASH(remote);
@@ -686,12 +704,11 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
if (neigh == NULL) {
- if (net_ratelimit())
- printk(KERN_DEBUG "sit: nexthop == NULL\n");
+ net_dbg_ratelimited("sit: nexthop == NULL\n");
goto tx_error;
}
- addr6 = (const struct in6_addr*)&neigh->primary_key;
+ addr6 = (const struct in6_addr *)&neigh->primary_key;
addr_type = ipv6_addr_type(addr6);
if ((addr_type & IPV6_ADDR_UNICAST) &&
@@ -716,12 +733,11 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
if (neigh == NULL) {
- if (net_ratelimit())
- printk(KERN_DEBUG "sit: nexthop == NULL\n");
+ net_dbg_ratelimited("sit: nexthop == NULL\n");
goto tx_error;
}
- addr6 = (const struct in6_addr*)&neigh->primary_key;
+ addr6 = (const struct in6_addr *)&neigh->primary_key;
addr_type = ipv6_addr_type(addr6);
if (addr_type == IPV6_ADDR_ANY) {
@@ -1126,7 +1142,7 @@ static const struct net_device_ops ipip6_netdev_ops = {
.ndo_start_xmit = ipip6_tunnel_xmit,
.ndo_do_ioctl = ipip6_tunnel_ioctl,
.ndo_change_mtu = ipip6_tunnel_change_mtu,
- .ndo_get_stats = ipip6_get_stats,
+ .ndo_get_stats64 = ipip6_get_stats64,
};
static void ipip6_dev_free(struct net_device *dev)
@@ -1287,7 +1303,7 @@ static int __init sit_init(void)
{
int err;
- printk(KERN_INFO "IPv6 over IPv4 tunneling driver\n");
+ pr_info("IPv6 over IPv4 tunneling driver\n");
err = register_pernet_device(&sit_net_ops);
if (err < 0)
@@ -1295,7 +1311,7 @@ static int __init sit_init(void)
err = xfrm4_tunnel_register(&sit_handler, AF_INET6);
if (err < 0) {
unregister_pernet_device(&sit_net_ops);
- printk(KERN_INFO "sit init: Can't add protocol\n");
+ pr_info("%s: can't add protocol\n", __func__);
}
return err;
}
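
The fetch_begin/fetch_retry loop in ipip6_get_stats64() is one half of the u64_stats_sync protocol; a per-packet writer side pairs with it. A minimal sketch of the pattern (the real increment sites are in sit.c's rx/tx paths, not shown in these hunks):

static void tunnel_count_rx(struct net_device *dev, unsigned int len)
{
	struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += len;
	u64_stats_update_end(&tstats->syncp);
}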
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 166a57c47d39..e85c48bd404f 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -16,32 +16,8 @@
#include <net/addrconf.h>
#include <net/inet_frag.h>
-static struct ctl_table empty[1];
-
-static ctl_table ipv6_static_skeleton[] = {
- {
- .procname = "neigh",
- .maxlen = 0,
- .mode = 0555,
- .child = empty,
- },
- { }
-};
-
static ctl_table ipv6_table_template[] = {
{
- .procname = "route",
- .maxlen = 0,
- .mode = 0555,
- .child = ipv6_route_table_template
- },
- {
- .procname = "icmp",
- .maxlen = 0,
- .mode = 0555,
- .child = ipv6_icmp_table_template
- },
- {
.procname = "bindv6only",
.data = &init_net.ipv6.sysctl.bindv6only,
.maxlen = sizeof(int),
@@ -62,13 +38,6 @@ static ctl_table ipv6_rotable[] = {
{ }
};
-struct ctl_path net_ipv6_ctl_path[] = {
- { .procname = "net", },
- { .procname = "ipv6", },
- { },
-};
-EXPORT_SYMBOL_GPL(net_ipv6_ctl_path);
-
static int __net_init ipv6_sysctl_net_init(struct net *net)
{
struct ctl_table *ipv6_table;
@@ -81,28 +50,37 @@ static int __net_init ipv6_sysctl_net_init(struct net *net)
GFP_KERNEL);
if (!ipv6_table)
goto out;
+ ipv6_table[0].data = &net->ipv6.sysctl.bindv6only;
ipv6_route_table = ipv6_route_sysctl_init(net);
if (!ipv6_route_table)
goto out_ipv6_table;
- ipv6_table[0].child = ipv6_route_table;
ipv6_icmp_table = ipv6_icmp_sysctl_init(net);
if (!ipv6_icmp_table)
goto out_ipv6_route_table;
- ipv6_table[1].child = ipv6_icmp_table;
- ipv6_table[2].data = &net->ipv6.sysctl.bindv6only;
-
- net->ipv6.sysctl.table = register_net_sysctl_table(net, net_ipv6_ctl_path,
- ipv6_table);
- if (!net->ipv6.sysctl.table)
+ net->ipv6.sysctl.hdr = register_net_sysctl(net, "net/ipv6", ipv6_table);
+ if (!net->ipv6.sysctl.hdr)
goto out_ipv6_icmp_table;
+ net->ipv6.sysctl.route_hdr =
+ register_net_sysctl(net, "net/ipv6/route", ipv6_route_table);
+ if (!net->ipv6.sysctl.route_hdr)
+ goto out_unregister_ipv6_table;
+
+ net->ipv6.sysctl.icmp_hdr =
+ register_net_sysctl(net, "net/ipv6/icmp", ipv6_icmp_table);
+ if (!net->ipv6.sysctl.icmp_hdr)
+ goto out_unregister_route_table;
+
err = 0;
out:
return err;
-
+out_unregister_route_table:
+ unregister_net_sysctl_table(net->ipv6.sysctl.route_hdr);
+out_unregister_ipv6_table:
+ unregister_net_sysctl_table(net->ipv6.sysctl.hdr);
out_ipv6_icmp_table:
kfree(ipv6_icmp_table);
out_ipv6_route_table:
@@ -118,11 +96,13 @@ static void __net_exit ipv6_sysctl_net_exit(struct net *net)
struct ctl_table *ipv6_route_table;
struct ctl_table *ipv6_icmp_table;
- ipv6_table = net->ipv6.sysctl.table->ctl_table_arg;
- ipv6_route_table = ipv6_table[0].child;
- ipv6_icmp_table = ipv6_table[1].child;
+ ipv6_table = net->ipv6.sysctl.hdr->ctl_table_arg;
+ ipv6_route_table = net->ipv6.sysctl.route_hdr->ctl_table_arg;
+ ipv6_icmp_table = net->ipv6.sysctl.icmp_hdr->ctl_table_arg;
- unregister_net_sysctl_table(net->ipv6.sysctl.table);
+ unregister_net_sysctl_table(net->ipv6.sysctl.icmp_hdr);
+ unregister_net_sysctl_table(net->ipv6.sysctl.route_hdr);
+ unregister_net_sysctl_table(net->ipv6.sysctl.hdr);
kfree(ipv6_table);
kfree(ipv6_route_table);
@@ -140,7 +120,7 @@ int ipv6_sysctl_register(void)
{
int err = -ENOMEM;
- ip6_header = register_net_sysctl_rotable(net_ipv6_ctl_path, ipv6_rotable);
+ ip6_header = register_net_sysctl(&init_net, "net/ipv6", ipv6_rotable);
if (ip6_header == NULL)
goto out;
@@ -160,18 +140,3 @@ void ipv6_sysctl_unregister(void)
unregister_net_sysctl_table(ip6_header);
unregister_pernet_subsys(&ipv6_sysctl_net_ops);
}
-
-static struct ctl_table_header *ip6_base;
-
-int ipv6_static_sysctl_register(void)
-{
- ip6_base = register_sysctl_paths(net_ipv6_ctl_path, ipv6_static_skeleton);
- if (ip6_base == NULL)
- return -ENOMEM;
- return 0;
-}
-
-void ipv6_static_sysctl_unregister(void)
-{
- unregister_net_sysctl_table(ip6_base);
-}
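
register_net_sysctl() takes the mount point as a plain path string instead of a ctl_path array, which is what lets the empty "neigh" skeleton table and the exported net_ipv6_ctl_path go away. Minimal usage, mirroring the conversion above:

static struct ctl_table my_table[] = {
	{
		.procname	= "bindv6only",
		.data		= &init_net.ipv6.sysctl.bindv6only,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table_header *hdr;

static int __net_init my_sysctl_init(struct net *net)
{
	hdr = register_net_sysctl(net, "net/ipv6", my_table);
	return hdr ? 0 : -ENOMEM;
}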
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 12c6ece67f39..554d5999abc4 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -723,12 +723,10 @@ static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
NULL, NULL, skb);
if (genhash || memcmp(hash_location, newhash, 16) != 0) {
- if (net_ratelimit()) {
- printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
- genhash ? "failed" : "mismatch",
- &ip6h->saddr, ntohs(th->source),
- &ip6h->daddr, ntohs(th->dest));
- }
+ net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
+ genhash ? "failed" : "mismatch",
+ &ip6h->saddr, ntohs(th->source),
+ &ip6h->daddr, ntohs(th->dest));
return 1;
}
return 0;
@@ -1057,7 +1055,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
struct tcp_sock *tp = tcp_sk(sk);
__u32 isn = TCP_SKB_CB(skb)->when;
struct dst_entry *dst = NULL;
- int want_cookie = 0;
+ bool want_cookie = false;
if (skb->protocol == htons(ETH_P_IP))
return tcp_v4_conn_request(sk, skb);
@@ -1118,7 +1116,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
while (l-- > 0)
*c++ ^= *hash_location++;
- want_cookie = 0; /* not our kind of cookie */
+ want_cookie = false; /* not our kind of cookie */
tmp_ext.cookie_out_never = 0; /* false */
tmp_ext.cookie_plus = tmp_opt.cookie_plus;
} else if (!tp->rx_opt.cookie_in_always) {
@@ -1140,7 +1138,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
treq->rmt_addr = ipv6_hdr(skb)->saddr;
treq->loc_addr = ipv6_hdr(skb)->daddr;
if (!want_cookie || tmp_opt.tstamp_ok)
- TCP_ECN_create_request(req, tcp_hdr(skb));
+ TCP_ECN_create_request(req, skb);
treq->iif = sk->sk_bound_dev_if;
@@ -1353,7 +1351,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newnp->pktoptions = NULL;
if (treq->pktopts != NULL) {
newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
- kfree_skb(treq->pktopts);
+ consume_skb(treq->pktopts);
treq->pktopts = NULL;
if (newnp->pktoptions)
skb_set_owner_r(newnp->pktoptions, newsk);
@@ -1383,6 +1381,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
tcp_mtup_init(newsk);
tcp_sync_mss(newsk, dst_mtu(dst));
newtp->advmss = dst_metric_advmss(dst);
+ if (tcp_sk(sk)->rx_opt.user_mss &&
+ tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
+ newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
+
tcp_initialize_rcv_mss(newsk);
if (tcp_rsk(req)->snt_synack)
tcp_valid_rtt_meas(newsk,
@@ -1645,7 +1647,7 @@ process:
#ifdef CONFIG_NET_DMA
struct tcp_sock *tp = tcp_sk(sk);
if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
- tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+ tp->ucopy.dma_chan = net_dma_find_channel();
if (tp->ucopy.dma_chan)
ret = tcp_v6_do_rcv(sk, skb);
else
@@ -1654,7 +1656,8 @@ process:
if (!tcp_prequeue(sk, skb))
ret = tcp_v6_do_rcv(sk, skb);
}
- } else if (unlikely(sk_add_backlog(sk, skb))) {
+ } else if (unlikely(sk_add_backlog(sk, skb,
+ sk->sk_rcvbuf + sk->sk_sndbuf))) {
bh_unlock_sock(sk);
NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
goto discard_and_relse;
@@ -1773,6 +1776,7 @@ static const struct inet_connection_sock_af_ops ipv6_specific = {
.syn_recv_sock = tcp_v6_syn_recv_sock,
.get_peer = tcp_v6_get_peer,
.net_header_len = sizeof(struct ipv6hdr),
+ .net_frag_header_len = sizeof(struct frag_hdr),
.setsockopt = ipv6_setsockopt,
.getsockopt = ipv6_getsockopt,
.addr2sockaddr = inet6_csk_addr2sockaddr,
@@ -1829,64 +1833,15 @@ static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
static int tcp_v6_init_sock(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
- struct tcp_sock *tp = tcp_sk(sk);
-
- skb_queue_head_init(&tp->out_of_order_queue);
- tcp_init_xmit_timers(sk);
- tcp_prequeue_init(tp);
-
- icsk->icsk_rto = TCP_TIMEOUT_INIT;
- tp->mdev = TCP_TIMEOUT_INIT;
-
- /* So many TCP implementations out there (incorrectly) count the
- * initial SYN frame in their delayed-ACK and congestion control
- * algorithms that we must have the following bandaid to talk
- * efficiently to them. -DaveM
- */
- tp->snd_cwnd = 2;
- /* See draft-stevens-tcpca-spec-01 for discussion of the
- * initialization of these values.
- */
- tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
- tp->snd_cwnd_clamp = ~0;
- tp->mss_cache = TCP_MSS_DEFAULT;
-
- tp->reordering = sysctl_tcp_reordering;
-
- sk->sk_state = TCP_CLOSE;
+ tcp_init_sock(sk);
icsk->icsk_af_ops = &ipv6_specific;
- icsk->icsk_ca_ops = &tcp_init_congestion_ops;
- icsk->icsk_sync_mss = tcp_sync_mss;
- sk->sk_write_space = sk_stream_write_space;
- sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
#ifdef CONFIG_TCP_MD5SIG
- tp->af_specific = &tcp_sock_ipv6_specific;
+ tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif
- /* TCP Cookie Transactions */
- if (sysctl_tcp_cookie_size > 0) {
- /* Default, cookies without s_data_payload. */
- tp->cookie_values =
- kzalloc(sizeof(*tp->cookie_values),
- sk->sk_allocation);
- if (tp->cookie_values != NULL)
- kref_init(&tp->cookie_values->kref);
- }
- /* Presumed zeroed, in order of appearance:
- * cookie_in_always, cookie_out_never,
- * s_data_constant, s_data_in, s_data_out
- */
- sk->sk_sndbuf = sysctl_tcp_wmem[1];
- sk->sk_rcvbuf = sysctl_tcp_rmem[1];
-
- local_bh_disable();
- sock_update_memcg(sk);
- sk_sockets_allocated_inc(sk);
- local_bh_enable();
-
return 0;
}
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index 4f3cec12aa85..4b0f50d9a962 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -19,6 +19,8 @@
* YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
*/
+#define pr_fmt(fmt) "IPv6: " fmt
+
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -160,11 +162,11 @@ static const struct inet6_protocol tunnel46_protocol = {
static int __init tunnel6_init(void)
{
if (inet6_add_protocol(&tunnel6_protocol, IPPROTO_IPV6)) {
- printk(KERN_ERR "tunnel6 init(): can't add protocol\n");
+ pr_err("%s: can't add protocol\n", __func__);
return -EAGAIN;
}
if (inet6_add_protocol(&tunnel46_protocol, IPPROTO_IPIP)) {
- printk(KERN_ERR "tunnel6 init(): can't add protocol\n");
+ pr_err("%s: can't add protocol\n", __func__);
inet6_del_protocol(&tunnel6_protocol, IPPROTO_IPV6);
return -EAGAIN;
}
@@ -174,9 +176,9 @@ static int __init tunnel6_init(void)
static void __exit tunnel6_fini(void)
{
if (inet6_del_protocol(&tunnel46_protocol, IPPROTO_IPIP))
- printk(KERN_ERR "tunnel6 close: can't remove protocol\n");
+ pr_err("%s: can't remove protocol\n", __func__);
if (inet6_del_protocol(&tunnel6_protocol, IPPROTO_IPV6))
- printk(KERN_ERR "tunnel6 close: can't remove protocol\n");
+ pr_err("%s: can't remove protocol\n", __func__);
}
module_init(tunnel6_init);
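
The pr_fmt() define added at the top of several files in this series prefixes every later pr_*() call in the translation unit; this is standard printk.h behaviour. For tunnel6.c:

/* Must appear before any include that pulls in printk.h. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

pr_err("%s: can't add protocol\n", __func__);
/* expands to: printk(KERN_ERR "tunnel6" ": " "%s: can't add protocol\n", __func__) */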
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 37b0699e95e5..f05099fc5901 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -103,7 +103,7 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
unsigned int hash2_nulladdr =
udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
- unsigned int hash2_partial =
+ unsigned int hash2_partial =
udp6_portaddr_hash(sock_net(sk), &inet6_sk(sk)->rcv_saddr, 0);
/* precompute partial secondary hash */
@@ -349,7 +349,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
bool slow;
if (addr_len)
- *addr_len=sizeof(struct sockaddr_in6);
+ *addr_len = sizeof(struct sockaddr_in6);
if (flags & MSG_ERRQUEUE)
return ipv6_recv_error(sk, msg, len);
@@ -496,6 +496,28 @@ out:
sock_put(sk);
}
+static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ int rc;
+
+ if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
+ sock_rps_save_rxhash(sk, skb);
+
+ rc = sock_queue_rcv_skb(sk, skb);
+ if (rc < 0) {
+ int is_udplite = IS_UDPLITE(sk);
+
+ /* Note that an ENOMEM error is charged twice */
+ if (rc == -ENOMEM)
+ UDP6_INC_STATS_BH(sock_net(sk),
+ UDP_MIB_RCVBUFERRORS, is_udplite);
+ UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+ kfree_skb(skb);
+ return -1;
+ }
+ return 0;
+}
+
static __inline__ void udpv6_err(struct sk_buff *skb,
struct inet6_skb_parm *opt, u8 type,
u8 code, int offset, __be32 info )
@@ -503,18 +525,54 @@ static __inline__ void udpv6_err(struct sk_buff *skb,
__udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
}
-int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
+static struct static_key udpv6_encap_needed __read_mostly;
+void udpv6_encap_enable(void)
+{
+ if (!static_key_enabled(&udpv6_encap_needed))
+ static_key_slow_inc(&udpv6_encap_needed);
+}
+EXPORT_SYMBOL(udpv6_encap_enable);
+
+int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
struct udp_sock *up = udp_sk(sk);
int rc;
int is_udplite = IS_UDPLITE(sk);
- if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
- sock_rps_save_rxhash(sk, skb);
-
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto drop;
+ if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
+ int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+
+ /*
+ * This is an encapsulation socket so pass the skb to
+ * the socket's udp_encap_rcv() hook. Otherwise, just
+ * fall through and pass this up the UDP socket.
+ * up->encap_rcv() returns the following value:
+ * =0 if skb was successfully passed to the encap
+ * handler or was discarded by it.
+ * >0 if skb should be passed on to UDP.
+ * <0 if skb should be resubmitted as proto -N
+ */
+
+ /* if we're overly short, let UDP handle it */
+ encap_rcv = ACCESS_ONCE(up->encap_rcv);
+ if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
+ int ret;
+
+ ret = encap_rcv(sk, skb);
+ if (ret <= 0) {
+ UDP_INC_STATS_BH(sock_net(sk),
+ UDP_MIB_INDATAGRAMS,
+ is_udplite);
+ return -ret;
+ }
+ }
+
+ /* FALLTHROUGH -- it's a UDP Packet */
+ }
+
/*
* UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
*/
@@ -539,21 +597,25 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
goto drop;
}
+ if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
+ goto drop;
+
skb_dst_drop(skb);
- rc = sock_queue_rcv_skb(sk, skb);
- if (rc < 0) {
- /* Note that an ENOMEM error is charged twice */
- if (rc == -ENOMEM)
- UDP6_INC_STATS_BH(sock_net(sk),
- UDP_MIB_RCVBUFERRORS, is_udplite);
- goto drop_no_sk_drops_inc;
+
+ bh_lock_sock(sk);
+ rc = 0;
+ if (!sock_owned_by_user(sk))
+ rc = __udpv6_queue_rcv_skb(sk, skb);
+ else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
+ bh_unlock_sock(sk);
+ goto drop;
}
+ bh_unlock_sock(sk);
- return 0;
+ return rc;
drop:
- atomic_inc(&sk->sk_drops);
-drop_no_sk_drops_inc:
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+ atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return -1;
}
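
udpv6_encap_enable() flips a static key, so the encap test in the receive fast path costs only a patched-out branch until the first encapsulation socket is created. As a hedged illustration of the contract documented in the hunk above (this hook is hypothetical, not part of the patch):

	/* Hypothetical encap_rcv() hook: return 0 when the skb was
	 * consumed or discarded, >0 to hand it back to normal UDP
	 * delivery, <0 to resubmit it as IP protocol -N.
	 */
	static int example_encap_rcv(struct sock *sk, struct sk_buff *skb)
	{
		/* Too short to carry a 4-byte encap header: let UDP have it. */
		if (!pskb_may_pull(skb, sizeof(struct udphdr) + 4))
			return 1;

		/* ... parse the encap header, queue the inner packet ... */
		consume_skb(skb);
		return 0;
	}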
@@ -602,37 +664,27 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
static void flush_stack(struct sock **stack, unsigned int count,
struct sk_buff *skb, unsigned int final)
{
- unsigned int i;
+ struct sk_buff *skb1 = NULL;
struct sock *sk;
- struct sk_buff *skb1;
+ unsigned int i;
for (i = 0; i < count; i++) {
- skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
-
sk = stack[i];
- if (skb1) {
- if (sk_rcvqueues_full(sk, skb1)) {
- kfree_skb(skb1);
- goto drop;
- }
- bh_lock_sock(sk);
- if (!sock_owned_by_user(sk))
- udpv6_queue_rcv_skb(sk, skb1);
- else if (sk_add_backlog(sk, skb1)) {
- kfree_skb(skb1);
- bh_unlock_sock(sk);
- goto drop;
- }
- bh_unlock_sock(sk);
- continue;
+ if (likely(skb1 == NULL))
+ skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
+ if (!skb1) {
+ atomic_inc(&sk->sk_drops);
+ UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+ IS_UDPLITE(sk));
+ UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
+ IS_UDPLITE(sk));
}
-drop:
- atomic_inc(&sk->sk_drops);
- UDP6_INC_STATS_BH(sock_net(sk),
- UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
- UDP6_INC_STATS_BH(sock_net(sk),
- UDP_MIB_INERRORS, IS_UDPLITE(sk));
+
+ if (skb1 && udpv6_queue_rcv_skb(sk, skb1) <= 0)
+ skb1 = NULL;
}
+ if (unlikely(skb1))
+ kfree_skb(skb1);
}
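
The rewritten flush_stack() keeps a single pending clone across iterations. A restatement of the loop invariant, with the drop accounting from the hunk above elided (names as in flush_stack() itself):

	/* skb1 is the one outstanding skb.  It is cloned lazily,
	 * cleared once a socket consumes it (return <= 0), offered
	 * again to the next socket when delivery bounced (> 0), and
	 * freed at most once if nobody took it.
	 */
	struct sk_buff *skb1 = NULL;
	unsigned int i;

	for (i = 0; i < count; i++) {
		if (!skb1)
			skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
		if (skb1 && udpv6_queue_rcv_skb(stack[i], skb1) <= 0)
			skb1 = NULL;	/* consumed; clone afresh next time */
	}
	if (skb1)
		kfree_skb(skb1);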
/*
* Note: called only from the BH handler context,
@@ -772,39 +824,29 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
* for sock caches... i'll skip this for now.
*/
sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
+ if (sk != NULL) {
+ int ret = udpv6_queue_rcv_skb(sk, skb);
+ sock_put(sk);
- if (sk == NULL) {
- if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
- goto discard;
-
- if (udp_lib_checksum_complete(skb))
- goto discard;
- UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
- proto == IPPROTO_UDPLITE);
-
- icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
+ /* a return value > 0 means to resubmit the input, but
+ * it wants the return to be -protocol, or 0
+ */
+ if (ret > 0)
+ return -ret;
- kfree_skb(skb);
return 0;
}
- /* deliver */
-
- if (sk_rcvqueues_full(sk, skb)) {
- sock_put(sk);
+ if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard;
- }
- bh_lock_sock(sk);
- if (!sock_owned_by_user(sk))
- udpv6_queue_rcv_skb(sk, skb);
- else if (sk_add_backlog(sk, skb)) {
- atomic_inc(&sk->sk_drops);
- bh_unlock_sock(sk);
- sock_put(sk);
+
+ if (udp_lib_checksum_complete(skb))
goto discard;
- }
- bh_unlock_sock(sk);
- sock_put(sk);
+
+ UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
+ icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
+
+ kfree_skb(skb);
return 0;
short_packet:
@@ -1337,7 +1379,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
* do checksum of UDP packets sent as multiple IP fragments.
*/
offset = skb_checksum_start_offset(skb);
- csum = skb_checksum(skb, offset, skb->len- offset, 0);
+ csum = skb_checksum(skb, offset, skb->len - offset, 0);
offset += skb->csum_offset;
*(__sum16 *)(skb->data + offset) = csum_fold(csum);
skb->ip_summed = CHECKSUM_NONE;
@@ -1471,7 +1513,7 @@ struct proto udpv6_prot = {
.getsockopt = udpv6_getsockopt,
.sendmsg = udpv6_sendmsg,
.recvmsg = udpv6_recvmsg,
- .backlog_rcv = udpv6_queue_rcv_skb,
+ .backlog_rcv = __udpv6_queue_rcv_skb,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
.rehash = udp_v6_rehash,
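
The .backlog_rcv switch pairs with the locking added in udpv6_queue_rcv_skb(): skbs parked while the socket is owned by a user context are replayed later through sk->sk_prot->backlog_rcv, so the hook must be the bare queueing helper or the xfrm, encap and UDP-Lite checks would run twice. A simplified sketch of the replay the core performs in release_sock()/__release_sock() (not code from this patch):

	/* Every parked skb goes through sk_backlog_rcv(), which calls
	 * sk->sk_prot->backlog_rcv -- now __udpv6_queue_rcv_skb().
	 */
	static void backlog_replay_sketch(struct sock *sk)
	{
		struct sk_buff *skb = sk->sk_backlog.head;

		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		while (skb) {
			struct sk_buff *next = skb->next;

			skb->next = NULL;
			sk_backlog_rcv(sk, skb);
			skb = next;
		}
	}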
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 8ea65e032733..8625fba96db9 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -334,8 +334,8 @@ int __init xfrm6_init(void)
goto out_policy;
#ifdef CONFIG_SYSCTL
- sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv6_ctl_path,
- xfrm6_policy_table);
+ sysctl_hdr = register_net_sysctl(&init_net, "net/ipv6",
+ xfrm6_policy_table);
#endif
out:
return ret;
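
This conversion, repeated below for ipx and irda, drops the per-protocol ctl_path array in favour of a slash-separated path string. The registration/teardown pairing, shown with this file's table:

	/* New-style registration: the path is a plain string, and
	 * teardown uses the net-aware unregister helper.
	 */
	static struct ctl_table_header *sysctl_hdr;

	static int example_register(void)
	{
		sysctl_hdr = register_net_sysctl(&init_net, "net/ipv6",
						 xfrm6_policy_table);
		return sysctl_hdr ? 0 : -ENOMEM;
	}

	static void example_unregister(void)
	{
		unregister_net_sysctl_table(sysctl_hdr);
	}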
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 4fe1db12d2a3..ee5a7065aacc 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -68,9 +68,9 @@ static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock);
static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;
-static inline unsigned xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr)
+static inline unsigned int xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr)
{
- unsigned h;
+ unsigned int h;
h = (__force u32)(addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3]);
h ^= h >> 16;
@@ -80,7 +80,7 @@ static inline unsigned xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr)
return h;
}
-static inline unsigned xfrm6_tunnel_spi_hash_byspi(u32 spi)
+static inline unsigned int xfrm6_tunnel_spi_hash_byspi(u32 spi)
{
return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE;
}
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 9680226640ef..dfd6faaf0ea7 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -983,10 +983,6 @@ static int ipxitf_create(struct ipx_interface_definition *idef)
goto out;
switch (idef->ipx_dlink_type) {
- case IPX_FRAME_TR_8022:
- printk(KERN_WARNING "IPX frame type 802.2TR is "
- "obsolete Use 802.2 instead.\n");
- /* fall through */
case IPX_FRAME_8022:
dlink_type = htons(ETH_P_802_2);
datalink = p8022_datalink;
@@ -996,10 +992,7 @@ static int ipxitf_create(struct ipx_interface_definition *idef)
dlink_type = htons(ETH_P_IPX);
datalink = pEII_datalink;
break;
- } else
- printk(KERN_WARNING "IPX frame type EtherII over "
- "token-ring is obsolete. Use SNAP "
- "instead.\n");
+ }
/* fall through */
case IPX_FRAME_SNAP:
dlink_type = htons(ETH_P_SNAP);
@@ -1275,7 +1268,6 @@ const char *ipx_frame_name(__be16 frame)
case ETH_P_802_2: rc = "802.2"; break;
case ETH_P_SNAP: rc = "SNAP"; break;
case ETH_P_802_3: rc = "802.3"; break;
- case ETH_P_TR_802_2: rc = "802.2TR"; break;
}
return rc;
@@ -1909,9 +1901,7 @@ static int ipx_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
(const unsigned short __user *)argp);
break;
case SIOCGSTAMP:
- rc = -EINVAL;
- if (sk)
- rc = sock_get_timestamp(sk, argp);
+ rc = sock_get_timestamp(sk, argp);
break;
case SIOCGIFDSTADDR:
case SIOCSIFDSTADDR:
diff --git a/net/ipx/sysctl_net_ipx.c b/net/ipx/sysctl_net_ipx.c
index bd6dca00fb85..ad7c03dedaab 100644
--- a/net/ipx/sysctl_net_ipx.c
+++ b/net/ipx/sysctl_net_ipx.c
@@ -8,6 +8,7 @@
#include <linux/mm.h>
#include <linux/sysctl.h>
+#include <net/net_namespace.h>
#ifndef CONFIG_SYSCTL
#error This file should not be compiled without CONFIG_SYSCTL defined
@@ -27,20 +28,14 @@ static struct ctl_table ipx_table[] = {
{ },
};
-static struct ctl_path ipx_path[] = {
- { .procname = "net", },
- { .procname = "ipx", },
- { }
-};
-
static struct ctl_table_header *ipx_table_header;
void ipx_register_sysctl(void)
{
- ipx_table_header = register_sysctl_paths(ipx_path, ipx_table);
+ ipx_table_header = register_net_sysctl(&init_net, "net/ipx", ipx_table);
}
void ipx_unregister_sysctl(void)
{
- unregister_sysctl_table(ipx_table_header);
+ unregister_net_sysctl_table(ipx_table_header);
}
diff --git a/net/irda/ircomm/ircomm_tty_ioctl.c b/net/irda/ircomm/ircomm_tty_ioctl.c
index 77c5e6499f8f..d0667d68351d 100644
--- a/net/irda/ircomm/ircomm_tty_ioctl.c
+++ b/net/irda/ircomm/ircomm_tty_ioctl.c
@@ -54,7 +54,7 @@
*/
static void ircomm_tty_change_speed(struct ircomm_tty_cb *self)
{
- unsigned cflag, cval;
+ unsigned int cflag, cval;
int baud;
IRDA_DEBUG(2, "%s()\n", __func__ );
diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c
index 2615ffc8e785..de73f6496db5 100644
--- a/net/irda/irsysctl.c
+++ b/net/irda/irsysctl.c
@@ -235,12 +235,6 @@ static ctl_table irda_table[] = {
{ }
};
-static struct ctl_path irda_path[] = {
- { .procname = "net", },
- { .procname = "irda", },
- { }
-};
-
static struct ctl_table_header *irda_table_header;
/*
@@ -251,7 +245,7 @@ static struct ctl_table_header *irda_table_header;
*/
int __init irda_sysctl_register(void)
{
- irda_table_header = register_sysctl_paths(irda_path, irda_table);
+ irda_table_header = register_net_sysctl(&init_net, "net/irda", irda_table);
if (!irda_table_header)
return -ENOMEM;
@@ -266,7 +260,7 @@ int __init irda_sysctl_register(void)
*/
void irda_sysctl_unregister(void)
{
- unregister_sysctl_table(irda_table_header);
+ unregister_net_sysctl_table(irda_table_header);
}
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 11dbb2255ccb..34e418508a67 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1714,7 +1714,7 @@ static int key_notify_sa_flush(const struct km_event *c)
static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
struct net *net = sock_net(sk);
- unsigned proto;
+ unsigned int proto;
struct km_event c;
struct xfrm_audit audit_info;
int err, err2;
@@ -3480,7 +3480,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
/* Addresses to be used by KM for negotiation, if ext is available */
if (k != NULL && (set_sadb_kmaddress(skb, k) < 0))
- return -EINVAL;
+ goto err;
/* selector src */
set_sadb_address(skb, sasize_sel, SADB_EXT_ADDRESS_SRC, sel);
@@ -3547,7 +3547,7 @@ static int pfkey_sendmsg(struct kiocb *kiocb,
goto out;
err = -EMSGSIZE;
- if ((unsigned)len > sk->sk_sndbuf - 32)
+ if ((unsigned int)len > sk->sk_sndbuf - 32)
goto out;
err = -ENOBUFS;
diff --git a/net/l2tp/Makefile b/net/l2tp/Makefile
index 110e7bc2de5e..2870f41ea44d 100644
--- a/net/l2tp/Makefile
+++ b/net/l2tp/Makefile
@@ -10,3 +10,6 @@ obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_IP)) += l2tp_ip.o
obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_V3)) += l2tp_netlink.o
obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_ETH)) += l2tp_eth.o
obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_DEBUGFS)) += l2tp_debugfs.o
+ifneq ($(CONFIG_IPV6),)
+obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_IP)) += l2tp_ip6.o
+endif
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 89ff8c67943e..32b2155e7ab4 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -18,6 +18,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/string.h>
#include <linux/list.h>
@@ -53,6 +55,10 @@
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/protocol.h>
+#include <net/inet6_connection_sock.h>
+#include <net/inet_ecn.h>
+#include <net/ip6_route.h>
+#include <net/ip6_checksum.h>
#include <asm/byteorder.h>
#include <linux/atomic.h>
@@ -82,12 +88,6 @@
/* Default trace flags */
#define L2TP_DEFAULT_DEBUG_FLAGS 0
-#define PRINTK(_mask, _type, _lvl, _fmt, args...) \
- do { \
- if ((_mask) & (_type)) \
- printk(_lvl "L2TP: " _fmt, ##args); \
- } while (0)
-
/* Private data stored for received packets in the skb.
*/
struct l2tp_skb_cb {
@@ -137,14 +137,20 @@ static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
l2tp_tunnel_free(tunnel);
}
#ifdef L2TP_REFCNT_DEBUG
-#define l2tp_tunnel_inc_refcount(_t) do { \
- printk(KERN_DEBUG "l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
- l2tp_tunnel_inc_refcount_1(_t); \
- } while (0)
-#define l2tp_tunnel_dec_refcount(_t) do { \
- printk(KERN_DEBUG "l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
- l2tp_tunnel_dec_refcount_1(_t); \
- } while (0)
+#define l2tp_tunnel_inc_refcount(_t) \
+do { \
+ pr_debug("l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", \
+ __func__, __LINE__, (_t)->name, \
+ atomic_read(&_t->ref_count)); \
+ l2tp_tunnel_inc_refcount_1(_t); \
+} while (0)
+#define l2tp_tunnel_dec_refcount(_t) \
+do { \
+ pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", \
+ __func__, __LINE__, (_t)->name, \
+ atomic_read(&_t->ref_count)); \
+ l2tp_tunnel_dec_refcount_1(_t); \
+} while (0)
#else
#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
@@ -326,16 +332,20 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk
struct sk_buff *skbp;
struct sk_buff *tmp;
u32 ns = L2TP_SKB_CB(skb)->ns;
+ struct l2tp_stats *sstats;
spin_lock_bh(&session->reorder_q.lock);
+ sstats = &session->stats;
skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
if (L2TP_SKB_CB(skbp)->ns > ns) {
__skb_queue_before(&session->reorder_q, skbp, skb);
- PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
- "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
- session->name, ns, L2TP_SKB_CB(skbp)->ns,
- skb_queue_len(&session->reorder_q));
- session->stats.rx_oos_packets++;
+ l2tp_dbg(session, L2TP_MSG_SEQ,
+ "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
+ session->name, ns, L2TP_SKB_CB(skbp)->ns,
+ skb_queue_len(&session->reorder_q));
+ u64_stats_update_begin(&sstats->syncp);
+ sstats->rx_oos_packets++;
+ u64_stats_update_end(&sstats->syncp);
goto out;
}
}
@@ -352,16 +362,23 @@ static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *
{
struct l2tp_tunnel *tunnel = session->tunnel;
int length = L2TP_SKB_CB(skb)->length;
+ struct l2tp_stats *tstats, *sstats;
/* We're about to requeue the skb, so return resources
* to its current owner (a socket receive buffer).
*/
skb_orphan(skb);
- tunnel->stats.rx_packets++;
- tunnel->stats.rx_bytes += length;
- session->stats.rx_packets++;
- session->stats.rx_bytes += length;
+ tstats = &tunnel->stats;
+ u64_stats_update_begin(&tstats->syncp);
+ sstats = &session->stats;
+ u64_stats_update_begin(&sstats->syncp);
+ tstats->rx_packets++;
+ tstats->rx_bytes += length;
+ sstats->rx_packets++;
+ sstats->rx_bytes += length;
+ u64_stats_update_end(&tstats->syncp);
+ u64_stats_update_end(&sstats->syncp);
if (L2TP_SKB_CB(skb)->has_seq) {
/* Bump our Nr */
@@ -371,8 +388,8 @@ static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *
else
session->nr &= 0xffffff;
- PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
- "%s: updated nr to %hu\n", session->name, session->nr);
+ l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated nr to %hu\n",
+ session->name, session->nr);
}
/* call private receive handler */
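
The counters are now bumped inside u64_stats_update_begin()/end() pairs; on 32-bit SMP these wrap a seqcount so 64-bit values stay consistent, and on 64-bit they compile away. A hedged sketch of the matching reader side, which this patch leaves to the stats-reporting paths:

	/* Retry until the writer's seqcount is stable, so the 64-bit
	 * counter reads consistently even on 32-bit SMP kernels.
	 */
	static u64 l2tp_read_rx_bytes(struct l2tp_session *session)
	{
		struct l2tp_stats *sstats = &session->stats;
		unsigned int start;
		u64 rx_bytes;

		do {
			start = u64_stats_fetch_begin(&sstats->syncp);
			rx_bytes = sstats->rx_bytes;
		} while (u64_stats_fetch_retry(&sstats->syncp, start));

		return rx_bytes;
	}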
@@ -392,6 +409,7 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)
{
struct sk_buff *skb;
struct sk_buff *tmp;
+ struct l2tp_stats *sstats;
/* If the pkt at the head of the queue has the nr that we
* expect to send up next, dequeue it and any other
@@ -399,16 +417,19 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)
*/
start:
spin_lock_bh(&session->reorder_q.lock);
+ sstats = &session->stats;
skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
- session->stats.rx_seq_discards++;
- session->stats.rx_errors++;
- PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
- "%s: oos pkt %u len %d discarded (too old), "
- "waiting for %u, reorder_q_len=%d\n",
- session->name, L2TP_SKB_CB(skb)->ns,
- L2TP_SKB_CB(skb)->length, session->nr,
- skb_queue_len(&session->reorder_q));
+ u64_stats_update_begin(&sstats->syncp);
+ sstats->rx_seq_discards++;
+ sstats->rx_errors++;
+ u64_stats_update_end(&sstats->syncp);
+ l2tp_dbg(session, L2TP_MSG_SEQ,
+ "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n",
+ session->name, L2TP_SKB_CB(skb)->ns,
+ L2TP_SKB_CB(skb)->length, session->nr,
+ skb_queue_len(&session->reorder_q));
+ session->reorder_skip = 1;
__skb_unlink(skb, &session->reorder_q);
kfree_skb(skb);
if (session->deref)
@@ -417,13 +438,20 @@ start:
}
if (L2TP_SKB_CB(skb)->has_seq) {
+ if (session->reorder_skip) {
+ l2tp_dbg(session, L2TP_MSG_SEQ,
+ "%s: advancing nr to next pkt: %u -> %u",
+ session->name, session->nr,
+ L2TP_SKB_CB(skb)->ns);
+ session->reorder_skip = 0;
+ session->nr = L2TP_SKB_CB(skb)->ns;
+ }
if (L2TP_SKB_CB(skb)->ns != session->nr) {
- PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
- "%s: holding oos pkt %u len %d, "
- "waiting for %u, reorder_q_len=%d\n",
- session->name, L2TP_SKB_CB(skb)->ns,
- L2TP_SKB_CB(skb)->length, session->nr,
- skb_queue_len(&session->reorder_q));
+ l2tp_dbg(session, L2TP_MSG_SEQ,
+ "%s: holding oos pkt %u len %d, waiting for %u, reorder_q_len=%d\n",
+ session->name, L2TP_SKB_CB(skb)->ns,
+ L2TP_SKB_CB(skb)->length, session->nr,
+ skb_queue_len(&session->reorder_q));
goto out;
}
}
@@ -446,21 +474,43 @@ static inline int l2tp_verify_udp_checksum(struct sock *sk,
{
struct udphdr *uh = udp_hdr(skb);
u16 ulen = ntohs(uh->len);
- struct inet_sock *inet;
__wsum psum;
- if (sk->sk_no_check || skb_csum_unnecessary(skb) || !uh->check)
+ if (sk->sk_no_check || skb_csum_unnecessary(skb))
return 0;
- inet = inet_sk(sk);
- psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr, ulen,
- IPPROTO_UDP, 0);
-
- if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
- !csum_fold(csum_add(psum, skb->csum)))
- return 0;
-
- skb->csum = psum;
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == PF_INET6) {
+ if (!uh->check) {
+ LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n");
+ return 1;
+ }
+ if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
+ !csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr, ulen,
+ IPPROTO_UDP, skb->csum)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ return 0;
+ }
+ skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ skb->len, IPPROTO_UDP,
+ 0));
+ } else
+#endif
+ {
+ struct inet_sock *inet;
+ if (!uh->check)
+ return 0;
+ inet = inet_sk(sk);
+ psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr,
+ ulen, IPPROTO_UDP, 0);
+
+ if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
+ !csum_fold(csum_add(psum, skb->csum)))
+ return 0;
+ skb->csum = psum;
+ }
return __skb_checksum_complete(skb);
}
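
The asymmetry between the two branches above is deliberate: RFC 2460 forbids a zero UDP checksum over IPv6, while IPv4 keeps the historical "zero means not checksummed" meaning. Reduced to its core:

	/* Restatement of the zero-checksum rule the function above
	 * encodes.
	 */
	static int zero_csum_invalid(const struct sock *sk,
				     const struct udphdr *uh)
	{
		if (uh->check)
			return 0;	/* a checksum is present; verify it */
		return sk->sk_family == PF_INET6; /* IPv6: drop; IPv4: accept */
	}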
@@ -532,6 +582,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
struct l2tp_tunnel *tunnel = session->tunnel;
int offset;
u32 ns, nr;
+ struct l2tp_stats *sstats = &session->stats;
/* The ref count is increased since we now hold a pointer to
* the session. Take care to decrement the refcnt when exiting
@@ -544,10 +595,13 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
/* Parse and check optional cookie */
if (session->peer_cookie_len > 0) {
if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
- PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
- "%s: cookie mismatch (%u/%u). Discarding.\n",
- tunnel->name, tunnel->tunnel_id, session->session_id);
- session->stats.rx_cookie_discards++;
+ l2tp_info(tunnel, L2TP_MSG_DATA,
+ "%s: cookie mismatch (%u/%u). Discarding.\n",
+ tunnel->name, tunnel->tunnel_id,
+ session->session_id);
+ u64_stats_update_begin(&sstats->syncp);
+ sstats->rx_cookie_discards++;
+ u64_stats_update_end(&sstats->syncp);
goto discard;
}
ptr += session->peer_cookie_len;
@@ -573,9 +627,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
L2TP_SKB_CB(skb)->ns = ns;
L2TP_SKB_CB(skb)->has_seq = 1;
- PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
- "%s: recv data ns=%u, nr=%u, session nr=%u\n",
- session->name, ns, nr, session->nr);
+ l2tp_dbg(session, L2TP_MSG_SEQ,
+ "%s: recv data ns=%u, nr=%u, session nr=%u\n",
+ session->name, ns, nr, session->nr);
}
} else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
u32 l2h = ntohl(*(__be32 *) ptr);
@@ -587,9 +641,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
L2TP_SKB_CB(skb)->ns = ns;
L2TP_SKB_CB(skb)->has_seq = 1;
- PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
- "%s: recv data ns=%u, session nr=%u\n",
- session->name, ns, session->nr);
+ l2tp_dbg(session, L2TP_MSG_SEQ,
+ "%s: recv data ns=%u, session nr=%u\n",
+ session->name, ns, session->nr);
}
}
@@ -602,9 +656,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
* configure it so.
*/
if ((!session->lns_mode) && (!session->send_seq)) {
- PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO,
- "%s: requested to enable seq numbers by LNS\n",
- session->name);
+ l2tp_info(session, L2TP_MSG_SEQ,
+ "%s: requested to enable seq numbers by LNS\n",
+ session->name);
session->send_seq = -1;
l2tp_session_set_header_len(session, tunnel->version);
}
@@ -613,10 +667,12 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
* If user has configured mandatory sequence numbers, discard.
*/
if (session->recv_seq) {
- PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING,
- "%s: recv data has no seq numbers when required. "
- "Discarding\n", session->name);
- session->stats.rx_seq_discards++;
+ l2tp_warn(session, L2TP_MSG_SEQ,
+ "%s: recv data has no seq numbers when required. Discarding.\n",
+ session->name);
+ u64_stats_update_begin(&sstats->syncp);
+ sstats->rx_seq_discards++;
+ u64_stats_update_end(&sstats->syncp);
goto discard;
}
@@ -626,16 +682,18 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
* LAC is broken. Discard the frame.
*/
if ((!session->lns_mode) && (session->send_seq)) {
- PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO,
- "%s: requested to disable seq numbers by LNS\n",
- session->name);
+ l2tp_info(session, L2TP_MSG_SEQ,
+ "%s: requested to disable seq numbers by LNS\n",
+ session->name);
session->send_seq = 0;
l2tp_session_set_header_len(session, tunnel->version);
} else if (session->send_seq) {
- PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING,
- "%s: recv data has no seq numbers when required. "
- "Discarding\n", session->name);
- session->stats.rx_seq_discards++;
+ l2tp_warn(session, L2TP_MSG_SEQ,
+ "%s: recv data has no seq numbers when required. Discarding.\n",
+ session->name);
+ u64_stats_update_begin(&sstats->syncp);
+ sstats->rx_seq_discards++;
+ u64_stats_update_end(&sstats->syncp);
goto discard;
}
}
@@ -689,13 +747,14 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
* packets
*/
if (L2TP_SKB_CB(skb)->ns != session->nr) {
- session->stats.rx_seq_discards++;
- PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
- "%s: oos pkt %u len %d discarded, "
- "waiting for %u, reorder_q_len=%d\n",
- session->name, L2TP_SKB_CB(skb)->ns,
- L2TP_SKB_CB(skb)->length, session->nr,
- skb_queue_len(&session->reorder_q));
+ u64_stats_update_begin(&sstats->syncp);
+ sstats->rx_seq_discards++;
+ u64_stats_update_end(&sstats->syncp);
+ l2tp_dbg(session, L2TP_MSG_SEQ,
+ "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n",
+ session->name, L2TP_SKB_CB(skb)->ns,
+ L2TP_SKB_CB(skb)->length, session->nr,
+ skb_queue_len(&session->reorder_q));
goto discard;
}
skb_queue_tail(&session->reorder_q, skb);
@@ -716,7 +775,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
return;
discard:
- session->stats.rx_errors++;
+ u64_stats_update_begin(&sstats->syncp);
+ sstats->rx_errors++;
+ u64_stats_update_end(&sstats->syncp);
kfree_skb(skb);
if (session->deref)
@@ -739,9 +800,9 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
unsigned char *ptr, *optr;
u16 hdrflags;
u32 tunnel_id, session_id;
- int offset;
u16 version;
int length;
+ struct l2tp_stats *tstats;
if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb))
goto discard_bad_csum;
@@ -751,8 +812,9 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
/* Short packet? */
if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
- PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
- "%s: recv short packet (len=%d)\n", tunnel->name, skb->len);
+ l2tp_info(tunnel, L2TP_MSG_DATA,
+ "%s: recv short packet (len=%d)\n",
+ tunnel->name, skb->len);
goto error;
}
@@ -762,14 +824,8 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
if (!pskb_may_pull(skb, length))
goto error;
- printk(KERN_DEBUG "%s: recv: ", tunnel->name);
-
- offset = 0;
- do {
- printk(" %02X", skb->data[offset]);
- } while (++offset < length);
-
- printk("\n");
+ pr_debug("%s: recv\n", tunnel->name);
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
}
/* Point to L2TP header */
@@ -781,9 +837,9 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
/* Check protocol version */
version = hdrflags & L2TP_HDR_VER_MASK;
if (version != tunnel->version) {
- PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
- "%s: recv protocol version mismatch: got %d expected %d\n",
- tunnel->name, version, tunnel->version);
+ l2tp_info(tunnel, L2TP_MSG_DATA,
+ "%s: recv protocol version mismatch: got %d expected %d\n",
+ tunnel->name, version, tunnel->version);
goto error;
}
@@ -792,8 +848,9 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
/* If type is control packet, it is handled by userspace. */
if (hdrflags & L2TP_HDRFLAG_T) {
- PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG,
- "%s: recv control packet, len=%d\n", tunnel->name, length);
+ l2tp_dbg(tunnel, L2TP_MSG_DATA,
+ "%s: recv control packet, len=%d\n",
+ tunnel->name, length);
goto error;
}
@@ -821,9 +878,9 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id);
if (!session || !session->recv_skb) {
/* Not found? Pass to userspace to deal with */
- PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
- "%s: no session found (%u/%u). Passing up.\n",
- tunnel->name, tunnel_id, session_id);
+ l2tp_info(tunnel, L2TP_MSG_DATA,
+ "%s: no session found (%u/%u). Passing up.\n",
+ tunnel->name, tunnel_id, session_id);
goto error;
}
@@ -834,7 +891,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
discard_bad_csum:
LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0);
- tunnel->stats.rx_errors++;
+ tstats = &tunnel->stats;
+ u64_stats_update_begin(&tstats->syncp);
+ tstats->rx_errors++;
+ u64_stats_update_end(&tstats->syncp);
kfree_skb(skb);
return 0;
@@ -860,8 +920,8 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (tunnel == NULL)
goto pass_up;
- PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG,
- "%s: received %d bytes\n", tunnel->name, skb->len);
+ l2tp_dbg(tunnel, L2TP_MSG_DATA, "%s: received %d bytes\n",
+ tunnel->name, skb->len);
if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook))
goto pass_up_put;
@@ -903,8 +963,8 @@ static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
*bufp++ = 0;
session->ns++;
session->ns &= 0xffff;
- PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
- "%s: updated ns to %u\n", session->name, session->ns);
+ l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated ns to %u\n",
+ session->name, session->ns);
}
return bufp - optr;
@@ -940,8 +1000,9 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
l2h = 0x40000000 | session->ns;
session->ns++;
session->ns &= 0xffffff;
- PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
- "%s: updated ns to %u\n", session->name, session->ns);
+ l2tp_dbg(session, L2TP_MSG_SEQ,
+ "%s: updated ns to %u\n",
+ session->name, session->ns);
}
*((__be32 *) bufp) = htonl(l2h);
@@ -960,46 +1021,50 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
struct l2tp_tunnel *tunnel = session->tunnel;
unsigned int len = skb->len;
int error;
+ struct l2tp_stats *tstats, *sstats;
/* Debug */
if (session->send_seq)
- PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG,
- "%s: send %Zd bytes, ns=%u\n", session->name,
- data_len, session->ns - 1);
+ l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %Zd bytes, ns=%u\n",
+ session->name, data_len, session->ns - 1);
else
- PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG,
- "%s: send %Zd bytes\n", session->name, data_len);
+ l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %Zd bytes\n",
+ session->name, data_len);
if (session->debug & L2TP_MSG_DATA) {
- int i;
int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
unsigned char *datap = skb->data + uhlen;
- printk(KERN_DEBUG "%s: xmit:", session->name);
- for (i = 0; i < (len - uhlen); i++) {
- printk(" %02X", *datap++);
- if (i == 31) {
- printk(" ...");
- break;
- }
- }
- printk("\n");
+ pr_debug("%s: xmit\n", session->name);
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+ datap, min_t(size_t, 32, len - uhlen));
}
/* Queue the packet to IP for output */
skb->local_df = 1;
- error = ip_queue_xmit(skb, fl);
+#if IS_ENABLED(CONFIG_IPV6)
+ if (skb->sk->sk_family == PF_INET6)
+ error = inet6_csk_xmit(skb, NULL);
+ else
+#endif
+ error = ip_queue_xmit(skb, fl);
/* Update stats */
+ tstats = &tunnel->stats;
+ u64_stats_update_begin(&tstats->syncp);
+ sstats = &session->stats;
+ u64_stats_update_begin(&sstats->syncp);
if (error >= 0) {
- tunnel->stats.tx_packets++;
- tunnel->stats.tx_bytes += len;
- session->stats.tx_packets++;
- session->stats.tx_bytes += len;
+ tstats->tx_packets++;
+ tstats->tx_bytes += len;
+ sstats->tx_packets++;
+ sstats->tx_bytes += len;
} else {
- tunnel->stats.tx_errors++;
- session->stats.tx_errors++;
+ tstats->tx_errors++;
+ sstats->tx_errors++;
}
+ u64_stats_update_end(&tstats->syncp);
+ u64_stats_update_end(&sstats->syncp);
return 0;
}
@@ -1021,6 +1086,31 @@ static inline void l2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
skb->destructor = l2tp_sock_wfree;
}
+#if IS_ENABLED(CONFIG_IPV6)
+static void l2tp_xmit_ipv6_csum(struct sock *sk, struct sk_buff *skb,
+ int udp_len)
+{
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct udphdr *uh = udp_hdr(skb);
+
+ if (!skb_dst(skb) || !skb_dst(skb)->dev ||
+ !(skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) {
+ __wsum csum = skb_checksum(skb, 0, udp_len, 0);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ uh->check = csum_ipv6_magic(&np->saddr, &np->daddr, udp_len,
+ IPPROTO_UDP, csum);
+ if (uh->check == 0)
+ uh->check = CSUM_MANGLED_0;
+ } else {
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = offsetof(struct udphdr, check);
+ uh->check = ~csum_ipv6_magic(&np->saddr, &np->daddr,
+ udp_len, IPPROTO_UDP, 0);
+ }
+}
+#endif
+
/* If caller requires the skb to have a ppp header, the header must be
* inserted in the skb data before calling this function.
*/
@@ -1089,6 +1179,11 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
uh->check = 0;
/* Calculate UDP checksum if configured to do so */
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == PF_INET6)
+ l2tp_xmit_ipv6_csum(sk, skb, udp_len);
+ else
+#endif
if (sk->sk_no_check == UDP_CSUM_NOXMIT)
skb->ip_summed = CHECKSUM_NONE;
else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
@@ -1141,8 +1236,7 @@ static void l2tp_tunnel_destruct(struct sock *sk)
if (tunnel == NULL)
goto end;
- PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
- "%s: closing...\n", tunnel->name);
+ l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);
/* Close all sessions */
l2tp_tunnel_closeall(tunnel);
@@ -1184,8 +1278,8 @@ static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
BUG_ON(tunnel == NULL);
- PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
- "%s: closing all sessions...\n", tunnel->name);
+ l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing all sessions...\n",
+ tunnel->name);
write_lock_bh(&tunnel->hlist_lock);
for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
@@ -1193,8 +1287,8 @@ again:
hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
session = hlist_entry(walk, struct l2tp_session, hlist);
- PRINTK(session->debug, L2TP_MSG_CONTROL, KERN_INFO,
- "%s: closing session\n", session->name);
+ l2tp_info(session, L2TP_MSG_CONTROL,
+ "%s: closing session\n", session->name);
hlist_del_init(&session->hlist);
@@ -1247,8 +1341,7 @@ static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
BUG_ON(atomic_read(&tunnel->ref_count) != 0);
BUG_ON(tunnel->sock != NULL);
- PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
- "%s: free...\n", tunnel->name);
+ l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name);
/* Remove from tunnel list */
spin_lock_bh(&pn->l2tp_tunnel_list_lock);
@@ -1268,31 +1361,69 @@ static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2t
{
int err = -EINVAL;
struct sockaddr_in udp_addr;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct sockaddr_in6 udp6_addr;
+ struct sockaddr_l2tpip6 ip6_addr;
+#endif
struct sockaddr_l2tpip ip_addr;
struct socket *sock = NULL;
switch (cfg->encap) {
case L2TP_ENCAPTYPE_UDP:
- err = sock_create(AF_INET, SOCK_DGRAM, 0, sockp);
- if (err < 0)
- goto out;
+#if IS_ENABLED(CONFIG_IPV6)
+ if (cfg->local_ip6 && cfg->peer_ip6) {
+ err = sock_create(AF_INET6, SOCK_DGRAM, 0, sockp);
+ if (err < 0)
+ goto out;
- sock = *sockp;
+ sock = *sockp;
- memset(&udp_addr, 0, sizeof(udp_addr));
- udp_addr.sin_family = AF_INET;
- udp_addr.sin_addr = cfg->local_ip;
- udp_addr.sin_port = htons(cfg->local_udp_port);
- err = kernel_bind(sock, (struct sockaddr *) &udp_addr, sizeof(udp_addr));
- if (err < 0)
- goto out;
+ memset(&udp6_addr, 0, sizeof(udp6_addr));
+ udp6_addr.sin6_family = AF_INET6;
+ memcpy(&udp6_addr.sin6_addr, cfg->local_ip6,
+ sizeof(udp6_addr.sin6_addr));
+ udp6_addr.sin6_port = htons(cfg->local_udp_port);
+ err = kernel_bind(sock, (struct sockaddr *) &udp6_addr,
+ sizeof(udp6_addr));
+ if (err < 0)
+ goto out;
- udp_addr.sin_family = AF_INET;
- udp_addr.sin_addr = cfg->peer_ip;
- udp_addr.sin_port = htons(cfg->peer_udp_port);
- err = kernel_connect(sock, (struct sockaddr *) &udp_addr, sizeof(udp_addr), 0);
- if (err < 0)
- goto out;
+ udp6_addr.sin6_family = AF_INET6;
+ memcpy(&udp6_addr.sin6_addr, cfg->peer_ip6,
+ sizeof(udp6_addr.sin6_addr));
+ udp6_addr.sin6_port = htons(cfg->peer_udp_port);
+ err = kernel_connect(sock,
+ (struct sockaddr *) &udp6_addr,
+ sizeof(udp6_addr), 0);
+ if (err < 0)
+ goto out;
+ } else
+#endif
+ {
+ err = sock_create(AF_INET, SOCK_DGRAM, 0, sockp);
+ if (err < 0)
+ goto out;
+
+ sock = *sockp;
+
+ memset(&udp_addr, 0, sizeof(udp_addr));
+ udp_addr.sin_family = AF_INET;
+ udp_addr.sin_addr = cfg->local_ip;
+ udp_addr.sin_port = htons(cfg->local_udp_port);
+ err = kernel_bind(sock, (struct sockaddr *) &udp_addr,
+ sizeof(udp_addr));
+ if (err < 0)
+ goto out;
+
+ udp_addr.sin_family = AF_INET;
+ udp_addr.sin_addr = cfg->peer_ip;
+ udp_addr.sin_port = htons(cfg->peer_udp_port);
+ err = kernel_connect(sock,
+ (struct sockaddr *) &udp_addr,
+ sizeof(udp_addr), 0);
+ if (err < 0)
+ goto out;
+ }
if (!cfg->use_udp_checksums)
sock->sk->sk_no_check = UDP_CSUM_NOXMIT;
@@ -1300,27 +1431,61 @@ static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2t
break;
case L2TP_ENCAPTYPE_IP:
- err = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_L2TP, sockp);
- if (err < 0)
- goto out;
+#if IS_ENABLED(CONFIG_IPV6)
+ if (cfg->local_ip6 && cfg->peer_ip6) {
+ err = sock_create(AF_INET6, SOCK_DGRAM, IPPROTO_L2TP,
+ sockp);
+ if (err < 0)
+ goto out;
- sock = *sockp;
+ sock = *sockp;
- memset(&ip_addr, 0, sizeof(ip_addr));
- ip_addr.l2tp_family = AF_INET;
- ip_addr.l2tp_addr = cfg->local_ip;
- ip_addr.l2tp_conn_id = tunnel_id;
- err = kernel_bind(sock, (struct sockaddr *) &ip_addr, sizeof(ip_addr));
- if (err < 0)
- goto out;
+ memset(&ip6_addr, 0, sizeof(ip6_addr));
+ ip6_addr.l2tp_family = AF_INET6;
+ memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
+ sizeof(ip6_addr.l2tp_addr));
+ ip6_addr.l2tp_conn_id = tunnel_id;
+ err = kernel_bind(sock, (struct sockaddr *) &ip6_addr,
+ sizeof(ip6_addr));
+ if (err < 0)
+ goto out;
- ip_addr.l2tp_family = AF_INET;
- ip_addr.l2tp_addr = cfg->peer_ip;
- ip_addr.l2tp_conn_id = peer_tunnel_id;
- err = kernel_connect(sock, (struct sockaddr *) &ip_addr, sizeof(ip_addr), 0);
- if (err < 0)
- goto out;
+ ip6_addr.l2tp_family = AF_INET6;
+ memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
+ sizeof(ip6_addr.l2tp_addr));
+ ip6_addr.l2tp_conn_id = peer_tunnel_id;
+ err = kernel_connect(sock,
+ (struct sockaddr *) &ip6_addr,
+ sizeof(ip6_addr), 0);
+ if (err < 0)
+ goto out;
+ } else
+#endif
+ {
+ err = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_L2TP,
+ sockp);
+ if (err < 0)
+ goto out;
+
+ sock = *sockp;
+
+ memset(&ip_addr, 0, sizeof(ip_addr));
+ ip_addr.l2tp_family = AF_INET;
+ ip_addr.l2tp_addr = cfg->local_ip;
+ ip_addr.l2tp_conn_id = tunnel_id;
+ err = kernel_bind(sock, (struct sockaddr *) &ip_addr,
+ sizeof(ip_addr));
+ if (err < 0)
+ goto out;
+ ip_addr.l2tp_family = AF_INET;
+ ip_addr.l2tp_addr = cfg->peer_ip;
+ ip_addr.l2tp_conn_id = peer_tunnel_id;
+ err = kernel_connect(sock, (struct sockaddr *) &ip_addr,
+ sizeof(ip_addr), 0);
+ if (err < 0)
+ goto out;
+ }
break;
default:
@@ -1357,7 +1522,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
err = -EBADF;
sock = sockfd_lookup(fd, &err);
if (!sock) {
- printk(KERN_ERR "tunl %hu: sockfd_lookup(fd=%d) returned %d\n",
+ pr_err("tunl %hu: sockfd_lookup(fd=%d) returned %d\n",
tunnel_id, fd, err);
goto err;
}
@@ -1373,7 +1538,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
case L2TP_ENCAPTYPE_UDP:
err = -EPROTONOSUPPORT;
if (sk->sk_protocol != IPPROTO_UDP) {
- printk(KERN_ERR "tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
+ pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
goto err;
}
@@ -1381,7 +1546,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
case L2TP_ENCAPTYPE_IP:
err = -EPROTONOSUPPORT;
if (sk->sk_protocol != IPPROTO_L2TP) {
- printk(KERN_ERR "tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
+ pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
goto err;
}
@@ -1424,6 +1589,12 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == PF_INET6)
+ udpv6_encap_enable();
+ else
+#endif
+ udp_encap_enable();
}
sk->sk_user_data = tunnel;
@@ -1577,7 +1748,7 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
session->session_id = session_id;
session->peer_session_id = peer_session_id;
- session->nr = 1;
+ session->nr = 0;
sprintf(&session->name[0], "sess %u/%u",
tunnel->tunnel_id, session->session_id);
@@ -1683,7 +1854,7 @@ static int __init l2tp_init(void)
if (rc)
goto out;
- printk(KERN_INFO "L2TP core driver, %s\n", L2TP_DRV_VERSION);
+ pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);
out:
return rc;
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index a16a48e79fab..a38ec6cdeee1 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -45,6 +45,7 @@ struct l2tp_stats {
u64 rx_oos_packets;
u64 rx_errors;
u64 rx_cookie_discards;
+ struct u64_stats_sync syncp;
};
struct l2tp_tunnel;
@@ -54,15 +55,15 @@ struct l2tp_tunnel;
*/
struct l2tp_session_cfg {
enum l2tp_pwtype pw_type;
- unsigned data_seq:2; /* data sequencing level
+ unsigned int data_seq:2; /* data sequencing level
* 0 => none, 1 => IP only,
* 2 => all
*/
- unsigned recv_seq:1; /* expect receive packets with
+ unsigned int recv_seq:1; /* expect receive packets with
* sequence numbers? */
- unsigned send_seq:1; /* send packets with sequence
+ unsigned int send_seq:1; /* send packets with sequence
* numbers? */
- unsigned lns_mode:1; /* behave as LNS? LAC enables
+ unsigned int lns_mode:1; /* behave as LNS? LAC enables
* sequence numbers under
* control of LNS. */
int debug; /* bitmask of debug message
@@ -107,21 +108,22 @@ struct l2tp_session {
char name[32]; /* for logging */
char ifname[IFNAMSIZ];
- unsigned data_seq:2; /* data sequencing level
+ unsigned int data_seq:2; /* data sequencing level
* 0 => none, 1 => IP only,
* 2 => all
*/
- unsigned recv_seq:1; /* expect receive packets with
+ unsigned int recv_seq:1; /* expect receive packets with
* sequence numbers? */
- unsigned send_seq:1; /* send packets with sequence
+ unsigned int send_seq:1; /* send packets with sequence
* numbers? */
- unsigned lns_mode:1; /* behave as LNS? LAC enables
+ unsigned int lns_mode:1; /* behave as LNS? LAC enables
* sequence numbers under
* control of LNS. */
int debug; /* bitmask of debug message
* categories */
int reorder_timeout; /* configured reorder timeout
* (in jiffies) */
+ int reorder_skip; /* set if skip to next nr */
int mtu;
int mru;
enum l2tp_pwtype pwtype;
@@ -150,6 +152,10 @@ struct l2tp_tunnel_cfg {
/* Used only for kernel-created sockets */
struct in_addr local_ip;
struct in_addr peer_ip;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr *local_ip6;
+ struct in6_addr *peer_ip6;
+#endif
u16 local_udp_port;
u16 peer_udp_port;
unsigned int use_udp_checksums:1;
@@ -255,17 +261,36 @@ static inline void l2tp_session_dec_refcount_1(struct l2tp_session *session)
}
#ifdef L2TP_REFCNT_DEBUG
-#define l2tp_session_inc_refcount(_s) do { \
- printk(KERN_DEBUG "l2tp_session_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_s)->name, atomic_read(&_s->ref_count)); \
- l2tp_session_inc_refcount_1(_s); \
- } while (0)
-#define l2tp_session_dec_refcount(_s) do { \
- printk(KERN_DEBUG "l2tp_session_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_s)->name, atomic_read(&_s->ref_count)); \
- l2tp_session_dec_refcount_1(_s); \
- } while (0)
+#define l2tp_session_inc_refcount(_s) \
+do { \
+ pr_debug("l2tp_session_inc_refcount: %s:%d %s: cnt=%d\n", \
+ __func__, __LINE__, (_s)->name, \
+ atomic_read(&_s->ref_count)); \
+ l2tp_session_inc_refcount_1(_s); \
+} while (0)
+#define l2tp_session_dec_refcount(_s) \
+do { \
+ pr_debug("l2tp_session_dec_refcount: %s:%d %s: cnt=%d\n", \
+ __func__, __LINE__, (_s)->name, \
+ atomic_read(&_s->ref_count)); \
+ l2tp_session_dec_refcount_1(_s); \
+} while (0)
#else
#define l2tp_session_inc_refcount(s) l2tp_session_inc_refcount_1(s)
#define l2tp_session_dec_refcount(s) l2tp_session_dec_refcount_1(s)
#endif
+#define l2tp_printk(ptr, type, func, fmt, ...) \
+do { \
+ if (((ptr)->debug) & (type)) \
+ func(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define l2tp_warn(ptr, type, fmt, ...) \
+ l2tp_printk(ptr, type, pr_warn, fmt, ##__VA_ARGS__)
+#define l2tp_info(ptr, type, fmt, ...) \
+ l2tp_printk(ptr, type, pr_info, fmt, ##__VA_ARGS__)
+#define l2tp_dbg(ptr, type, fmt, ...) \
+ l2tp_printk(ptr, type, pr_debug, fmt, ##__VA_ARGS__)
+
#endif /* _L2TP_CORE_H_ */
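
The new helpers work on anything carrying a ->debug bitmask, so tunnels and sessions share them; a message is emitted only when its category bit is set. Typical call sites from this patch:

	/* Gated on tunnel->debug & L2TP_MSG_CONTROL: */
	l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);

	/* Gated on session->debug & L2TP_MSG_SEQ: */
	l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated nr to %hu\n",
		 session->name, session->nr);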
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 76130134bfa6..c3813bc84552 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -9,6 +9,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
@@ -122,6 +124,14 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
seq_printf(m, "\nTUNNEL %u peer %u", tunnel->tunnel_id, tunnel->peer_tunnel_id);
if (tunnel->sock) {
struct inet_sock *inet = inet_sk(tunnel->sock);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (tunnel->sock->sk_family == AF_INET6) {
+ struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
+ seq_printf(m, " from %pI6c to %pI6c\n",
+ &np->saddr, &np->daddr);
+ } else
+#endif
seq_printf(m, " from %pI4 to %pI4\n",
&inet->inet_saddr, &inet->inet_daddr);
if (tunnel->encap == L2TP_ENCAPTYPE_UDP)
@@ -317,11 +327,11 @@ static int __init l2tp_debugfs_init(void)
if (tunnels == NULL)
rc = -EIO;
- printk(KERN_INFO "L2TP debugfs support\n");
+ pr_info("L2TP debugfs support\n");
out:
if (rc)
- printk(KERN_WARNING "l2tp debugfs: unable to init\n");
+ pr_warn("unable to init\n");
return rc;
}
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 63fe5f353f04..443591d629ca 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -9,6 +9,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
@@ -115,21 +117,14 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
if (session->debug & L2TP_MSG_DATA) {
unsigned int length;
- int offset;
u8 *ptr = skb->data;
length = min(32u, skb->len);
if (!pskb_may_pull(skb, length))
goto error;
- printk(KERN_DEBUG "%s: eth recv: ", session->name);
-
- offset = 0;
- do {
- printk(" %02X", ptr[offset]);
- } while (++offset < length);
-
- printk("\n");
+ pr_debug("%s: eth recv\n", session->name);
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
}
if (!pskb_may_pull(skb, sizeof(ETH_HLEN)))
@@ -308,7 +303,7 @@ static int __init l2tp_eth_init(void)
if (err)
goto out_unreg;
- printk(KERN_INFO "L2TP ethernet pseudowire support (L2TPv3)\n");
+ pr_info("L2TP ethernet pseudowire support (L2TPv3)\n");
return 0;
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 55670ec3cd0f..70614e7affab 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -9,6 +9,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/icmp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
@@ -32,15 +34,8 @@ struct l2tp_ip_sock {
/* inet_sock has to be the first member of l2tp_ip_sock */
struct inet_sock inet;
- __u32 conn_id;
- __u32 peer_conn_id;
-
- __u64 tx_packets;
- __u64 tx_bytes;
- __u64 tx_errors;
- __u64 rx_packets;
- __u64 rx_bytes;
- __u64 rx_errors;
+ u32 conn_id;
+ u32 peer_conn_id;
};
static DEFINE_RWLOCK(l2tp_ip_lock);
@@ -127,7 +122,6 @@ static int l2tp_ip_recv(struct sk_buff *skb)
struct l2tp_session *session;
struct l2tp_tunnel *tunnel = NULL;
int length;
- int offset;
/* Point to L2TP header */
optr = ptr = skb->data;
@@ -162,14 +156,8 @@ static int l2tp_ip_recv(struct sk_buff *skb)
if (!pskb_may_pull(skb, length))
goto discard;
- printk(KERN_DEBUG "%s: ip recv: ", tunnel->name);
-
- offset = 0;
- do {
- printk(" %02X", ptr[offset]);
- } while (++offset < length);
-
- printk("\n");
+ pr_debug("%s: ip recv\n", tunnel->name);
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
}
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
@@ -232,7 +220,7 @@ static void l2tp_ip_close(struct sock *sk, long timeout)
{
write_lock_bh(&l2tp_ip_lock);
hlist_del_init(&sk->sk_bind_node);
- hlist_del_init(&sk->sk_node);
+ sk_del_node_init(sk);
write_unlock_bh(&l2tp_ip_lock);
sk_common_release(sk);
}
@@ -251,9 +239,16 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct inet_sock *inet = inet_sk(sk);
struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
- int ret = -EINVAL;
+ int ret;
int chk_addr_ret;
+ if (!sock_flag(sk, SOCK_ZAPPED))
+ return -EINVAL;
+ if (addr_len < sizeof(struct sockaddr_l2tpip))
+ return -EINVAL;
+ if (addr->l2tp_family != AF_INET)
+ return -EINVAL;
+
ret = -EADDRINUSE;
read_lock_bh(&l2tp_ip_lock);
if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id))
@@ -271,7 +266,8 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
goto out;
- inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
+ if (addr->l2tp_addr.s_addr)
+ inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
inet->inet_saddr = 0; /* Use device */
sk_dst_reset(sk);
@@ -283,6 +279,8 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sk_del_node_init(sk);
write_unlock_bh(&l2tp_ip_lock);
ret = 0;
+ sock_reset_flag(sk, SOCK_ZAPPED);
+
out:
release_sock(sk);
@@ -297,72 +295,42 @@ out_in_use:
static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
- struct inet_sock *inet = inet_sk(sk);
- struct flowi4 *fl4;
- struct rtable *rt;
- __be32 saddr;
- int oif, rc;
-
- rc = -EINVAL;
- if (addr_len < sizeof(*lsa))
- goto out;
-
- rc = -EAFNOSUPPORT;
- if (lsa->l2tp_family != AF_INET)
- goto out;
-
- lock_sock(sk);
+ int rc;
- sk_dst_reset(sk);
+ if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
+ return -EINVAL;
- oif = sk->sk_bound_dev_if;
- saddr = inet->inet_saddr;
+ if (addr_len < sizeof(*lsa))
+ return -EINVAL;
- rc = -EINVAL;
if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
- goto out;
+ return -EINVAL;
- fl4 = &inet->cork.fl.u.ip4;
- rt = ip_route_connect(fl4, lsa->l2tp_addr.s_addr, saddr,
- RT_CONN_FLAGS(sk), oif,
- IPPROTO_L2TP,
- 0, 0, sk, true);
- if (IS_ERR(rt)) {
- rc = PTR_ERR(rt);
- if (rc == -ENETUNREACH)
- IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES);
- goto out;
- }
+ rc = ip4_datagram_connect(sk, uaddr, addr_len);
+ if (rc < 0)
+ return rc;
- rc = -ENETUNREACH;
- if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
- ip_rt_put(rt);
- goto out;
- }
+ lock_sock(sk);
l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
- if (!inet->inet_saddr)
- inet->inet_saddr = fl4->saddr;
- if (!inet->inet_rcv_saddr)
- inet->inet_rcv_saddr = fl4->saddr;
- inet->inet_daddr = fl4->daddr;
- sk->sk_state = TCP_ESTABLISHED;
- inet->inet_id = jiffies;
-
- sk_dst_set(sk, &rt->dst);
-
write_lock_bh(&l2tp_ip_lock);
hlist_del_init(&sk->sk_bind_node);
sk_add_bind_node(sk, &l2tp_ip_bind_table);
write_unlock_bh(&l2tp_ip_lock);
- rc = 0;
-out:
release_sock(sk);
return rc;
}
+static int l2tp_ip_disconnect(struct sock *sk, int flags)
+{
+ if (sock_flag(sk, SOCK_ZAPPED))
+ return 0;
+
+ return udp_disconnect(sk, flags);
+}
+
static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
@@ -413,7 +381,6 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
{
struct sk_buff *skb;
int rc;
- struct l2tp_ip_sock *lsa = l2tp_ip_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct rtable *rt = NULL;
struct flowi4 *fl4;
@@ -441,8 +408,9 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
daddr = lip->l2tp_addr.s_addr;
} else {
+ rc = -EDESTADDRREQ;
if (sk->sk_state != TCP_ESTABLISHED)
- return -EDESTADDRREQ;
+ goto out;
daddr = inet->inet_daddr;
connected = 1;
@@ -512,14 +480,8 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
rcu_read_unlock();
error:
- /* Update stats */
- if (rc >= 0) {
- lsa->tx_packets++;
- lsa->tx_bytes += len;
+ if (rc >= 0)
rc = len;
- } else {
- lsa->tx_errors++;
- }
out:
release_sock(sk);
@@ -537,7 +499,6 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
size_t len, int noblock, int flags, int *addr_len)
{
struct inet_sock *inet = inet_sk(sk);
- struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
size_t copied = 0;
int err = -EOPNOTSUPP;
struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
@@ -579,15 +540,7 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
done:
skb_free_datagram(sk, skb);
out:
- if (err) {
- lsk->rx_errors++;
- return err;
- }
-
- lsk->rx_packets++;
- lsk->rx_bytes += copied;
-
- return copied;
+ return err ? err : copied;
}
static struct proto l2tp_ip_prot = {
@@ -597,7 +550,7 @@ static struct proto l2tp_ip_prot = {
.close = l2tp_ip_close,
.bind = l2tp_ip_bind,
.connect = l2tp_ip_connect,
- .disconnect = udp_disconnect,
+ .disconnect = l2tp_ip_disconnect,
.ioctl = udp_ioctl,
.destroy = l2tp_ip_destroy_sock,
.setsockopt = ip_setsockopt,
@@ -655,7 +608,7 @@ static int __init l2tp_ip_init(void)
{
int err;
- printk(KERN_INFO "L2TP IP encapsulation support (L2TPv3)\n");
+ pr_info("L2TP IP encapsulation support (L2TPv3)\n");
err = proto_register(&l2tp_ip_prot, 1);
if (err != 0)
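
With the SOCK_ZAPPED checks above, connect() on an L2TP/IP socket now fails with EINVAL until the socket has been bound, since there are no ports to autobind and the connection ID must come from bind(). A hedged userspace sketch of the required order (field names per the sockaddr_l2tpip uapi; addresses and IDs are illustrative):

	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <linux/l2tp.h>

	int l2tp_ip_socket_sketch(void)
	{
		struct sockaddr_l2tpip local, peer;
		int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP);

		memset(&local, 0, sizeof(local));
		local.l2tp_family = AF_INET;
		local.l2tp_addr.s_addr = INADDR_ANY;
		local.l2tp_conn_id = 42;	/* our tunnel id */
		if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0)
			return -1;		/* must succeed before connect */

		memset(&peer, 0, sizeof(peer));
		peer.l2tp_family = AF_INET;
		peer.l2tp_addr.s_addr = htonl(0xc0a80001);	/* 192.168.0.1 */
		peer.l2tp_conn_id = 43;		/* peer's tunnel id */
		return connect(fd, (struct sockaddr *)&peer, sizeof(peer));
	}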
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
new file mode 100644
index 000000000000..35e1e4bde587
--- /dev/null
+++ b/net/l2tp/l2tp_ip6.c
@@ -0,0 +1,803 @@
+/*
+ * L2TPv3 IP encapsulation support for IPv6
+ *
+ * Copyright (c) 2012 Katalix Systems Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/icmp.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/random.h>
+#include <linux/socket.h>
+#include <linux/l2tp.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <net/sock.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/inet_common.h>
+#include <net/inet_hashtables.h>
+#include <net/tcp_states.h>
+#include <net/protocol.h>
+#include <net/xfrm.h>
+
+#include <net/transp_v6.h>
+#include <net/addrconf.h>
+#include <net/ip6_route.h>
+
+#include "l2tp_core.h"
+
+struct l2tp_ip6_sock {
+ /* inet_sock has to be the first member of l2tp_ip6_sock */
+ struct inet_sock inet;
+
+ u32 conn_id;
+ u32 peer_conn_id;
+
+ /* ipv6_pinfo has to be the last member of l2tp_ip6_sock, see
+ inet6_sk_generic */
+ struct ipv6_pinfo inet6;
+};
+
+static DEFINE_RWLOCK(l2tp_ip6_lock);
+static struct hlist_head l2tp_ip6_table;
+static struct hlist_head l2tp_ip6_bind_table;
+
+static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk)
+{
+ return (struct l2tp_ip6_sock *)sk;
+}
+
+static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
+ struct in6_addr *laddr,
+ int dif, u32 tunnel_id)
+{
+ struct hlist_node *node;
+ struct sock *sk;
+
+ sk_for_each_bound(sk, node, &l2tp_ip6_bind_table) {
+ struct in6_addr *addr = inet6_rcv_saddr(sk);
+ struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
+
+ if (l2tp == NULL)
+ continue;
+
+ if ((l2tp->conn_id == tunnel_id) &&
+ net_eq(sock_net(sk), net) &&
+ !(addr && !ipv6_addr_equal(addr, laddr)) &&
+ !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
+ goto found;
+ }
+
+ sk = NULL;
+found:
+ return sk;
+}
+
+static inline struct sock *l2tp_ip6_bind_lookup(struct net *net,
+ struct in6_addr *laddr,
+ int dif, u32 tunnel_id)
+{
+ struct sock *sk = __l2tp_ip6_bind_lookup(net, laddr, dif, tunnel_id);
+ if (sk)
+ sock_hold(sk);
+
+ return sk;
+}
+
+/* When processing receive frames, there are two cases to
+ * consider. Data frames consist of a non-zero session-id and an
+ * optional cookie. Control frames consist of a regular L2TP header
+ * preceded by 32-bits of zeros.
+ *
+ * L2TPv3 Session Header Over IP
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Session ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Cookie (optional, maximum 64 bits)...
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * L2TPv3 Control Message Header Over IP
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | (32 bits of zeros) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |T|L|x|x|S|x|x|x|x|x|x|x| Ver | Length |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Control Connection ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Ns | Nr |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * All control frames are passed to userspace.
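+ *
+ * A worked example, for illustration (not from the protocol text
+ * above): a data packet for session 0x2a begins 00 00 00 2a, followed
+ * by any configured cookie; a v3 control packet begins
+ * 00 00 00 00 c8 03, since the T, L and S bits with Ver = 3 give
+ * 0xc8 0x03 in the two bytes after the zero word.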
+ */
+static int l2tp_ip6_recv(struct sk_buff *skb)
+{
+ struct sock *sk;
+ u32 session_id;
+ u32 tunnel_id;
+ unsigned char *ptr, *optr;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel = NULL;
+ int length;
+
+ /* Point to L2TP header */
+ optr = ptr = skb->data;
+
+ if (!pskb_may_pull(skb, 4))
+ goto discard;
+
+ session_id = ntohl(*((__be32 *) ptr));
+ ptr += 4;
+
+ /* RFC3931: L2TP/IP packets have the first 4 bytes containing
+ * the session_id. If it is 0, the packet is an L2TP control
+ * frame and the session_id value can be discarded.
+ */
+ if (session_id == 0) {
+ __skb_pull(skb, 4);
+ goto pass_up;
+ }
+
+ /* Ok, this is a data packet. Lookup the session. */
+ session = l2tp_session_find(&init_net, NULL, session_id);
+ if (session == NULL)
+ goto discard;
+
+ tunnel = session->tunnel;
+ if (tunnel == NULL)
+ goto discard;
+
+ /* Trace packet contents, if enabled */
+ if (tunnel->debug & L2TP_MSG_DATA) {
+ length = min(32u, skb->len);
+ if (!pskb_may_pull(skb, length))
+ goto discard;
+
+ pr_debug("%s: ip recv\n", tunnel->name);
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
+ }
+
+ l2tp_recv_common(session, skb, ptr, optr, 0, skb->len,
+ tunnel->recv_payload_hook);
+ return 0;
+
+pass_up:
+ /* Get the tunnel_id from the L2TP header */
+ if (!pskb_may_pull(skb, 12))
+ goto discard;
+
+ if ((skb->data[0] & 0xc0) != 0xc0)
+ goto discard;
+
+ tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
+ tunnel = l2tp_tunnel_find(&init_net, tunnel_id);
+ if (tunnel != NULL)
+ sk = tunnel->sock;
+ else {
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+
+ read_lock_bh(&l2tp_ip6_lock);
+ sk = __l2tp_ip6_bind_lookup(&init_net, &iph->daddr,
+ 0, tunnel_id);
+ read_unlock_bh(&l2tp_ip6_lock);
+ }
+
+ if (sk == NULL)
+ goto discard;
+
+ sock_hold(sk);
+
+ if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
+ goto discard_put;
+
+ nf_reset(skb);
+
+ return sk_receive_skb(sk, skb, 1);
+
+discard_put:
+ sock_put(sk);
+
+discard:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int l2tp_ip6_open(struct sock *sk)
+{
+ /* Prevent autobind. We don't have ports. */
+ inet_sk(sk)->inet_num = IPPROTO_L2TP;
+
+ write_lock_bh(&l2tp_ip6_lock);
+ sk_add_node(sk, &l2tp_ip6_table);
+ write_unlock_bh(&l2tp_ip6_lock);
+
+ return 0;
+}
+
+static void l2tp_ip6_close(struct sock *sk, long timeout)
+{
+ write_lock_bh(&l2tp_ip6_lock);
+ hlist_del_init(&sk->sk_bind_node);
+ sk_del_node_init(sk);
+ write_unlock_bh(&l2tp_ip6_lock);
+
+ sk_common_release(sk);
+}
+
+static void l2tp_ip6_destroy_sock(struct sock *sk)
+{
+ lock_sock(sk);
+ ip6_flush_pending_frames(sk);
+ release_sock(sk);
+
+ inet6_destroy_sock(sk);
+}
+
+static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+ struct inet_sock *inet = inet_sk(sk);
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr;
+ __be32 v4addr = 0;
+ int addr_type;
+ int err;
+
+ if (!sock_flag(sk, SOCK_ZAPPED))
+ return -EINVAL;
+ if (addr->l2tp_family != AF_INET6)
+ return -EINVAL;
+ if (addr_len < sizeof(*addr))
+ return -EINVAL;
+
+ addr_type = ipv6_addr_type(&addr->l2tp_addr);
+
+ /* l2tp_ip6 sockets are IPv6 only */
+ if (addr_type == IPV6_ADDR_MAPPED)
+ return -EADDRNOTAVAIL;
+
+	/* L2TP is point-to-point, not multicast */
+ if (addr_type & IPV6_ADDR_MULTICAST)
+ return -EADDRNOTAVAIL;
+
+ err = -EADDRINUSE;
+ read_lock_bh(&l2tp_ip6_lock);
+ if (__l2tp_ip6_bind_lookup(&init_net, &addr->l2tp_addr,
+ sk->sk_bound_dev_if, addr->l2tp_conn_id))
+ goto out_in_use;
+ read_unlock_bh(&l2tp_ip6_lock);
+
+ lock_sock(sk);
+
+ err = -EINVAL;
+ if (sk->sk_state != TCP_CLOSE)
+ goto out_unlock;
+
+ /* Check if the address belongs to the host. */
+ rcu_read_lock();
+ if (addr_type != IPV6_ADDR_ANY) {
+ struct net_device *dev = NULL;
+
+ if (addr_type & IPV6_ADDR_LINKLOCAL) {
+ if (addr_len >= sizeof(struct sockaddr_in6) &&
+ addr->l2tp_scope_id) {
+ /* Override any existing binding, if another
+ * one is supplied by user.
+ */
+ sk->sk_bound_dev_if = addr->l2tp_scope_id;
+ }
+
+ /* Binding to link-local address requires an
+ interface */
+ if (!sk->sk_bound_dev_if)
+ goto out_unlock_rcu;
+
+ err = -ENODEV;
+ dev = dev_get_by_index_rcu(sock_net(sk),
+ sk->sk_bound_dev_if);
+ if (!dev)
+ goto out_unlock_rcu;
+ }
+
+ /* ipv4 addr of the socket is invalid. Only the
+		 * unspecified and mapped addresses have a v4 equivalent.
+ */
+ v4addr = LOOPBACK4_IPV6;
+ err = -EADDRNOTAVAIL;
+ if (!ipv6_chk_addr(sock_net(sk), &addr->l2tp_addr, dev, 0))
+ goto out_unlock_rcu;
+ }
+ rcu_read_unlock();
+
+ inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
+ np->rcv_saddr = addr->l2tp_addr;
+ np->saddr = addr->l2tp_addr;
+
+ l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id;
+
+ write_lock_bh(&l2tp_ip6_lock);
+ sk_add_bind_node(sk, &l2tp_ip6_bind_table);
+ sk_del_node_init(sk);
+ write_unlock_bh(&l2tp_ip6_lock);
+
+ sock_reset_flag(sk, SOCK_ZAPPED);
+ release_sock(sk);
+ return 0;
+
+out_unlock_rcu:
+ rcu_read_unlock();
+out_unlock:
+ release_sock(sk);
+ return err;
+
+out_in_use:
+ read_unlock_bh(&l2tp_ip6_lock);
+ return err;
+}
+
+static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
+ int addr_len)
+{
+ struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *) uaddr;
+ struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
+ struct in6_addr *daddr;
+ int addr_type;
+ int rc;
+
+ if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
+ return -EINVAL;
+
+ if (addr_len < sizeof(*lsa))
+ return -EINVAL;
+
+ addr_type = ipv6_addr_type(&usin->sin6_addr);
+ if (addr_type & IPV6_ADDR_MULTICAST)
+ return -EINVAL;
+
+ if (addr_type & IPV6_ADDR_MAPPED) {
+ daddr = &usin->sin6_addr;
+ if (ipv4_is_multicast(daddr->s6_addr32[3]))
+ return -EINVAL;
+ }
+
+ rc = ip6_datagram_connect(sk, uaddr, addr_len);
+
+ lock_sock(sk);
+
+ l2tp_ip6_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
+
+ write_lock_bh(&l2tp_ip6_lock);
+ hlist_del_init(&sk->sk_bind_node);
+ sk_add_bind_node(sk, &l2tp_ip6_bind_table);
+ write_unlock_bh(&l2tp_ip6_lock);
+
+ release_sock(sk);
+
+ return rc;
+}
+
+static int l2tp_ip6_disconnect(struct sock *sk, int flags)
+{
+ if (sock_flag(sk, SOCK_ZAPPED))
+ return 0;
+
+ return udp_disconnect(sk, flags);
+}
+
+static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
+ int *uaddr_len, int peer)
+{
+ struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)uaddr;
+ struct sock *sk = sock->sk;
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct l2tp_ip6_sock *lsk = l2tp_ip6_sk(sk);
+
+ lsa->l2tp_family = AF_INET6;
+ lsa->l2tp_flowinfo = 0;
+ lsa->l2tp_scope_id = 0;
+ if (peer) {
+ if (!lsk->peer_conn_id)
+ return -ENOTCONN;
+ lsa->l2tp_conn_id = lsk->peer_conn_id;
+ lsa->l2tp_addr = np->daddr;
+ if (np->sndflow)
+ lsa->l2tp_flowinfo = np->flow_label;
+ } else {
+ if (ipv6_addr_any(&np->rcv_saddr))
+ lsa->l2tp_addr = np->saddr;
+ else
+ lsa->l2tp_addr = np->rcv_saddr;
+
+ lsa->l2tp_conn_id = lsk->conn_id;
+ }
+ if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
+ lsa->l2tp_scope_id = sk->sk_bound_dev_if;
+ *uaddr_len = sizeof(*lsa);
+ return 0;
+}
+
+static int l2tp_ip6_backlog_recv(struct sock *sk, struct sk_buff *skb)
+{
+ int rc;
+
+ /* Charge it to the socket, dropping if the queue is full. */
+ rc = sock_queue_rcv_skb(sk, skb);
+ if (rc < 0)
+ goto drop;
+
+ return 0;
+
+drop:
+ IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS);
+ kfree_skb(skb);
+ return -1;
+}
+
+static int l2tp_ip6_push_pending_frames(struct sock *sk)
+{
+ struct sk_buff *skb;
+ __be32 *transhdr = NULL;
+ int err = 0;
+
+ skb = skb_peek(&sk->sk_write_queue);
+ if (skb == NULL)
+ goto out;
+
+ transhdr = (__be32 *)skb_transport_header(skb);
+ *transhdr = 0;
+
+ err = ip6_push_pending_frames(sk);
+
+out:
+ return err;
+}
+
+/* Userspace will call sendmsg() on the tunnel socket to send L2TP
+ * control frames.
+ */
+static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
+ struct msghdr *msg, size_t len)
+{
+ struct ipv6_txoptions opt_space;
+ struct sockaddr_l2tpip6 *lsa =
+ (struct sockaddr_l2tpip6 *) msg->msg_name;
+ struct in6_addr *daddr, *final_p, final;
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct ipv6_txoptions *opt = NULL;
+ struct ip6_flowlabel *flowlabel = NULL;
+ struct dst_entry *dst = NULL;
+ struct flowi6 fl6;
+ int addr_len = msg->msg_namelen;
+ int hlimit = -1;
+ int tclass = -1;
+ int dontfrag = -1;
+ int transhdrlen = 4; /* zero session-id */
+ int ulen = len + transhdrlen;
+ int err;
+
+ /* Rough check on arithmetic overflow,
+ better check is made in ip6_append_data().
+ */
+ if (len > INT_MAX)
+ return -EMSGSIZE;
+
+ /* Mirror BSD error message compatibility */
+ if (msg->msg_flags & MSG_OOB)
+ return -EOPNOTSUPP;
+
+ /*
+ * Get and verify the address.
+ */
+ memset(&fl6, 0, sizeof(fl6));
+
+ fl6.flowi6_mark = sk->sk_mark;
+
+ if (lsa) {
+ if (addr_len < SIN6_LEN_RFC2133)
+ return -EINVAL;
+
+ if (lsa->l2tp_family && lsa->l2tp_family != AF_INET6)
+ return -EAFNOSUPPORT;
+
+ daddr = &lsa->l2tp_addr;
+ if (np->sndflow) {
+ fl6.flowlabel = lsa->l2tp_flowinfo & IPV6_FLOWINFO_MASK;
+			if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
+ flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
+ if (flowlabel == NULL)
+ return -EINVAL;
+ daddr = &flowlabel->dst;
+ }
+ }
+
+ /*
+ * Otherwise it will be difficult to maintain
+ * sk->sk_dst_cache.
+ */
+ if (sk->sk_state == TCP_ESTABLISHED &&
+ ipv6_addr_equal(daddr, &np->daddr))
+ daddr = &np->daddr;
+
+ if (addr_len >= sizeof(struct sockaddr_in6) &&
+ lsa->l2tp_scope_id &&
+ ipv6_addr_type(daddr) & IPV6_ADDR_LINKLOCAL)
+ fl6.flowi6_oif = lsa->l2tp_scope_id;
+ } else {
+ if (sk->sk_state != TCP_ESTABLISHED)
+ return -EDESTADDRREQ;
+
+ daddr = &np->daddr;
+ fl6.flowlabel = np->flow_label;
+ }
+
+ if (fl6.flowi6_oif == 0)
+ fl6.flowi6_oif = sk->sk_bound_dev_if;
+
+ if (msg->msg_controllen) {
+ opt = &opt_space;
+ memset(opt, 0, sizeof(struct ipv6_txoptions));
+ opt->tot_len = sizeof(struct ipv6_txoptions);
+
+ err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+ &hlimit, &tclass, &dontfrag);
+ if (err < 0) {
+ fl6_sock_release(flowlabel);
+ return err;
+ }
+ if ((fl6.flowlabel & IPV6_FLOWLABEL_MASK) && !flowlabel) {
+ flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
+ if (flowlabel == NULL)
+ return -EINVAL;
+ }
+		if (!(opt->opt_nflen | opt->opt_flen))
+ opt = NULL;
+ }
+
+ if (opt == NULL)
+ opt = np->opt;
+ if (flowlabel)
+ opt = fl6_merge_options(&opt_space, flowlabel, opt);
+ opt = ipv6_fixup_options(&opt_space, opt);
+
+ fl6.flowi6_proto = sk->sk_protocol;
+ if (!ipv6_addr_any(daddr))
+ fl6.daddr = *daddr;
+ else
+ fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
+ if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
+ fl6.saddr = np->saddr;
+
+ final_p = fl6_update_dst(&fl6, opt, &final);
+
+ if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
+ fl6.flowi6_oif = np->mcast_oif;
+ else if (!fl6.flowi6_oif)
+ fl6.flowi6_oif = np->ucast_oif;
+
+ security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+
+ dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
+ goto out;
+ }
+
+ if (hlimit < 0) {
+ if (ipv6_addr_is_multicast(&fl6.daddr))
+ hlimit = np->mcast_hops;
+ else
+ hlimit = np->hop_limit;
+ if (hlimit < 0)
+ hlimit = ip6_dst_hoplimit(dst);
+ }
+
+ if (tclass < 0)
+ tclass = np->tclass;
+
+ if (dontfrag < 0)
+ dontfrag = np->dontfrag;
+
+ if (msg->msg_flags & MSG_CONFIRM)
+ goto do_confirm;
+
+back_from_confirm:
+ lock_sock(sk);
+ err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov,
+ ulen, transhdrlen, hlimit, tclass, opt,
+ &fl6, (struct rt6_info *)dst,
+ msg->msg_flags, dontfrag);
+ if (err)
+ ip6_flush_pending_frames(sk);
+ else if (!(msg->msg_flags & MSG_MORE))
+ err = l2tp_ip6_push_pending_frames(sk);
+ release_sock(sk);
+done:
+ dst_release(dst);
+out:
+ fl6_sock_release(flowlabel);
+
+ return err < 0 ? err : len;
+
+do_confirm:
+ dst_confirm(dst);
+ if (!(msg->msg_flags & MSG_PROBE) || len)
+ goto back_from_confirm;
+ err = 0;
+ goto done;
+}
+
+static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
+ struct msghdr *msg, size_t len, int noblock,
+ int flags, int *addr_len)
+{
+ struct inet_sock *inet = inet_sk(sk);
+ struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name;
+ size_t copied = 0;
+ int err = -EOPNOTSUPP;
+ struct sk_buff *skb;
+
+ if (flags & MSG_OOB)
+ goto out;
+
+ if (addr_len)
+ *addr_len = sizeof(*lsa);
+
+ if (flags & MSG_ERRQUEUE)
+ return ipv6_recv_error(sk, msg, len);
+
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb)
+ goto out;
+
+ copied = skb->len;
+ if (len < copied) {
+ msg->msg_flags |= MSG_TRUNC;
+ copied = len;
+ }
+
+ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ if (err)
+ goto done;
+
+ sock_recv_timestamp(msg, sk, skb);
+
+ /* Copy the address. */
+ if (lsa) {
+ lsa->l2tp_family = AF_INET6;
+ lsa->l2tp_unused = 0;
+ lsa->l2tp_addr = ipv6_hdr(skb)->saddr;
+ lsa->l2tp_flowinfo = 0;
+ lsa->l2tp_scope_id = 0;
+ if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
+ lsa->l2tp_scope_id = IP6CB(skb)->iif;
+ }
+
+ if (inet->cmsg_flags)
+ ip_cmsg_recv(msg, skb);
+
+ if (flags & MSG_TRUNC)
+ copied = skb->len;
+done:
+ skb_free_datagram(sk, skb);
+out:
+ return err ? err : copied;
+}
+
+static struct proto l2tp_ip6_prot = {
+ .name = "L2TP/IPv6",
+ .owner = THIS_MODULE,
+ .init = l2tp_ip6_open,
+ .close = l2tp_ip6_close,
+ .bind = l2tp_ip6_bind,
+ .connect = l2tp_ip6_connect,
+ .disconnect = l2tp_ip6_disconnect,
+ .ioctl = udp_ioctl,
+ .destroy = l2tp_ip6_destroy_sock,
+ .setsockopt = ipv6_setsockopt,
+ .getsockopt = ipv6_getsockopt,
+ .sendmsg = l2tp_ip6_sendmsg,
+ .recvmsg = l2tp_ip6_recvmsg,
+ .backlog_rcv = l2tp_ip6_backlog_recv,
+ .hash = inet_hash,
+ .unhash = inet_unhash,
+ .obj_size = sizeof(struct l2tp_ip6_sock),
+#ifdef CONFIG_COMPAT
+ .compat_setsockopt = compat_ipv6_setsockopt,
+ .compat_getsockopt = compat_ipv6_getsockopt,
+#endif
+};
+
+static const struct proto_ops l2tp_ip6_ops = {
+ .family = PF_INET6,
+ .owner = THIS_MODULE,
+ .release = inet6_release,
+ .bind = inet6_bind,
+ .connect = inet_dgram_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = l2tp_ip6_getname,
+ .poll = datagram_poll,
+ .ioctl = inet6_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = inet_shutdown,
+ .setsockopt = sock_common_setsockopt,
+ .getsockopt = sock_common_getsockopt,
+ .sendmsg = inet_sendmsg,
+ .recvmsg = sock_common_recvmsg,
+ .mmap = sock_no_mmap,
+ .sendpage = sock_no_sendpage,
+#ifdef CONFIG_COMPAT
+ .compat_setsockopt = compat_sock_common_setsockopt,
+ .compat_getsockopt = compat_sock_common_getsockopt,
+#endif
+};
+
+static struct inet_protosw l2tp_ip6_protosw = {
+ .type = SOCK_DGRAM,
+ .protocol = IPPROTO_L2TP,
+ .prot = &l2tp_ip6_prot,
+ .ops = &l2tp_ip6_ops,
+ .no_check = 0,
+};
+
+static struct inet6_protocol l2tp_ip6_protocol __read_mostly = {
+ .handler = l2tp_ip6_recv,
+};
+
+static int __init l2tp_ip6_init(void)
+{
+ int err;
+
+ pr_info("L2TP IP encapsulation support for IPv6 (L2TPv3)\n");
+
+ err = proto_register(&l2tp_ip6_prot, 1);
+ if (err != 0)
+ goto out;
+
+ err = inet6_add_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP);
+ if (err)
+ goto out1;
+
+ inet6_register_protosw(&l2tp_ip6_protosw);
+ return 0;
+
+out1:
+ proto_unregister(&l2tp_ip6_prot);
+out:
+ return err;
+}
+
+static void __exit l2tp_ip6_exit(void)
+{
+ inet6_unregister_protosw(&l2tp_ip6_protosw);
+ inet6_del_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP);
+ proto_unregister(&l2tp_ip6_prot);
+}
+
+module_init(l2tp_ip6_init);
+module_exit(l2tp_ip6_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Chris Elston <celston@katalix.com>");
+MODULE_DESCRIPTION("L2TP IP encapsulation for IPv6");
+MODULE_VERSION("1.0");
+
+/* Use the value of SOCK_DGRAM (2) directly, because __stringify doesn't like
+ * enums
+ */
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 2, IPPROTO_L2TP);
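
The protosw above means userspace reaches this code through an ordinary
datagram socket. A minimal sketch of creating and binding one, assuming
the sockaddr_l2tpip6 layout exported via linux/l2tp.h (field names as
used by l2tp_ip6_bind() above; error paths trimmed):

	#include <stdint.h>
	#include <string.h>
	#include <arpa/inet.h>
	#include <sys/socket.h>
	#include <linux/l2tp.h>	/* struct sockaddr_l2tpip6, IPPROTO_L2TP */

	static int l2tp_ip6_tunnel_socket(const char *laddr, uint32_t conn_id)
	{
		struct sockaddr_l2tpip6 sa;
		int fd;

		/* SOCK_DGRAM + IPPROTO_L2TP selects l2tp_ip6_protosw */
		fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_L2TP);
		if (fd < 0)
			return -1;

		memset(&sa, 0, sizeof(sa));
		sa.l2tp_family = AF_INET6;
		sa.l2tp_conn_id = conn_id;	/* tunnel id to demux on */
		inet_pton(AF_INET6, laddr, &sa.l2tp_addr);

		/* autobind is disabled (no ports), so bind() must come
		 * before connect()/sendmsg(), per l2tp_ip6_connect()
		 */
		if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
			return -1;

		return fd;
	}
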
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 93a41a09458b..ddc553e76671 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -14,6 +14,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <net/sock.h>
#include <net/genetlink.h>
#include <net/udp.h>
@@ -133,10 +135,25 @@ static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info
if (info->attrs[L2TP_ATTR_FD]) {
fd = nla_get_u32(info->attrs[L2TP_ATTR_FD]);
} else {
- if (info->attrs[L2TP_ATTR_IP_SADDR])
- cfg.local_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_SADDR]);
- if (info->attrs[L2TP_ATTR_IP_DADDR])
- cfg.peer_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_DADDR]);
+#if IS_ENABLED(CONFIG_IPV6)
+ if (info->attrs[L2TP_ATTR_IP6_SADDR] &&
+ info->attrs[L2TP_ATTR_IP6_DADDR]) {
+ cfg.local_ip6 = nla_data(
+ info->attrs[L2TP_ATTR_IP6_SADDR]);
+ cfg.peer_ip6 = nla_data(
+ info->attrs[L2TP_ATTR_IP6_DADDR]);
+ } else
+#endif
+ if (info->attrs[L2TP_ATTR_IP_SADDR] &&
+ info->attrs[L2TP_ATTR_IP_DADDR]) {
+ cfg.local_ip.s_addr = nla_get_be32(
+ info->attrs[L2TP_ATTR_IP_SADDR]);
+ cfg.peer_ip.s_addr = nla_get_be32(
+ info->attrs[L2TP_ATTR_IP_DADDR]);
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
if (info->attrs[L2TP_ATTR_UDP_SPORT])
cfg.local_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_SPORT]);
if (info->attrs[L2TP_ATTR_UDP_DPORT])
@@ -225,47 +242,85 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
struct nlattr *nest;
struct sock *sk = NULL;
struct inet_sock *inet;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct ipv6_pinfo *np = NULL;
+#endif
+ struct l2tp_stats stats;
+ unsigned int start;
hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags,
L2TP_CMD_TUNNEL_GET);
if (IS_ERR(hdr))
return PTR_ERR(hdr);
- NLA_PUT_U8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version);
- NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id);
- NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id);
- NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, tunnel->debug);
- NLA_PUT_U16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap);
+ if (nla_put_u8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version) ||
+ nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
+ nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) ||
+ nla_put_u32(skb, L2TP_ATTR_DEBUG, tunnel->debug) ||
+ nla_put_u16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap))
+ goto nla_put_failure;
nest = nla_nest_start(skb, L2TP_ATTR_STATS);
if (nest == NULL)
goto nla_put_failure;
- NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, tunnel->stats.tx_packets);
- NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, tunnel->stats.tx_bytes);
- NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, tunnel->stats.tx_errors);
- NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, tunnel->stats.rx_packets);
- NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, tunnel->stats.rx_bytes);
- NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, tunnel->stats.rx_seq_discards);
- NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, tunnel->stats.rx_oos_packets);
- NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, tunnel->stats.rx_errors);
+ do {
+ start = u64_stats_fetch_begin(&tunnel->stats.syncp);
+ stats.tx_packets = tunnel->stats.tx_packets;
+ stats.tx_bytes = tunnel->stats.tx_bytes;
+ stats.tx_errors = tunnel->stats.tx_errors;
+ stats.rx_packets = tunnel->stats.rx_packets;
+ stats.rx_bytes = tunnel->stats.rx_bytes;
+ stats.rx_errors = tunnel->stats.rx_errors;
+ stats.rx_seq_discards = tunnel->stats.rx_seq_discards;
+ stats.rx_oos_packets = tunnel->stats.rx_oos_packets;
+ } while (u64_stats_fetch_retry(&tunnel->stats.syncp, start));
+
+ if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) ||
+ nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) ||
+ nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) ||
+ nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) ||
+ nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) ||
+ nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
+ stats.rx_seq_discards) ||
+ nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
+ stats.rx_oos_packets) ||
+ nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors))
+ goto nla_put_failure;
nla_nest_end(skb, nest);
sk = tunnel->sock;
if (!sk)
goto out;
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6)
+ np = inet6_sk(sk);
+#endif
+
inet = inet_sk(sk);
switch (tunnel->encap) {
case L2TP_ENCAPTYPE_UDP:
- NLA_PUT_U16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport));
- NLA_PUT_U16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport));
- NLA_PUT_U8(skb, L2TP_ATTR_UDP_CSUM, (sk->sk_no_check != UDP_CSUM_NOXMIT));
+ if (nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)) ||
+ nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)) ||
+ nla_put_u8(skb, L2TP_ATTR_UDP_CSUM,
+ (sk->sk_no_check != UDP_CSUM_NOXMIT)))
+ goto nla_put_failure;
/* NOBREAK */
case L2TP_ENCAPTYPE_IP:
- NLA_PUT_BE32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr);
- NLA_PUT_BE32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr);
+#if IS_ENABLED(CONFIG_IPV6)
+ if (np) {
+ if (nla_put(skb, L2TP_ATTR_IP6_SADDR, sizeof(np->saddr),
+ &np->saddr) ||
+ nla_put(skb, L2TP_ATTR_IP6_DADDR, sizeof(np->daddr),
+ &np->daddr))
+ goto nla_put_failure;
+ } else
+#endif
+ if (nla_put_be32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr) ||
+ nla_put_be32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr))
+ goto nla_put_failure;
break;
}
@@ -556,6 +611,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags
struct nlattr *nest;
struct l2tp_tunnel *tunnel = session->tunnel;
struct sock *sk = NULL;
+ struct l2tp_stats stats;
+ unsigned int start;
sk = tunnel->sock;
@@ -563,43 +620,64 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags
if (IS_ERR(hdr))
return PTR_ERR(hdr);
- NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id);
- NLA_PUT_U32(skb, L2TP_ATTR_SESSION_ID, session->session_id);
- NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id);
- NLA_PUT_U32(skb, L2TP_ATTR_PEER_SESSION_ID, session->peer_session_id);
- NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, session->debug);
- NLA_PUT_U16(skb, L2TP_ATTR_PW_TYPE, session->pwtype);
- NLA_PUT_U16(skb, L2TP_ATTR_MTU, session->mtu);
- if (session->mru)
- NLA_PUT_U16(skb, L2TP_ATTR_MRU, session->mru);
-
- if (session->ifname && session->ifname[0])
- NLA_PUT_STRING(skb, L2TP_ATTR_IFNAME, session->ifname);
- if (session->cookie_len)
- NLA_PUT(skb, L2TP_ATTR_COOKIE, session->cookie_len, &session->cookie[0]);
- if (session->peer_cookie_len)
- NLA_PUT(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len, &session->peer_cookie[0]);
- NLA_PUT_U8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq);
- NLA_PUT_U8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq);
- NLA_PUT_U8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode);
+ if (nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
+ nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id) ||
+ nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) ||
+ nla_put_u32(skb, L2TP_ATTR_PEER_SESSION_ID,
+ session->peer_session_id) ||
+ nla_put_u32(skb, L2TP_ATTR_DEBUG, session->debug) ||
+ nla_put_u16(skb, L2TP_ATTR_PW_TYPE, session->pwtype) ||
+ nla_put_u16(skb, L2TP_ATTR_MTU, session->mtu) ||
+ (session->mru &&
+ nla_put_u16(skb, L2TP_ATTR_MRU, session->mru)))
+ goto nla_put_failure;
+
+ if ((session->ifname && session->ifname[0] &&
+ nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) ||
+ (session->cookie_len &&
+ nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len,
+ &session->cookie[0])) ||
+ (session->peer_cookie_len &&
+ nla_put(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len,
+ &session->peer_cookie[0])) ||
+ nla_put_u8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq) ||
+ nla_put_u8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq) ||
+ nla_put_u8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode) ||
#ifdef CONFIG_XFRM
- if ((sk) && (sk->sk_policy[0] || sk->sk_policy[1]))
- NLA_PUT_U8(skb, L2TP_ATTR_USING_IPSEC, 1);
+ (((sk) && (sk->sk_policy[0] || sk->sk_policy[1])) &&
+ nla_put_u8(skb, L2TP_ATTR_USING_IPSEC, 1)) ||
#endif
- if (session->reorder_timeout)
- NLA_PUT_MSECS(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout);
+ (session->reorder_timeout &&
+ nla_put_msecs(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout)))
+ goto nla_put_failure;
nest = nla_nest_start(skb, L2TP_ATTR_STATS);
if (nest == NULL)
goto nla_put_failure;
- NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, session->stats.tx_packets);
- NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, session->stats.tx_bytes);
- NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, session->stats.tx_errors);
- NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, session->stats.rx_packets);
- NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, session->stats.rx_bytes);
- NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, session->stats.rx_seq_discards);
- NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, session->stats.rx_oos_packets);
- NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, session->stats.rx_errors);
+
+ do {
+ start = u64_stats_fetch_begin(&session->stats.syncp);
+ stats.tx_packets = session->stats.tx_packets;
+ stats.tx_bytes = session->stats.tx_bytes;
+ stats.tx_errors = session->stats.tx_errors;
+ stats.rx_packets = session->stats.rx_packets;
+ stats.rx_bytes = session->stats.rx_bytes;
+ stats.rx_errors = session->stats.rx_errors;
+ stats.rx_seq_discards = session->stats.rx_seq_discards;
+ stats.rx_oos_packets = session->stats.rx_oos_packets;
+ } while (u64_stats_fetch_retry(&session->stats.syncp, start));
+
+ if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) ||
+ nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) ||
+ nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) ||
+ nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) ||
+ nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) ||
+ nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
+ stats.rx_seq_discards) ||
+ nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
+ stats.rx_oos_packets) ||
+ nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors))
+ goto nla_put_failure;
nla_nest_end(skb, nest);
return genlmsg_end(skb, hdr);
@@ -708,6 +786,14 @@ static struct nla_policy l2tp_nl_policy[L2TP_ATTR_MAX + 1] = {
[L2TP_ATTR_MTU] = { .type = NLA_U16, },
[L2TP_ATTR_MRU] = { .type = NLA_U16, },
[L2TP_ATTR_STATS] = { .type = NLA_NESTED, },
+ [L2TP_ATTR_IP6_SADDR] = {
+ .type = NLA_BINARY,
+ .len = sizeof(struct in6_addr),
+ },
+ [L2TP_ATTR_IP6_DADDR] = {
+ .type = NLA_BINARY,
+ .len = sizeof(struct in6_addr),
+ },
[L2TP_ATTR_IFNAME] = {
.type = NLA_NUL_STRING,
.len = IFNAMSIZ - 1,
@@ -818,7 +904,7 @@ static int l2tp_nl_init(void)
{
int err;
- printk(KERN_INFO "L2TP netlink interface\n");
+ pr_info("L2TP netlink interface\n");
err = genl_register_family_with_ops(&l2tp_nl_family, l2tp_nl_ops,
ARRAY_SIZE(l2tp_nl_ops));
@@ -837,5 +923,4 @@ MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP netlink");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
-MODULE_ALIAS("net-pf-" __stringify(PF_NETLINK) "-proto-" \
- __stringify(NETLINK_GENERIC) "-type-" "l2tp");
+MODULE_ALIAS_GENL_FAMILY("l2tp");
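
The stats snapshots introduced above rely on u64_stats_sync so that
64-bit counters read back consistently even on 32-bit SMP hosts. The
pattern in miniature (a generic sketch; only the fetch_begin/fetch_retry
usage mirrors the hunks above, the struct and names are invented):

	#include <linux/types.h>
	#include <linux/u64_stats_sync.h>

	struct demo_stats {
		u64			rx_packets;
		u64			rx_bytes;
		struct u64_stats_sync	syncp;
	};

	/* writer side, e.g. once per received packet */
	static void demo_stats_rx(struct demo_stats *s, unsigned int len)
	{
		u64_stats_update_begin(&s->syncp);
		s->rx_packets++;
		s->rx_bytes += len;
		u64_stats_update_end(&s->syncp);
	}

	/* reader side: retry while a writer ran concurrently */
	static void demo_stats_read(struct demo_stats *s, u64 *pkts, u64 *bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&s->syncp);
			*pkts = s->rx_packets;
			*bytes = s->rx_bytes;
		} while (u64_stats_fetch_retry(&s->syncp, start));
	}
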
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 1addd9f3f40a..8ef6b9416cba 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -57,6 +57,8 @@
* http://openl2tp.sourceforge.net.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/string.h>
#include <linux/list.h>
@@ -106,12 +108,6 @@
/* Space for UDP, L2TP and PPP headers */
#define PPPOL2TP_HEADER_OVERHEAD 40
-#define PRINTK(_mask, _type, _lvl, _fmt, args...) \
- do { \
- if ((_mask) & (_type)) \
- printk(_lvl "PPPOL2TP: " _fmt, ##args); \
- } while (0)
-
/* Number of bytes to build transmit L2TP headers.
* Unfortunately the size is different depending on whether sequence numbers
* are enabled.
@@ -236,9 +232,9 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
if (sk->sk_state & PPPOX_BOUND) {
struct pppox_sock *po;
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: recv %d byte data frame, passing to ppp\n",
- session->name, data_len);
+ l2tp_dbg(session, PPPOL2TP_MSG_DATA,
+ "%s: recv %d byte data frame, passing to ppp\n",
+ session->name, data_len);
/* We need to forget all info related to the L2TP packet
* gathered in the skb as we are going to reuse the same
@@ -259,8 +255,8 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
po = pppox_sk(sk);
ppp_input(&po->chan, skb);
} else {
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
- "%s: socket not bound\n", session->name);
+ l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: socket not bound\n",
+ session->name);
/* Not bound. Nothing we can do, so discard. */
session->stats.rx_errors++;
@@ -270,8 +266,7 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
return;
no_sock:
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
- "%s: no socket\n", session->name);
+ l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: no socket\n", session->name);
kfree_skb(skb);
}
@@ -628,7 +623,6 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
{
struct sock *sk = sock->sk;
struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr;
- struct sockaddr_pppol2tpv3 *sp3 = (struct sockaddr_pppol2tpv3 *) uservaddr;
struct pppox_sock *po = pppox_sk(sk);
struct l2tp_session *session = NULL;
struct l2tp_tunnel *tunnel;
@@ -657,7 +651,13 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
if (sk->sk_user_data)
goto end; /* socket is already attached */
- /* Get params from socket address. Handle L2TPv2 and L2TPv3 */
+ /* Get params from socket address. Handle L2TPv2 and L2TPv3.
+ * This is nasty because there are different sockaddr_pppol2tp
+ * structs for L2TPv2, L2TPv3, over IPv4 and IPv6. We use
+ * the sockaddr size to determine which structure the caller
+ * is using.
+ */
+ peer_tunnel_id = 0;
if (sockaddr_len == sizeof(struct sockaddr_pppol2tp)) {
fd = sp->pppol2tp.fd;
tunnel_id = sp->pppol2tp.s_tunnel;
@@ -665,12 +665,31 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
session_id = sp->pppol2tp.s_session;
peer_session_id = sp->pppol2tp.d_session;
} else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3)) {
+ struct sockaddr_pppol2tpv3 *sp3 =
+ (struct sockaddr_pppol2tpv3 *) sp;
ver = 3;
fd = sp3->pppol2tp.fd;
tunnel_id = sp3->pppol2tp.s_tunnel;
peer_tunnel_id = sp3->pppol2tp.d_tunnel;
session_id = sp3->pppol2tp.s_session;
peer_session_id = sp3->pppol2tp.d_session;
+ } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpin6)) {
+ struct sockaddr_pppol2tpin6 *sp6 =
+ (struct sockaddr_pppol2tpin6 *) sp;
+ fd = sp6->pppol2tp.fd;
+ tunnel_id = sp6->pppol2tp.s_tunnel;
+ peer_tunnel_id = sp6->pppol2tp.d_tunnel;
+ session_id = sp6->pppol2tp.s_session;
+ peer_session_id = sp6->pppol2tp.d_session;
+ } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3in6)) {
+ struct sockaddr_pppol2tpv3in6 *sp6 =
+ (struct sockaddr_pppol2tpv3in6 *) sp;
+ ver = 3;
+ fd = sp6->pppol2tp.fd;
+ tunnel_id = sp6->pppol2tp.s_tunnel;
+ peer_tunnel_id = sp6->pppol2tp.d_tunnel;
+ session_id = sp6->pppol2tp.s_session;
+ peer_session_id = sp6->pppol2tp.d_session;
} else {
error = -EINVAL;
goto end; /* bad socket address */
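
Since dispatch is purely by sockaddr size, a caller selects the
v3-over-IPv6 branch just by passing the larger structure. A hedged
userspace sketch (struct and constants from linux/if_pppox.h and
linux/if_pppol2tp.h; the tunnel/session ids are example values):

	#include <string.h>
	#include <sys/socket.h>
	#include <linux/if_pppox.h>	/* AF_PPPOX, PX_PROTO_OL2TP */
	#include <linux/if_pppol2tp.h>	/* struct sockaddr_pppol2tpv3in6 */

	/* tunnel_fd is an L2TP/IPv6 socket such as the one above */
	static int pppol2tp_v3_in6_attach(int tunnel_fd)
	{
		struct sockaddr_pppol2tpv3in6 sa;
		int fd;

		fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
		if (fd < 0)
			return -1;

		memset(&sa, 0, sizeof(sa));
		sa.sa_family = AF_PPPOX;
		sa.sa_protocol = PX_PROTO_OL2TP;
		sa.pppol2tp.fd = tunnel_fd;
		sa.pppol2tp.s_tunnel = 1;
		sa.pppol2tp.d_tunnel = 2;
		sa.pppol2tp.s_session = 1;
		sa.pppol2tp.d_session = 2;

		/* sizeof(sa) alone routes connect() to the v3/in6 case */
		if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
			return -1;

		return fd;
	}
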
@@ -711,12 +730,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
if (tunnel->recv_payload_hook == NULL)
tunnel->recv_payload_hook = pppol2tp_recv_payload_hook;
- if (tunnel->peer_tunnel_id == 0) {
- if (ver == 2)
- tunnel->peer_tunnel_id = sp->pppol2tp.d_tunnel;
- else
- tunnel->peer_tunnel_id = sp3->pppol2tp.d_tunnel;
- }
+ if (tunnel->peer_tunnel_id == 0)
+ tunnel->peer_tunnel_id = peer_tunnel_id;
/* Create session if it doesn't already exist. We handle the
* case where a session was previously created by the netlink
@@ -807,8 +822,8 @@ out_no_ppp:
/* This is how we get the session context from the socket. */
sk->sk_user_data = session;
sk->sk_state = PPPOX_CONNECTED;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: created\n", session->name);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: created\n",
+ session->name);
end:
release_sock(sk);
@@ -861,8 +876,8 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i
ps = l2tp_session_priv(session);
ps->tunnel_sock = tunnel->sock;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: created\n", session->name);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: created\n",
+ session->name);
error = 0;
@@ -916,7 +931,7 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
}
inet = inet_sk(tunnel->sock);
- if (tunnel->version == 2) {
+ if ((tunnel->version == 2) && (tunnel->sock->sk_family == AF_INET)) {
struct sockaddr_pppol2tp sp;
len = sizeof(sp);
memset(&sp, 0, len);
@@ -932,6 +947,46 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
sp.pppol2tp.addr.sin_port = inet->inet_dport;
sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr;
memcpy(uaddr, &sp, len);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if ((tunnel->version == 2) &&
+ (tunnel->sock->sk_family == AF_INET6)) {
+ struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
+ struct sockaddr_pppol2tpin6 sp;
+ len = sizeof(sp);
+ memset(&sp, 0, len);
+ sp.sa_family = AF_PPPOX;
+ sp.sa_protocol = PX_PROTO_OL2TP;
+ sp.pppol2tp.fd = tunnel->fd;
+ sp.pppol2tp.pid = pls->owner;
+ sp.pppol2tp.s_tunnel = tunnel->tunnel_id;
+ sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id;
+ sp.pppol2tp.s_session = session->session_id;
+ sp.pppol2tp.d_session = session->peer_session_id;
+ sp.pppol2tp.addr.sin6_family = AF_INET6;
+ sp.pppol2tp.addr.sin6_port = inet->inet_dport;
+ memcpy(&sp.pppol2tp.addr.sin6_addr, &np->daddr,
+ sizeof(np->daddr));
+ memcpy(uaddr, &sp, len);
+ } else if ((tunnel->version == 3) &&
+ (tunnel->sock->sk_family == AF_INET6)) {
+ struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
+ struct sockaddr_pppol2tpv3in6 sp;
+ len = sizeof(sp);
+ memset(&sp, 0, len);
+ sp.sa_family = AF_PPPOX;
+ sp.sa_protocol = PX_PROTO_OL2TP;
+ sp.pppol2tp.fd = tunnel->fd;
+ sp.pppol2tp.pid = pls->owner;
+ sp.pppol2tp.s_tunnel = tunnel->tunnel_id;
+ sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id;
+ sp.pppol2tp.s_session = session->session_id;
+ sp.pppol2tp.d_session = session->peer_session_id;
+ sp.pppol2tp.addr.sin6_family = AF_INET6;
+ sp.pppol2tp.addr.sin6_port = inet->inet_dport;
+ memcpy(&sp.pppol2tp.addr.sin6_addr, &np->daddr,
+ sizeof(np->daddr));
+ memcpy(uaddr, &sp, len);
+#endif
} else if (tunnel->version == 3) {
struct sockaddr_pppol2tpv3 sp;
len = sizeof(sp);
@@ -998,9 +1053,9 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
struct l2tp_tunnel *tunnel = session->tunnel;
struct pppol2tp_ioc_stats stats;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
- "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
- session->name, cmd, arg);
+ l2tp_dbg(session, PPPOL2TP_MSG_CONTROL,
+ "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
+ session->name, cmd, arg);
sk = ps->sock;
sock_hold(sk);
@@ -1018,8 +1073,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq)))
break;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get mtu=%d\n", session->name, session->mtu);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get mtu=%d\n",
+ session->name, session->mtu);
err = 0;
break;
@@ -1034,8 +1089,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
session->mtu = ifr.ifr_mtu;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set mtu=%d\n", session->name, session->mtu);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set mtu=%d\n",
+ session->name, session->mtu);
err = 0;
break;
@@ -1048,8 +1103,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
if (put_user(session->mru, (int __user *) arg))
break;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get mru=%d\n", session->name, session->mru);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get mru=%d\n",
+ session->name, session->mru);
err = 0;
break;
@@ -1063,8 +1118,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
break;
session->mru = val;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set mru=%d\n", session->name, session->mru);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set mru=%d\n",
+ session->name, session->mru);
err = 0;
break;
@@ -1073,8 +1128,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
if (put_user(ps->flags, (int __user *) arg))
break;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get flags=%d\n", session->name, ps->flags);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get flags=%d\n",
+ session->name, ps->flags);
err = 0;
break;
@@ -1083,8 +1138,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
if (get_user(val, (int __user *) arg))
break;
ps->flags = val;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set flags=%d\n", session->name, ps->flags);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set flags=%d\n",
+ session->name, ps->flags);
err = 0;
break;
@@ -1100,8 +1155,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
if (copy_to_user((void __user *) arg, &stats,
sizeof(stats)))
break;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get L2TP stats\n", session->name);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get L2TP stats\n",
+ session->name);
err = 0;
break;
@@ -1128,9 +1183,9 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
struct sock *sk;
struct pppol2tp_ioc_stats stats;
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
- "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n",
- tunnel->name, cmd, arg);
+ l2tp_dbg(tunnel, PPPOL2TP_MSG_CONTROL,
+ "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n",
+ tunnel->name, cmd, arg);
sk = tunnel->sock;
sock_hold(sk);
@@ -1164,8 +1219,8 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
err = -EFAULT;
break;
}
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get L2TP stats\n", tunnel->name);
+ l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: get L2TP stats\n",
+ tunnel->name);
err = 0;
break;
@@ -1254,8 +1309,8 @@ static int pppol2tp_tunnel_setsockopt(struct sock *sk,
switch (optname) {
case PPPOL2TP_SO_DEBUG:
tunnel->debug = val;
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set debug=%x\n", tunnel->name, tunnel->debug);
+ l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: set debug=%x\n",
+ tunnel->name, tunnel->debug);
break;
default:
@@ -1282,8 +1337,9 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
break;
}
session->recv_seq = val ? -1 : 0;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set recv_seq=%d\n", session->name, session->recv_seq);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+ "%s: set recv_seq=%d\n",
+ session->name, session->recv_seq);
break;
case PPPOL2TP_SO_SENDSEQ:
@@ -1298,8 +1354,9 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
}
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set send_seq=%d\n", session->name, session->send_seq);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+ "%s: set send_seq=%d\n",
+ session->name, session->send_seq);
break;
case PPPOL2TP_SO_LNSMODE:
@@ -1308,20 +1365,22 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
break;
}
session->lns_mode = val ? -1 : 0;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set lns_mode=%d\n", session->name, session->lns_mode);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+ "%s: set lns_mode=%d\n",
+ session->name, session->lns_mode);
break;
case PPPOL2TP_SO_DEBUG:
session->debug = val;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set debug=%x\n", session->name, session->debug);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set debug=%x\n",
+ session->name, session->debug);
break;
case PPPOL2TP_SO_REORDERTO:
session->reorder_timeout = msecs_to_jiffies(val);
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set reorder_timeout=%d\n", session->name, session->reorder_timeout);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+ "%s: set reorder_timeout=%d\n",
+ session->name, session->reorder_timeout);
break;
default:
@@ -1400,8 +1459,8 @@ static int pppol2tp_tunnel_getsockopt(struct sock *sk,
switch (optname) {
case PPPOL2TP_SO_DEBUG:
*val = tunnel->debug;
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get debug=%x\n", tunnel->name, tunnel->debug);
+ l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: get debug=%x\n",
+ tunnel->name, tunnel->debug);
break;
default:
@@ -1423,32 +1482,32 @@ static int pppol2tp_session_getsockopt(struct sock *sk,
switch (optname) {
case PPPOL2TP_SO_RECVSEQ:
*val = session->recv_seq;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get recv_seq=%d\n", session->name, *val);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+ "%s: get recv_seq=%d\n", session->name, *val);
break;
case PPPOL2TP_SO_SENDSEQ:
*val = session->send_seq;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get send_seq=%d\n", session->name, *val);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+ "%s: get send_seq=%d\n", session->name, *val);
break;
case PPPOL2TP_SO_LNSMODE:
*val = session->lns_mode;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get lns_mode=%d\n", session->name, *val);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+ "%s: get lns_mode=%d\n", session->name, *val);
break;
case PPPOL2TP_SO_DEBUG:
*val = session->debug;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get debug=%d\n", session->name, *val);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get debug=%d\n",
+ session->name, *val);
break;
case PPPOL2TP_SO_REORDERTO:
*val = (int) jiffies_to_msecs(session->reorder_timeout);
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get reorder_timeout=%d\n", session->name, *val);
+ l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+ "%s: get reorder_timeout=%d\n", session->name, *val);
break;
default:
@@ -1811,8 +1870,7 @@ static int __init pppol2tp_init(void)
goto out_unregister_pppox;
#endif
- printk(KERN_INFO "PPPoL2TP kernel driver, %s\n",
- PPPOL2TP_DRV_VERSION);
+ pr_info("PPPoL2TP kernel driver, %s\n", PPPOL2TP_DRV_VERSION);
out:
return err;
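
The l2tp_dbg()/l2tp_info() helpers that replace PRINTK gate a pr_*()
call on the debug mask of whatever tunnel or session they are handed.
Judging from the call sites, the l2tp_core.h definitions are along
these lines (a sketch, not quoted from this patch):

	#define l2tp_printk(ptr, type, func, fmt, ...)			\
	do {								\
		if (((ptr)->debug) & (type))				\
			func(fmt, ##__VA_ARGS__);			\
	} while (0)

	#define l2tp_info(ptr, type, fmt, ...)				\
		l2tp_printk(ptr, type, pr_info, fmt, ##__VA_ARGS__)
	#define l2tp_dbg(ptr, type, fmt, ...)				\
		l2tp_printk(ptr, type, pr_debug, fmt, ##__VA_ARGS__)
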
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
index ab3d35f23257..3cdaa046c1bc 100644
--- a/net/lapb/lapb_iface.c
+++ b/net/lapb/lapb_iface.c
@@ -15,6 +15,8 @@
* 2000-10-29 Henner Eisen lapb_data_indication() return status.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
@@ -279,9 +281,7 @@ int lapb_connect_request(struct net_device *dev)
lapb_establish_data_link(lapb);
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S0 -> S1\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S0 -> S1\n", lapb->dev);
lapb->state = LAPB_STATE_1;
rc = LAPB_OK;
@@ -305,12 +305,8 @@ int lapb_disconnect_request(struct net_device *dev)
goto out_put;
case LAPB_STATE_1:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S1 TX DISC(1)\n", lapb->dev);
-#endif
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S1 -> S0\n", lapb->dev);
-#endif
+ lapb_dbg(1, "(%p) S1 TX DISC(1)\n", lapb->dev);
+ lapb_dbg(0, "(%p) S1 -> S0\n", lapb->dev);
lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND);
lapb->state = LAPB_STATE_0;
lapb_start_t1timer(lapb);
@@ -329,12 +325,8 @@ int lapb_disconnect_request(struct net_device *dev)
lapb_stop_t2timer(lapb);
lapb->state = LAPB_STATE_2;
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 DISC(1)\n", lapb->dev);
-#endif
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S3 -> S2\n", lapb->dev);
-#endif
+ lapb_dbg(1, "(%p) S3 DISC(1)\n", lapb->dev);
+ lapb_dbg(0, "(%p) S3 -> S2\n", lapb->dev);
rc = LAPB_OK;
out_put:
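
lapb_dbg(level, ...) collapses the old "#if LAPB_DEBUG > n" guards into
a single helper. A definition consistent with every conversion in these
files would be (a sketch, assuming LAPB_DEBUG stays a compile-time
verbosity level):

	#define lapb_dbg(level, fmt, ...)			\
	do {							\
		if (level < LAPB_DEBUG)				\
			pr_debug(fmt, ##__VA_ARGS__);		\
	} while (0)
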
diff --git a/net/lapb/lapb_in.c b/net/lapb/lapb_in.c
index f4e3c1accab7..5dba899131b3 100644
--- a/net/lapb/lapb_in.c
+++ b/net/lapb/lapb_in.c
@@ -15,6 +15,8 @@
* 2000-10-29 Henner Eisen lapb_data_indication() return status.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
@@ -44,25 +46,16 @@ static void lapb_state0_machine(struct lapb_cb *lapb, struct sk_buff *skb,
{
switch (frame->type) {
case LAPB_SABM:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S0 RX SABM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S0 RX SABM(%d)\n", lapb->dev, frame->pf);
if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S0 TX DM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S0 TX DM(%d)\n",
+ lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_DM, frame->pf,
LAPB_RESPONSE);
} else {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S0 TX UA(%d)\n",
- lapb->dev, frame->pf);
-#endif
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S0 -> S3\n", lapb->dev);
-#endif
+ lapb_dbg(1, "(%p) S0 TX UA(%d)\n",
+ lapb->dev, frame->pf);
+ lapb_dbg(0, "(%p) S0 -> S3\n", lapb->dev);
lapb_send_control(lapb, LAPB_UA, frame->pf,
LAPB_RESPONSE);
lapb_stop_t1timer(lapb);
@@ -78,18 +71,11 @@ static void lapb_state0_machine(struct lapb_cb *lapb, struct sk_buff *skb,
break;
case LAPB_SABME:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S0 RX SABME(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S0 RX SABME(%d)\n", lapb->dev, frame->pf);
if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S0 TX UA(%d)\n",
- lapb->dev, frame->pf);
-#endif
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S0 -> S3\n", lapb->dev);
-#endif
+ lapb_dbg(1, "(%p) S0 TX UA(%d)\n",
+ lapb->dev, frame->pf);
+ lapb_dbg(0, "(%p) S0 -> S3\n", lapb->dev);
lapb_send_control(lapb, LAPB_UA, frame->pf,
LAPB_RESPONSE);
lapb_stop_t1timer(lapb);
@@ -102,22 +88,16 @@ static void lapb_state0_machine(struct lapb_cb *lapb, struct sk_buff *skb,
lapb->va = 0;
lapb_connect_indication(lapb, LAPB_OK);
} else {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S0 TX DM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S0 TX DM(%d)\n",
+ lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_DM, frame->pf,
LAPB_RESPONSE);
}
break;
case LAPB_DISC:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S0 RX DISC(%d)\n",
- lapb->dev, frame->pf);
- printk(KERN_DEBUG "lapb: (%p) S0 TX UA(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S0 RX DISC(%d)\n", lapb->dev, frame->pf);
+ lapb_dbg(1, "(%p) S0 TX UA(%d)\n", lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE);
break;
@@ -137,68 +117,45 @@ static void lapb_state1_machine(struct lapb_cb *lapb, struct sk_buff *skb,
{
switch (frame->type) {
case LAPB_SABM:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S1 RX SABM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S1 RX SABM(%d)\n", lapb->dev, frame->pf);
if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S1 TX DM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S1 TX DM(%d)\n",
+ lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_DM, frame->pf,
LAPB_RESPONSE);
} else {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S1 TX UA(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S1 TX UA(%d)\n",
+ lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_UA, frame->pf,
LAPB_RESPONSE);
}
break;
case LAPB_SABME:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S1 RX SABME(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S1 RX SABME(%d)\n", lapb->dev, frame->pf);
if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S1 TX UA(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S1 TX UA(%d)\n",
+ lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_UA, frame->pf,
LAPB_RESPONSE);
} else {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S1 TX DM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S1 TX DM(%d)\n",
+ lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_DM, frame->pf,
LAPB_RESPONSE);
}
break;
case LAPB_DISC:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S1 RX DISC(%d)\n",
- lapb->dev, frame->pf);
- printk(KERN_DEBUG "lapb: (%p) S1 TX DM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S1 RX DISC(%d)\n", lapb->dev, frame->pf);
+ lapb_dbg(1, "(%p) S1 TX DM(%d)\n", lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE);
break;
case LAPB_UA:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S1 RX UA(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S1 RX UA(%d)\n", lapb->dev, frame->pf);
if (frame->pf) {
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S1 -> S3\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S1 -> S3\n", lapb->dev);
lapb_stop_t1timer(lapb);
lapb_stop_t2timer(lapb);
lapb->state = LAPB_STATE_3;
@@ -212,14 +169,9 @@ static void lapb_state1_machine(struct lapb_cb *lapb, struct sk_buff *skb,
break;
case LAPB_DM:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S1 RX DM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S1 RX DM(%d)\n", lapb->dev, frame->pf);
if (frame->pf) {
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S1 -> S0\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S1 -> S0\n", lapb->dev);
lapb_clear_queues(lapb);
lapb->state = LAPB_STATE_0;
lapb_start_t1timer(lapb);
@@ -242,34 +194,22 @@ static void lapb_state2_machine(struct lapb_cb *lapb, struct sk_buff *skb,
switch (frame->type) {
case LAPB_SABM:
case LAPB_SABME:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S2 RX {SABM,SABME}(%d)\n",
- lapb->dev, frame->pf);
- printk(KERN_DEBUG "lapb: (%p) S2 TX DM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S2 RX {SABM,SABME}(%d)\n",
+ lapb->dev, frame->pf);
+ lapb_dbg(1, "(%p) S2 TX DM(%d)\n", lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE);
break;
case LAPB_DISC:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S2 RX DISC(%d)\n",
- lapb->dev, frame->pf);
- printk(KERN_DEBUG "lapb: (%p) S2 TX UA(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S2 RX DISC(%d)\n", lapb->dev, frame->pf);
+ lapb_dbg(1, "(%p) S2 TX UA(%d)\n", lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE);
break;
case LAPB_UA:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S2 RX UA(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S2 RX UA(%d)\n", lapb->dev, frame->pf);
if (frame->pf) {
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S2 -> S0\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S2 -> S0\n", lapb->dev);
lapb->state = LAPB_STATE_0;
lapb_start_t1timer(lapb);
lapb_stop_t2timer(lapb);
@@ -278,14 +218,9 @@ static void lapb_state2_machine(struct lapb_cb *lapb, struct sk_buff *skb,
break;
case LAPB_DM:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S2 RX DM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S2 RX DM(%d)\n", lapb->dev, frame->pf);
if (frame->pf) {
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S2 -> S0\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S2 -> S0\n", lapb->dev);
lapb->state = LAPB_STATE_0;
lapb_start_t1timer(lapb);
lapb_stop_t2timer(lapb);
@@ -297,12 +232,9 @@ static void lapb_state2_machine(struct lapb_cb *lapb, struct sk_buff *skb,
case LAPB_REJ:
case LAPB_RNR:
case LAPB_RR:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S2 RX {I,REJ,RNR,RR}(%d)\n",
- lapb->dev, frame->pf);
- printk(KERN_DEBUG "lapb: (%p) S2 RX DM(%d)\n",
+ lapb_dbg(1, "(%p) S2 RX {I,REJ,RNR,RR}(%d)\n",
lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S2 RX DM(%d)\n", lapb->dev, frame->pf);
if (frame->pf)
lapb_send_control(lapb, LAPB_DM, frame->pf,
LAPB_RESPONSE);
@@ -325,22 +257,15 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
switch (frame->type) {
case LAPB_SABM:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 RX SABM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S3 RX SABM(%d)\n", lapb->dev, frame->pf);
if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 TX DM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S3 TX DM(%d)\n",
+ lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_DM, frame->pf,
LAPB_RESPONSE);
} else {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 TX UA(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S3 TX UA(%d)\n",
+ lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_UA, frame->pf,
LAPB_RESPONSE);
lapb_stop_t1timer(lapb);
@@ -355,15 +280,10 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
break;
case LAPB_SABME:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 RX SABME(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S3 RX SABME(%d)\n", lapb->dev, frame->pf);
if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 TX UA(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S3 TX UA(%d)\n",
+ lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_UA, frame->pf,
LAPB_RESPONSE);
lapb_stop_t1timer(lapb);
@@ -375,23 +295,16 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
lapb->va = 0;
lapb_requeue_frames(lapb);
} else {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 TX DM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S3 TX DM(%d)\n",
+ lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_DM, frame->pf,
LAPB_RESPONSE);
}
break;
case LAPB_DISC:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 RX DISC(%d)\n",
- lapb->dev, frame->pf);
-#endif
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S3 -> S0\n", lapb->dev);
-#endif
+ lapb_dbg(1, "(%p) S3 RX DISC(%d)\n", lapb->dev, frame->pf);
+ lapb_dbg(0, "(%p) S3 -> S0\n", lapb->dev);
lapb_clear_queues(lapb);
lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE);
lapb_start_t1timer(lapb);
@@ -401,13 +314,8 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
break;
case LAPB_DM:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 RX DM(%d)\n",
- lapb->dev, frame->pf);
-#endif
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S3 -> S0\n", lapb->dev);
-#endif
+ lapb_dbg(1, "(%p) S3 RX DM(%d)\n", lapb->dev, frame->pf);
+ lapb_dbg(0, "(%p) S3 -> S0\n", lapb->dev);
lapb_clear_queues(lapb);
lapb->state = LAPB_STATE_0;
lapb_start_t1timer(lapb);
@@ -416,10 +324,8 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
break;
case LAPB_RNR:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 RX RNR(%d) R%d\n",
- lapb->dev, frame->pf, frame->nr);
-#endif
+ lapb_dbg(1, "(%p) S3 RX RNR(%d) R%d\n",
+ lapb->dev, frame->pf, frame->nr);
lapb->condition |= LAPB_PEER_RX_BUSY_CONDITION;
lapb_check_need_response(lapb, frame->cr, frame->pf);
if (lapb_validate_nr(lapb, frame->nr)) {
@@ -428,9 +334,7 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
lapb->frmr_data = *frame;
lapb->frmr_type = LAPB_FRMR_Z;
lapb_transmit_frmr(lapb);
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev);
lapb_start_t1timer(lapb);
lapb_stop_t2timer(lapb);
lapb->state = LAPB_STATE_4;
@@ -439,10 +343,8 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
break;
case LAPB_RR:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 RX RR(%d) R%d\n",
- lapb->dev, frame->pf, frame->nr);
-#endif
+ lapb_dbg(1, "(%p) S3 RX RR(%d) R%d\n",
+ lapb->dev, frame->pf, frame->nr);
lapb->condition &= ~LAPB_PEER_RX_BUSY_CONDITION;
lapb_check_need_response(lapb, frame->cr, frame->pf);
if (lapb_validate_nr(lapb, frame->nr)) {
@@ -451,9 +353,7 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
lapb->frmr_data = *frame;
lapb->frmr_type = LAPB_FRMR_Z;
lapb_transmit_frmr(lapb);
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev);
lapb_start_t1timer(lapb);
lapb_stop_t2timer(lapb);
lapb->state = LAPB_STATE_4;
@@ -462,10 +362,8 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
break;
case LAPB_REJ:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 RX REJ(%d) R%d\n",
- lapb->dev, frame->pf, frame->nr);
-#endif
+ lapb_dbg(1, "(%p) S3 RX REJ(%d) R%d\n",
+ lapb->dev, frame->pf, frame->nr);
lapb->condition &= ~LAPB_PEER_RX_BUSY_CONDITION;
lapb_check_need_response(lapb, frame->cr, frame->pf);
if (lapb_validate_nr(lapb, frame->nr)) {
@@ -477,9 +375,7 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
lapb->frmr_data = *frame;
lapb->frmr_type = LAPB_FRMR_Z;
lapb_transmit_frmr(lapb);
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev);
lapb_start_t1timer(lapb);
lapb_stop_t2timer(lapb);
lapb->state = LAPB_STATE_4;
@@ -488,17 +384,13 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
break;
case LAPB_I:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 RX I(%d) S%d R%d\n",
- lapb->dev, frame->pf, frame->ns, frame->nr);
-#endif
+ lapb_dbg(1, "(%p) S3 RX I(%d) S%d R%d\n",
+ lapb->dev, frame->pf, frame->ns, frame->nr);
if (!lapb_validate_nr(lapb, frame->nr)) {
lapb->frmr_data = *frame;
lapb->frmr_type = LAPB_FRMR_Z;
lapb_transmit_frmr(lapb);
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev);
lapb_start_t1timer(lapb);
lapb_stop_t2timer(lapb);
lapb->state = LAPB_STATE_4;
@@ -522,7 +414,7 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
* a frame lost on the wire.
*/
if (cn == NET_RX_DROP) {
- printk(KERN_DEBUG "LAPB: rx congestion\n");
+ pr_debug("rx congestion\n");
break;
}
lapb->vr = (lapb->vr + 1) % modulus;
@@ -541,11 +433,8 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
if (frame->pf)
lapb_enquiry_response(lapb);
} else {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG
- "lapb: (%p) S3 TX REJ(%d) R%d\n",
- lapb->dev, frame->pf, lapb->vr);
-#endif
+ lapb_dbg(1, "(%p) S3 TX REJ(%d) R%d\n",
+ lapb->dev, frame->pf, lapb->vr);
lapb->condition |= LAPB_REJECT_CONDITION;
lapb_send_control(lapb, LAPB_REJ, frame->pf,
LAPB_RESPONSE);
@@ -555,31 +444,22 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
break;
case LAPB_FRMR:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 RX FRMR(%d) %02X "
- "%02X %02X %02X %02X\n", lapb->dev, frame->pf,
- skb->data[0], skb->data[1], skb->data[2],
- skb->data[3], skb->data[4]);
-#endif
+ lapb_dbg(1, "(%p) S3 RX FRMR(%d) %02X %02X %02X %02X %02X\n",
+ lapb->dev, frame->pf,
+ skb->data[0], skb->data[1], skb->data[2],
+ skb->data[3], skb->data[4]);
lapb_establish_data_link(lapb);
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S3 -> S1\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S3 -> S1\n", lapb->dev);
lapb_requeue_frames(lapb);
lapb->state = LAPB_STATE_1;
break;
case LAPB_ILLEGAL:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S3 RX ILLEGAL(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S3 RX ILLEGAL(%d)\n", lapb->dev, frame->pf);
lapb->frmr_data = *frame;
lapb->frmr_type = LAPB_FRMR_W;
lapb_transmit_frmr(lapb);
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev);
lapb_start_t1timer(lapb);
lapb_stop_t2timer(lapb);
lapb->state = LAPB_STATE_4;
@@ -600,25 +480,16 @@ static void lapb_state4_machine(struct lapb_cb *lapb, struct sk_buff *skb,
{
switch (frame->type) {
case LAPB_SABM:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S4 RX SABM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S4 RX SABM(%d)\n", lapb->dev, frame->pf);
if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S4 TX DM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S4 TX DM(%d)\n",
+ lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_DM, frame->pf,
LAPB_RESPONSE);
} else {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S4 TX UA(%d)\n",
- lapb->dev, frame->pf);
-#endif
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S4 -> S3\n", lapb->dev);
-#endif
+ lapb_dbg(1, "(%p) S4 TX UA(%d)\n",
+ lapb->dev, frame->pf);
+ lapb_dbg(0, "(%p) S4 -> S3\n", lapb->dev);
lapb_send_control(lapb, LAPB_UA, frame->pf,
LAPB_RESPONSE);
lapb_stop_t1timer(lapb);
@@ -634,18 +505,11 @@ static void lapb_state4_machine(struct lapb_cb *lapb, struct sk_buff *skb,
break;
case LAPB_SABME:
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S4 RX SABME(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S4 RX SABME(%d)\n", lapb->dev, frame->pf);
if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S4 TX UA(%d)\n",
- lapb->dev, frame->pf);
-#endif
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S4 -> S3\n", lapb->dev);
-#endif
+ lapb_dbg(1, "(%p) S4 TX UA(%d)\n",
+ lapb->dev, frame->pf);
+ lapb_dbg(0, "(%p) S4 -> S3\n", lapb->dev);
lapb_send_control(lapb, LAPB_UA, frame->pf,
LAPB_RESPONSE);
lapb_stop_t1timer(lapb);
@@ -658,10 +522,8 @@ static void lapb_state4_machine(struct lapb_cb *lapb, struct sk_buff *skb,
lapb->va = 0;
lapb_connect_indication(lapb, LAPB_OK);
} else {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S4 TX DM(%d)\n",
- lapb->dev, frame->pf);
-#endif
+ lapb_dbg(1, "(%p) S4 TX DM(%d)\n",
+ lapb->dev, frame->pf);
lapb_send_control(lapb, LAPB_DM, frame->pf,
LAPB_RESPONSE);
}
diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c
index baab2760f651..ba4d015bd1a6 100644
--- a/net/lapb/lapb_out.c
+++ b/net/lapb/lapb_out.c
@@ -14,6 +14,8 @@
* LAPB 002 Jonathan Naylor New timer architecture.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
@@ -60,10 +62,8 @@ static void lapb_send_iframe(struct lapb_cb *lapb, struct sk_buff *skb, int poll
*frame |= lapb->vs << 1;
}
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S%d TX I(%d) S%d R%d\n",
- lapb->dev, lapb->state, poll_bit, lapb->vs, lapb->vr);
-#endif
+ lapb_dbg(1, "(%p) S%d TX I(%d) S%d R%d\n",
+ lapb->dev, lapb->state, poll_bit, lapb->vs, lapb->vr);
lapb_transmit_buffer(lapb, skb, LAPB_COMMAND);
}
@@ -148,11 +148,9 @@ void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *skb, int type)
}
}
-#if LAPB_DEBUG > 2
- printk(KERN_DEBUG "lapb: (%p) S%d TX %02X %02X %02X\n",
- lapb->dev, lapb->state,
- skb->data[0], skb->data[1], skb->data[2]);
-#endif
+ lapb_dbg(2, "(%p) S%d TX %02X %02X %02X\n",
+ lapb->dev, lapb->state,
+ skb->data[0], skb->data[1], skb->data[2]);
if (!lapb_data_transmit(lapb, skb))
kfree_skb(skb);
@@ -164,16 +162,10 @@ void lapb_establish_data_link(struct lapb_cb *lapb)
lapb->n2count = 0;
if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S%d TX SABME(1)\n",
- lapb->dev, lapb->state);
-#endif
+ lapb_dbg(1, "(%p) S%d TX SABME(1)\n", lapb->dev, lapb->state);
lapb_send_control(lapb, LAPB_SABME, LAPB_POLLON, LAPB_COMMAND);
} else {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S%d TX SABM(1)\n",
- lapb->dev, lapb->state);
-#endif
+ lapb_dbg(1, "(%p) S%d TX SABM(1)\n", lapb->dev, lapb->state);
lapb_send_control(lapb, LAPB_SABM, LAPB_POLLON, LAPB_COMMAND);
}
@@ -183,10 +175,8 @@ void lapb_establish_data_link(struct lapb_cb *lapb)
void lapb_enquiry_response(struct lapb_cb *lapb)
{
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S%d TX RR(1) R%d\n",
- lapb->dev, lapb->state, lapb->vr);
-#endif
+ lapb_dbg(1, "(%p) S%d TX RR(1) R%d\n",
+ lapb->dev, lapb->state, lapb->vr);
lapb_send_control(lapb, LAPB_RR, LAPB_POLLON, LAPB_RESPONSE);
@@ -195,10 +185,8 @@ void lapb_enquiry_response(struct lapb_cb *lapb)
void lapb_timeout_response(struct lapb_cb *lapb)
{
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S%d TX RR(0) R%d\n",
- lapb->dev, lapb->state, lapb->vr);
-#endif
+ lapb_dbg(1, "(%p) S%d TX RR(0) R%d\n",
+ lapb->dev, lapb->state, lapb->vr);
lapb_send_control(lapb, LAPB_RR, LAPB_POLLOFF, LAPB_RESPONSE);
lapb->condition &= ~LAPB_ACK_PENDING_CONDITION;
diff --git a/net/lapb/lapb_subr.c b/net/lapb/lapb_subr.c
index 066225b4e824..9d0a426eccbb 100644
--- a/net/lapb/lapb_subr.c
+++ b/net/lapb/lapb_subr.c
@@ -13,6 +13,8 @@
* LAPB 001 Jonathan Naylor Started Coding
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
@@ -111,11 +113,9 @@ int lapb_decode(struct lapb_cb *lapb, struct sk_buff *skb,
{
frame->type = LAPB_ILLEGAL;
-#if LAPB_DEBUG > 2
- printk(KERN_DEBUG "lapb: (%p) S%d RX %02X %02X %02X\n",
- lapb->dev, lapb->state,
- skb->data[0], skb->data[1], skb->data[2]);
-#endif
+ lapb_dbg(2, "(%p) S%d RX %02X %02X %02X\n",
+ lapb->dev, lapb->state,
+ skb->data[0], skb->data[1], skb->data[2]);
/* We always need to look at 2 bytes, sometimes we need
* to look at 3 and those cases are handled below.
@@ -284,12 +284,10 @@ void lapb_transmit_frmr(struct lapb_cb *lapb)
dptr++;
*dptr++ = lapb->frmr_type;
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S%d TX FRMR %02X %02X %02X %02X %02X\n",
- lapb->dev, lapb->state,
- skb->data[1], skb->data[2], skb->data[3],
- skb->data[4], skb->data[5]);
-#endif
+ lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X %02X %02X\n",
+ lapb->dev, lapb->state,
+ skb->data[1], skb->data[2], skb->data[3],
+ skb->data[4], skb->data[5]);
} else {
dptr = skb_put(skb, 4);
*dptr++ = LAPB_FRMR;
@@ -301,11 +299,9 @@ void lapb_transmit_frmr(struct lapb_cb *lapb)
dptr++;
*dptr++ = lapb->frmr_type;
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S%d TX FRMR %02X %02X %02X\n",
- lapb->dev, lapb->state, skb->data[1],
- skb->data[2], skb->data[3]);
-#endif
+ lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X\n",
+ lapb->dev, lapb->state, skb->data[1],
+ skb->data[2], skb->data[3]);
}
lapb_transmit_buffer(lapb, skb, LAPB_RESPONSE);
diff --git a/net/lapb/lapb_timer.c b/net/lapb/lapb_timer.c
index f8cd641dfc82..54563ad8aeb1 100644
--- a/net/lapb/lapb_timer.c
+++ b/net/lapb/lapb_timer.c
@@ -14,6 +14,8 @@
* LAPB 002 Jonathan Naylor New timer architecture.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
@@ -105,21 +107,17 @@ static void lapb_t1timer_expiry(unsigned long param)
lapb_clear_queues(lapb);
lapb->state = LAPB_STATE_0;
lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S1 -> S0\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S1 -> S0\n", lapb->dev);
return;
} else {
lapb->n2count++;
if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S1 TX SABME(1)\n", lapb->dev);
-#endif
+ lapb_dbg(1, "(%p) S1 TX SABME(1)\n",
+ lapb->dev);
lapb_send_control(lapb, LAPB_SABME, LAPB_POLLON, LAPB_COMMAND);
} else {
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S1 TX SABM(1)\n", lapb->dev);
-#endif
+ lapb_dbg(1, "(%p) S1 TX SABM(1)\n",
+ lapb->dev);
lapb_send_control(lapb, LAPB_SABM, LAPB_POLLON, LAPB_COMMAND);
}
}
@@ -133,15 +131,11 @@ static void lapb_t1timer_expiry(unsigned long param)
lapb_clear_queues(lapb);
lapb->state = LAPB_STATE_0;
lapb_disconnect_confirmation(lapb, LAPB_TIMEDOUT);
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S2 -> S0\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S2 -> S0\n", lapb->dev);
return;
} else {
lapb->n2count++;
-#if LAPB_DEBUG > 1
- printk(KERN_DEBUG "lapb: (%p) S2 TX DISC(1)\n", lapb->dev);
-#endif
+ lapb_dbg(1, "(%p) S2 TX DISC(1)\n", lapb->dev);
lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND);
}
break;
@@ -155,9 +149,7 @@ static void lapb_t1timer_expiry(unsigned long param)
lapb->state = LAPB_STATE_0;
lapb_stop_t2timer(lapb);
lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S3 -> S0\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S3 -> S0\n", lapb->dev);
return;
} else {
lapb->n2count++;
@@ -173,9 +165,7 @@ static void lapb_t1timer_expiry(unsigned long param)
lapb_clear_queues(lapb);
lapb->state = LAPB_STATE_0;
lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
-#if LAPB_DEBUG > 0
- printk(KERN_DEBUG "lapb: (%p) S4 -> S0\n", lapb->dev);
-#endif
+ lapb_dbg(0, "(%p) S4 -> S0\n", lapb->dev);
return;
} else {
lapb->n2count++;
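
The lapb hunks above collapse every "#if LAPB_DEBUG > n" printk block into a single lapb_dbg(level, ...) call, with the new pr_fmt() define supplying the "lapb: " prefix that used to be hardcoded in each format string. The macro itself is defined outside this diff (in include/net/lapb.h); a minimal sketch of the shape needed to preserve the old semantics, where level-0 messages are state transitions and level-1/2 messages are frame traces:

	/* illustrative sketch only -- the real definition is not in this diff */
	#define LAPB_DEBUG 0	/* assumed compile-time verbosity */

	#define lapb_dbg(level, fmt, ...)			\
	do {							\
		if (level < LAPB_DEBUG)				\
			pr_debug(fmt, ##__VA_ARGS__);		\
	} while (0)

With LAPB_DEBUG at 0, lapb_dbg() never reaches pr_debug(), matching the old default where the #if guards compiled the messages out.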
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index b9bef2c75026..fe5453c3e719 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -71,8 +71,7 @@ static inline u16 llc_ui_next_link_no(int sap)
*/
static inline __be16 llc_proto_type(u16 arphrd)
{
- return arphrd == ARPHRD_IEEE802_TR ?
- htons(ETH_P_TR_802_2) : htons(ETH_P_802_2);
+ return htons(ETH_P_802_2);
}
/**
@@ -518,7 +517,7 @@ static int llc_ui_listen(struct socket *sock, int backlog)
if (sock_flag(sk, SOCK_ZAPPED))
goto out;
rc = 0;
- if (!(unsigned)backlog) /* BSDism */
+ if (!(unsigned int)backlog) /* BSDism */
backlog = 1;
sk->sk_max_ack_backlog = backlog;
if (sk->sk_state != TCP_LISTEN) {
@@ -806,10 +805,9 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
sk_wait_data(sk, &timeo);
if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) {
- if (net_ratelimit())
- printk(KERN_DEBUG "LLC(%s:%d): Application "
- "bug, race in MSG_PEEK.\n",
- current->comm, task_pid_nr(current));
+ net_dbg_ratelimited("LLC(%s:%d): Application bug, race in MSG_PEEK\n",
+ current->comm,
+ task_pid_nr(current));
peek_seq = llc->copied_seq;
}
continue;
@@ -840,7 +838,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
if (!(flags & MSG_PEEK)) {
spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
- sk_eat_skb(sk, skb, 0);
+ sk_eat_skb(sk, skb, false);
spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
*seq = 0;
}
@@ -863,7 +861,7 @@ copy_uaddr:
if (!(flags & MSG_PEEK)) {
spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
- sk_eat_skb(sk, skb, 0);
+ sk_eat_skb(sk, skb, false);
spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
*seq = 0;
}
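
The recvmsg hunk swaps the open-coded net_ratelimit()-guarded printk for net_dbg_ratelimited(). The helper lives in include/linux/net.h rather than in this diff; its shape is essentially:

	/* sketch of the helper assumed by the hunk above */
	#define net_ratelimited_function(function, ...)		\
	do {							\
		if (net_ratelimit())				\
			function(__VA_ARGS__);			\
	} while (0)

	#define net_dbg_ratelimited(fmt, ...)			\
		net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)

so the message is rate limited while keeping pr_debug's dynamic-debug behaviour.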
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index ba137a6a224d..0d0d416dfab6 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -828,7 +828,7 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
else {
dprintk("%s: adding to backlog...\n", __func__);
llc_set_backlog_type(skb, LLC_PACKET);
- if (sk_add_backlog(sk, skb))
+ if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
goto drop_unlock;
}
out:
diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c
index b658cba89fdd..2dae8a5df23f 100644
--- a/net/llc/llc_output.c
+++ b/net/llc/llc_output.c
@@ -14,9 +14,7 @@
*/
#include <linux/if_arp.h>
-#include <linux/if_tr.h>
#include <linux/netdevice.h>
-#include <linux/trdevice.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <net/llc.h>
@@ -37,7 +35,6 @@ int llc_mac_hdr_init(struct sk_buff *skb,
int rc = -EINVAL;
switch (skb->dev->type) {
- case ARPHRD_IEEE802_TR:
case ARPHRD_ETHER:
case ARPHRD_LOOPBACK:
rc = dev_hard_header(skb, skb->dev, ETH_P_802_2, da, sa,
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index 94e7fca75b85..7c5073badc73 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -31,10 +31,6 @@ static int llc_mac_header_len(unsigned short devtype)
case ARPHRD_ETHER:
case ARPHRD_LOOPBACK:
return sizeof(struct ethhdr);
-#if defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
- case ARPHRD_IEEE802_TR:
- return sizeof(struct trh_hdr);
-#endif
}
return 0;
}
diff --git a/net/llc/sysctl_net_llc.c b/net/llc/sysctl_net_llc.c
index e2ebe3586263..d75306b9c2f3 100644
--- a/net/llc/sysctl_net_llc.c
+++ b/net/llc/sysctl_net_llc.c
@@ -7,6 +7,7 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/sysctl.h>
+#include <net/net_namespace.h>
#include <net/llc.h>
#ifndef CONFIG_SYSCTL
@@ -56,48 +57,29 @@ static struct ctl_table llc_station_table[] = {
{ },
};
-static struct ctl_table llc2_dir_timeout_table[] = {
- {
- .procname = "timeout",
- .mode = 0555,
- .child = llc2_timeout_table,
- },
- { },
-};
-
-static struct ctl_table llc_table[] = {
- {
- .procname = "llc2",
- .mode = 0555,
- .child = llc2_dir_timeout_table,
- },
- {
- .procname = "station",
- .mode = 0555,
- .child = llc_station_table,
- },
- { },
-};
-
-static struct ctl_path llc_path[] = {
- { .procname = "net", },
- { .procname = "llc", },
- { }
-};
-
-static struct ctl_table_header *llc_table_header;
+static struct ctl_table_header *llc2_timeout_header;
+static struct ctl_table_header *llc_station_header;
int __init llc_sysctl_init(void)
{
- llc_table_header = register_sysctl_paths(llc_path, llc_table);
+ llc2_timeout_header = register_net_sysctl(&init_net, "net/llc/llc2/timeout", llc2_timeout_table);
+ llc_station_header = register_net_sysctl(&init_net, "net/llc/station", llc_station_table);
- return llc_table_header ? 0 : -ENOMEM;
+ if (!llc2_timeout_header || !llc_station_header) {
+ llc_sysctl_exit();
+ return -ENOMEM;
+ }
+ return 0;
}
void llc_sysctl_exit(void)
{
- if (llc_table_header) {
- unregister_sysctl_table(llc_table_header);
- llc_table_header = NULL;
+ if (llc2_timeout_header) {
+ unregister_net_sysctl_table(llc2_timeout_header);
+ llc2_timeout_header = NULL;
+ }
+ if (llc_station_header) {
+ unregister_net_sysctl_table(llc_station_header);
+ llc_station_header = NULL;
}
}
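
This conversion replaces the nested ctl_table tree -- directories expressed as .child tables plus a ctl_path array -- with two flat registrations addressed by path string, which is why llc2_dir_timeout_table, llc_table and llc_path can all be deleted. The resulting init/exit pattern generalizes; a condensed sketch with hypothetical names:

	static struct ctl_table_header *foo_header;

	static int __init foo_sysctl_init(void)
	{
		/* the path string replaces ctl_path + directory tables */
		foo_header = register_net_sysctl(&init_net, "net/foo", foo_table);
		return foo_header ? 0 : -ENOMEM;
	}

	static void foo_sysctl_exit(void)
	{
		if (foo_header) {
			unregister_net_sysctl_table(foo_header);
			foo_header = NULL;
		}
	}

Note that the llc version must unwind the first registration when the second fails, hence the shared llc_sysctl_exit() call on the error path.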
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 96ddb72760b9..8d249d705980 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -225,6 +225,17 @@ config MAC80211_VERBOSE_MHWMP_DEBUG
Do not select this option.
+config MAC80211_VERBOSE_MESH_SYNC_DEBUG
+ bool "Verbose mesh mesh synchronization debugging"
+ depends on MAC80211_DEBUG_MENU
+ depends on MAC80211_MESH
+ ---help---
+ Selecting this option causes mac80211 to print out very verbose mesh
+ synchronization debugging messages (when mac80211 is taking part in a
+ mesh network).
+
+ Do not select this option.
+
config MAC80211_VERBOSE_TDLS_DEBUG
bool "Verbose TDLS debugging"
depends on MAC80211_DEBUG_MENU
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 1be7a454aa77..3e9d931bba35 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -38,7 +38,8 @@ mac80211-$(CONFIG_MAC80211_MESH) += \
mesh.o \
mesh_pathtbl.o \
mesh_plink.o \
- mesh_hwmp.o
+ mesh_hwmp.o \
+ mesh_sync.o
mac80211-$(CONFIG_PM) += pm.o
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 64d3ce5ea1a0..26ddb699d693 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -142,6 +142,18 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
u8 *timer_to_id = ptid - *ptid;
struct sta_info *sta = container_of(timer_to_id, struct sta_info,
timer_to_tid[0]);
+ struct tid_ampdu_rx *tid_rx;
+ unsigned long timeout;
+
+ tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[*ptid]);
+ if (!tid_rx)
+ return;
+
+ timeout = tid_rx->last_rx + TU_TO_JIFFIES(tid_rx->timeout);
+ if (time_is_after_jiffies(timeout)) {
+ mod_timer(&tid_rx->session_timer, timeout);
+ return;
+ }
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
@@ -248,11 +260,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
(buf_size > IEEE80211_MAX_AMPDU_BUF)) {
status = WLAN_STATUS_INVALID_QOS_PARAM;
#ifdef CONFIG_MAC80211_HT_DEBUG
- if (net_ratelimit())
- printk(KERN_DEBUG "AddBA Req with bad params from "
- "%pM on tid %u. policy %d, buffer size %d\n",
- mgmt->sa, tid, ba_policy,
- buf_size);
+ net_dbg_ratelimited("AddBA Req with bad params from %pM on tid %u. policy %d, buffer size %d\n",
+ mgmt->sa, tid, ba_policy, buf_size);
#endif /* CONFIG_MAC80211_HT_DEBUG */
goto end_no_lock;
}
@@ -269,10 +278,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
if (sta->ampdu_mlme.tid_rx[tid]) {
#ifdef CONFIG_MAC80211_HT_DEBUG
- if (net_ratelimit())
- printk(KERN_DEBUG "unexpected AddBA Req from "
- "%pM on tid %u\n",
- mgmt->sa, tid);
+ net_dbg_ratelimited("unexpected AddBA Req from %pM on tid %u\n",
+ mgmt->sa, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
/* delete existing Rx BA session on the same tid */
@@ -291,7 +298,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
/* rx timer */
tid_agg_rx->session_timer.function = sta_rx_agg_session_timer_expired;
tid_agg_rx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
- init_timer(&tid_agg_rx->session_timer);
+ init_timer_deferrable(&tid_agg_rx->session_timer);
/* rx reorder timer */
tid_agg_rx->reorder_timer.function = sta_rx_agg_reorder_timer_expired;
@@ -335,8 +342,10 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
/* activate it for RX */
rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx);
- if (timeout)
+ if (timeout) {
mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout));
+ tid_agg_rx->last_rx = jiffies;
+ }
end:
mutex_unlock(&sta->ampdu_mlme.mtx);
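
Two cooperating changes in this file: the RX BA session timer becomes deferrable, so an otherwise idle CPU is not woken merely to check for session timeout, and the expiry handler re-arms itself from last_rx instead of the RX path calling mod_timer() on every received frame. The distilled pattern, with hypothetical names:

	/* only declare the session dead if no traffic arrived within
	 * the timeout window; otherwise push the timer forward
	 */
	static void session_timer_expired(unsigned long data)
	{
		struct my_session *s = (struct my_session *)data;
		unsigned long deadline = s->last_activity +
					 TU_TO_JIFFIES(s->timeout);

		if (time_is_after_jiffies(deadline)) {
			mod_timer(&s->timer, deadline);	/* re-arm */
			return;
		}
		tear_down(s);	/* genuinely expired */
	}

	init_timer_deferrable(&s->timer);

TU_TO_JIFFIES() is the helper factored out of TU_TO_EXP_TIME() in the ieee80211_i.h hunk further down.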
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 76be61744198..7cf07158805c 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -286,25 +286,25 @@ static inline int ieee80211_ac_from_tid(int tid)
* a global "agg_queue_stop" refcount.
*/
static void __acquires(agg_queue)
-ieee80211_stop_queue_agg(struct ieee80211_local *local, int tid)
+ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
{
- int queue = ieee80211_ac_from_tid(tid);
+ int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
- if (atomic_inc_return(&local->agg_queue_stop[queue]) == 1)
+ if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)
ieee80211_stop_queue_by_reason(
- &local->hw, queue,
+ &sdata->local->hw, queue,
IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
__acquire(agg_queue);
}
static void __releases(agg_queue)
-ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
+ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
{
- int queue = ieee80211_ac_from_tid(tid);
+ int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
- if (atomic_dec_return(&local->agg_queue_stop[queue]) == 0)
+ if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)
ieee80211_wake_queue_by_reason(
- &local->hw, queue,
+ &sdata->local->hw, queue,
IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
__release(agg_queue);
}
@@ -314,13 +314,14 @@ ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
* requires a call to ieee80211_agg_splice_finish later
*/
static void __acquires(agg_queue)
-ieee80211_agg_splice_packets(struct ieee80211_local *local,
+ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata,
struct tid_ampdu_tx *tid_tx, u16 tid)
{
- int queue = ieee80211_ac_from_tid(tid);
+ struct ieee80211_local *local = sdata->local;
+ int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
unsigned long flags;
- ieee80211_stop_queue_agg(local, tid);
+ ieee80211_stop_queue_agg(sdata, tid);
if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
" from the pending queue\n", tid))
@@ -336,9 +337,9 @@ ieee80211_agg_splice_packets(struct ieee80211_local *local,
}
static void __releases(agg_queue)
-ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
+ieee80211_agg_splice_finish(struct ieee80211_sub_if_data *sdata, u16 tid)
{
- ieee80211_wake_queue_agg(local, tid);
+ ieee80211_wake_queue_agg(sdata, tid);
}
void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
@@ -376,9 +377,9 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
" tid %d\n", tid);
#endif
spin_lock_bh(&sta->lock);
- ieee80211_agg_splice_packets(local, tid_tx, tid);
+ ieee80211_agg_splice_packets(sdata, tid_tx, tid);
ieee80211_assign_tid_tx(sta, tid, NULL);
- ieee80211_agg_splice_finish(local, tid);
+ ieee80211_agg_splice_finish(sdata, tid);
spin_unlock_bh(&sta->lock);
kfree_rcu(tid_tx, rcu_head);
@@ -417,6 +418,24 @@ static void sta_tx_agg_session_timer_expired(unsigned long data)
u8 *timer_to_id = ptid - *ptid;
struct sta_info *sta = container_of(timer_to_id, struct sta_info,
timer_to_tid[0]);
+ struct tid_ampdu_tx *tid_tx;
+ unsigned long timeout;
+
+ rcu_read_lock();
+ tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[*ptid]);
+ if (!tid_tx || test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout);
+ if (time_is_after_jiffies(timeout)) {
+ mod_timer(&tid_tx->session_timer, timeout);
+ rcu_read_unlock();
+ return;
+ }
+
+ rcu_read_unlock();
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "tx session timer expired on tid %d\n", (u16)*ptid);
@@ -542,7 +561,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
/* tx timer */
tid_tx->session_timer.function = sta_tx_agg_session_timer_expired;
tid_tx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
- init_timer(&tid_tx->session_timer);
+ init_timer_deferrable(&tid_tx->session_timer);
/* assign a dialog token */
sta->ampdu_mlme.dialog_token_allocator++;
@@ -586,14 +605,14 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
*/
spin_lock_bh(&sta->lock);
- ieee80211_agg_splice_packets(local, tid_tx, tid);
+ ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
/*
* Now mark as operational. This will be visible
* in the TX path, and lets it go lock-free in
* the common case.
*/
set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
- ieee80211_agg_splice_finish(local, tid);
+ ieee80211_agg_splice_finish(sta->sdata, tid);
spin_unlock_bh(&sta->lock);
}
@@ -778,12 +797,12 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
* more.
*/
- ieee80211_agg_splice_packets(local, tid_tx, tid);
+ ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
/* future packets must not find the tid_tx struct any more */
ieee80211_assign_tid_tx(sta, tid, NULL);
- ieee80211_agg_splice_finish(local, tid);
+ ieee80211_agg_splice_finish(sta->sdata, tid);
kfree_rcu(tid_tx, rcu_head);
@@ -884,9 +903,11 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
sta->ampdu_mlme.addba_req_num[tid] = 0;
- if (tid_tx->timeout)
+ if (tid_tx->timeout) {
mod_timer(&tid_tx->session_timer,
TU_TO_EXP_TIME(tid_tx->timeout));
+ tid_tx->last_tx = jiffies;
+ }
} else {
___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR,
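
The splice/stop/wake helpers now take the sdata instead of the local pointer because, with per-vif hardware queues, the queue to stop is no longer the AC index itself but whatever the vif maps it to:

	/* derived from the hunks above */
	int ac    = ieee80211_ac_from_tid(tid);	/* 0 .. IEEE80211_NUM_ACS - 1 */
	int queue = sdata->vif.hw_queue[ac];	/* per-vif driver queue */

The agg_queue_stop refcount is correspondingly indexed by hardware queue, so two vifs sharing a queue stop and wake it consistently.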
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 677d65929780..495831ee48f1 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -412,6 +412,10 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
sinfo->llid = le16_to_cpu(sta->llid);
sinfo->plid = le16_to_cpu(sta->plid);
sinfo->plink_state = sta->plink_state;
+ if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
+ sinfo->filled |= STATION_INFO_T_OFFSET;
+ sinfo->t_offset = sta->t_offset;
+ }
#endif
}
@@ -446,6 +450,180 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER);
}
+static const char ieee80211_gstrings_sta_stats[][ETH_GSTRING_LEN] = {
+ "rx_packets", "rx_bytes", "wep_weak_iv_count",
+ "rx_duplicates", "rx_fragments", "rx_dropped",
+ "tx_packets", "tx_bytes", "tx_fragments",
+ "tx_filtered", "tx_retry_failed", "tx_retries",
+ "beacon_loss", "sta_state", "txrate", "rxrate", "signal",
+ "channel", "noise", "ch_time", "ch_time_busy",
+ "ch_time_ext_busy", "ch_time_rx", "ch_time_tx"
+};
+#define STA_STATS_LEN ARRAY_SIZE(ieee80211_gstrings_sta_stats)
+
+static int ieee80211_get_et_sset_count(struct wiphy *wiphy,
+ struct net_device *dev,
+ int sset)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ int rv = 0;
+
+ if (sset == ETH_SS_STATS)
+ rv += STA_STATS_LEN;
+
+ rv += drv_get_et_sset_count(sdata, sset);
+
+ if (rv == 0)
+ return -EOPNOTSUPP;
+ return rv;
+}
+
+static void ieee80211_get_et_stats(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct sta_info *sta;
+ struct ieee80211_local *local = sdata->local;
+ struct station_info sinfo;
+ struct survey_info survey;
+ int i, q;
+#define STA_STATS_SURVEY_LEN 7
+
+ memset(data, 0, sizeof(u64) * STA_STATS_LEN);
+
+#define ADD_STA_STATS(sta) \
+ do { \
+ data[i++] += sta->rx_packets; \
+ data[i++] += sta->rx_bytes; \
+ data[i++] += sta->wep_weak_iv_count; \
+ data[i++] += sta->num_duplicates; \
+ data[i++] += sta->rx_fragments; \
+ data[i++] += sta->rx_dropped; \
+ \
+ data[i++] += sta->tx_packets; \
+ data[i++] += sta->tx_bytes; \
+ data[i++] += sta->tx_fragments; \
+ data[i++] += sta->tx_filtered_count; \
+ data[i++] += sta->tx_retry_failed; \
+ data[i++] += sta->tx_retry_count; \
+ data[i++] += sta->beacon_loss_count; \
+ } while (0)
+
+ /* For Managed stations, find the single station based on BSSID
+ * and use that. For other interface types, iterate through all available
+ * stations and add stats for any station that is assigned to this
+ * network device.
+ */
+
+ rcu_read_lock();
+
+ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ sta = sta_info_get_bss(sdata, sdata->u.mgd.bssid);
+
+ if (!(sta && !WARN_ON(sta->sdata->dev != dev)))
+ goto do_survey;
+
+ i = 0;
+ ADD_STA_STATS(sta);
+
+ data[i++] = sta->sta_state;
+
+ sinfo.filled = 0;
+ sta_set_sinfo(sta, &sinfo);
+
+ if (sinfo.filled & STATION_INFO_TX_BITRATE)
+ data[i] = 100000 *
+ cfg80211_calculate_bitrate(&sinfo.txrate);
+ i++;
+ if (sinfo.filled & STATION_INFO_RX_BITRATE)
+ data[i] = 100000 *
+ cfg80211_calculate_bitrate(&sinfo.rxrate);
+ i++;
+
+ if (sinfo.filled & STATION_INFO_SIGNAL_AVG)
+ data[i] = (u8)sinfo.signal_avg;
+ i++;
+ } else {
+ list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ /* Make sure this station belongs to the proper dev */
+ if (sta->sdata->dev != dev)
+ continue;
+
+ i = 0;
+ ADD_STA_STATS(sta);
+ }
+ }
+
+do_survey:
+ i = STA_STATS_LEN - STA_STATS_SURVEY_LEN;
+ /* Get survey stats for current channel */
+ q = 0;
+ while (true) {
+ survey.filled = 0;
+ if (drv_get_survey(local, q, &survey) != 0) {
+ survey.filled = 0;
+ break;
+ }
+
+ if (survey.channel &&
+ (local->oper_channel->center_freq ==
+ survey.channel->center_freq))
+ break;
+ q++;
+ }
+
+ if (survey.filled)
+ data[i++] = survey.channel->center_freq;
+ else
+ data[i++] = 0;
+ if (survey.filled & SURVEY_INFO_NOISE_DBM)
+ data[i++] = (u8)survey.noise;
+ else
+ data[i++] = -1LL;
+ if (survey.filled & SURVEY_INFO_CHANNEL_TIME)
+ data[i++] = survey.channel_time;
+ else
+ data[i++] = -1LL;
+ if (survey.filled & SURVEY_INFO_CHANNEL_TIME_BUSY)
+ data[i++] = survey.channel_time_busy;
+ else
+ data[i++] = -1LL;
+ if (survey.filled & SURVEY_INFO_CHANNEL_TIME_EXT_BUSY)
+ data[i++] = survey.channel_time_ext_busy;
+ else
+ data[i++] = -1LL;
+ if (survey.filled & SURVEY_INFO_CHANNEL_TIME_RX)
+ data[i++] = survey.channel_time_rx;
+ else
+ data[i++] = -1LL;
+ if (survey.filled & SURVEY_INFO_CHANNEL_TIME_TX)
+ data[i++] = survey.channel_time_tx;
+ else
+ data[i++] = -1LL;
+
+ rcu_read_unlock();
+
+ if (WARN_ON(i != STA_STATS_LEN))
+ return;
+
+ drv_get_et_stats(sdata, stats, &(data[STA_STATS_LEN]));
+}
+
+static void ieee80211_get_et_strings(struct wiphy *wiphy,
+ struct net_device *dev,
+ u32 sset, u8 *data)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ int sz_sta_stats = 0;
+
+ if (sset == ETH_SS_STATS) {
+ sz_sta_stats = sizeof(ieee80211_gstrings_sta_stats);
+ memcpy(data, *ieee80211_gstrings_sta_stats, sz_sta_stats);
+ }
+ drv_get_et_strings(sdata, sset, &(data[sz_sta_stats]));
+}
static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
int idx, u8 *mac, struct station_info *sinfo)
@@ -640,6 +818,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
ieee80211_bss_info_change_notify(sdata, changed);
+ netif_carrier_on(dev);
+ list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+ netif_carrier_on(vlan->dev);
+
return 0;
}
@@ -665,7 +847,7 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
{
- struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_sub_if_data *sdata, *vlan;
struct beacon_data *old;
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -674,6 +856,10 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
if (!old)
return -ENOENT;
+ list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+ netif_carrier_off(vlan->dev);
+ netif_carrier_off(dev);
+
RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
kfree_rcu(old, rcu_head);
@@ -907,7 +1093,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
} else
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- if (compare_ether_addr(mac, sdata->vif.addr) == 0)
+ if (ether_addr_equal(mac, sdata->vif.addr))
return -EINVAL;
if (is_multicast_ether_addr(mac))
@@ -993,6 +1179,9 @@ static int ieee80211_change_station(struct wiphy *wiphy,
}
if (params->vlan && params->vlan != sta->sdata->dev) {
+ bool prev_4addr = false;
+ bool new_4addr = false;
+
vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
if (vlansdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
@@ -1008,9 +1197,25 @@ static int ieee80211_change_station(struct wiphy *wiphy,
}
rcu_assign_pointer(vlansdata->u.vlan.sta, sta);
+ new_4addr = true;
+ }
+
+ if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
+ sta->sdata->u.vlan.sta) {
+ rcu_assign_pointer(sta->sdata->u.vlan.sta, NULL);
+ prev_4addr = true;
}
sta->sdata = vlansdata;
+
+ if (sta->sta_state == IEEE80211_STA_AUTHORIZED &&
+ prev_4addr != new_4addr) {
+ if (new_4addr)
+ atomic_dec(&sta->sdata->bss->num_mcast_sta);
+ else
+ atomic_inc(&sta->sdata->bss->num_mcast_sta);
+ }
+
ieee80211_send_layer2_update(sta);
}
@@ -1235,6 +1440,7 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh,
/* now copy the rest of the setup parameters */
ifmsh->mesh_id_len = setup->mesh_id_len;
memcpy(ifmsh->mesh_id, setup->mesh_id, ifmsh->mesh_id_len);
+ ifmsh->mesh_sp_id = setup->sync_method;
ifmsh->mesh_pp_id = setup->path_sel_proto;
ifmsh->mesh_pm_id = setup->path_metric;
ifmsh->security = IEEE80211_MESH_SEC_NONE;
@@ -1279,6 +1485,9 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
conf->dot11MeshTTL = nconf->element_ttl;
if (_chg_mesh_attr(NL80211_MESHCONF_AUTO_OPEN_PLINKS, mask))
conf->auto_open_plinks = nconf->auto_open_plinks;
+ if (_chg_mesh_attr(NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, mask))
+ conf->dot11MeshNbrOffsetMaxNeighbor =
+ nconf->dot11MeshNbrOffsetMaxNeighbor;
if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, mask))
conf->dot11MeshHWMPmaxPREQretries =
nconf->dot11MeshHWMPmaxPREQretries;
@@ -1329,6 +1538,11 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
return -ENOTSUPP;
conf->rssi_threshold = nconf->rssi_threshold;
}
+ if (_chg_mesh_attr(NL80211_MESHCONF_HT_OPMODE, mask)) {
+ conf->ht_opmode = nconf->ht_opmode;
+ sdata->vif.bss_conf.ht_operation_mode = nconf->ht_opmode;
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
+ }
return 0;
}
@@ -1437,6 +1651,9 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
if (!local->ops->conf_tx)
return -EOPNOTSUPP;
+ if (local->hw.queues < IEEE80211_NUM_ACS)
+ return -EOPNOTSUPP;
+
memset(&p, 0, sizeof(p));
p.aifs = params->aifs;
p.cw_max = params->cwmax;
@@ -1449,14 +1666,11 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
*/
p.uapsd = false;
- if (params->queue >= local->hw.queues)
- return -EINVAL;
-
- sdata->tx_conf[params->queue] = p;
- if (drv_conf_tx(local, sdata, params->queue, &p)) {
+ sdata->tx_conf[params->ac] = p;
+ if (drv_conf_tx(local, sdata, params->ac, &p)) {
wiphy_debug(local->hw.wiphy,
- "failed to set TX queue parameters for queue %d\n",
- params->queue);
+ "failed to set TX queue parameters for AC %d\n",
+ params->ac);
return -EINVAL;
}
@@ -2090,6 +2304,10 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
IEEE80211_SKB_CB(skb)->flags = flags;
+ if (flags & IEEE80211_TX_CTL_TX_OFFCHAN)
+ IEEE80211_SKB_CB(skb)->hw_queue =
+ local->hw.offchannel_tx_hw_queue;
+
skb->dev = sdata->dev;
*cookie = (unsigned long) skb;
@@ -2131,6 +2349,8 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
/* modify cookie to prevent API mismatches */
*cookie ^= 2;
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
+ IEEE80211_SKB_CB(skb)->hw_queue =
+ local->hw.offchannel_tx_hw_queue;
local->hw_roc_skb = skb;
local->hw_roc_skb_for_status = skb;
mutex_unlock(&local->mtx);
@@ -2350,8 +2570,8 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
tf->u.setup_req.capability =
cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
- ieee80211_add_srates_ie(&sdata->vif, skb);
- ieee80211_add_ext_srates_ie(&sdata->vif, skb);
+ ieee80211_add_srates_ie(&sdata->vif, skb, false);
+ ieee80211_add_ext_srates_ie(&sdata->vif, skb, false);
ieee80211_tdls_add_ext_capab(skb);
break;
case WLAN_TDLS_SETUP_RESPONSE:
@@ -2364,8 +2584,8 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
tf->u.setup_resp.capability =
cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
- ieee80211_add_srates_ie(&sdata->vif, skb);
- ieee80211_add_ext_srates_ie(&sdata->vif, skb);
+ ieee80211_add_srates_ie(&sdata->vif, skb, false);
+ ieee80211_add_ext_srates_ie(&sdata->vif, skb, false);
ieee80211_tdls_add_ext_capab(skb);
break;
case WLAN_TDLS_SETUP_CONFIRM:
@@ -2425,8 +2645,8 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
mgmt->u.action.u.tdls_discover_resp.capability =
cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
- ieee80211_add_srates_ie(&sdata->vif, skb);
- ieee80211_add_ext_srates_ie(&sdata->vif, skb);
+ ieee80211_add_srates_ie(&sdata->vif, skb, false);
+ ieee80211_add_ext_srates_ie(&sdata->vif, skb, false);
ieee80211_tdls_add_ext_capab(skb);
break;
default:
@@ -2666,13 +2886,22 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
}
static struct ieee80211_channel *
-ieee80211_wiphy_get_channel(struct wiphy *wiphy)
+ieee80211_wiphy_get_channel(struct wiphy *wiphy,
+ enum nl80211_channel_type *type)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
+ *type = local->_oper_channel_type;
return local->oper_channel;
}
+#ifdef CONFIG_PM
+static void ieee80211_set_wakeup(struct wiphy *wiphy, bool enabled)
+{
+ drv_set_wakeup(wiphy_priv(wiphy), enabled);
+}
+#endif
+
struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
@@ -2741,4 +2970,10 @@ struct cfg80211_ops mac80211_config_ops = {
.probe_client = ieee80211_probe_client,
.get_channel = ieee80211_wiphy_get_channel,
.set_noack_map = ieee80211_set_noack_map,
+#ifdef CONFIG_PM
+ .set_wakeup = ieee80211_set_wakeup,
+#endif
+ .get_et_sset_count = ieee80211_get_et_sset_count,
+ .get_et_stats = ieee80211_get_et_stats,
+ .get_et_strings = ieee80211_get_et_strings,
};
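
The three get_et_* ops export mac80211's per-station counters and channel survey data through the standard ethtool statistics interface (ethtool -S <iface>), with any driver-private counters appended after the STA_STATS_LEN block. The driver side that the drv_get_et_* wrappers call is optional; a sketch with hypothetical names, the signatures inferred from the wrapper calls above:

	static const char mydrv_gstrings[][ETH_GSTRING_LEN] = {
		"fw_tx_drops", "fw_rx_dups",	/* hypothetical counters */
	};

	static int mydrv_get_et_sset_count(struct ieee80211_hw *hw,
					   struct ieee80211_vif *vif, int sset)
	{
		return sset == ETH_SS_STATS ? ARRAY_SIZE(mydrv_gstrings) : 0;
	}

	static void mydrv_get_et_strings(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif,
					 u32 sset, u8 *data)
	{
		if (sset == ETH_SS_STATS)
			memcpy(data, mydrv_gstrings, sizeof(mydrv_gstrings));
	}

	static void mydrv_get_et_stats(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ethtool_stats *stats, u64 *data)
	{
		data[0] = 42;	/* fill from device counters */
		data[1] = 7;
	}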
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index e00ce8c3e28e..c76cf7230c7d 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -135,29 +135,3 @@ bool ieee80211_set_channel_type(struct ieee80211_local *local,
return result;
}
-
-/*
- * ieee80211_get_tx_channel_type returns the channel type we should
- * use for packet transmission, given the channel capability and
- * whatever regulatory flags we have been given.
- */
-enum nl80211_channel_type ieee80211_get_tx_channel_type(
- struct ieee80211_local *local,
- enum nl80211_channel_type channel_type)
-{
- switch (channel_type) {
- case NL80211_CHAN_HT40PLUS:
- if (local->hw.conf.channel->flags &
- IEEE80211_CHAN_NO_HT40PLUS)
- return NL80211_CHAN_HT20;
- break;
- case NL80211_CHAN_HT40MINUS:
- if (local->hw.conf.channel->flags &
- IEEE80211_CHAN_NO_HT40MINUS)
- return NL80211_CHAN_HT20;
- break;
- default:
- break;
- }
- return channel_type;
-}
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 30f99c344847..7ed433c66d68 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -394,7 +394,7 @@ static ssize_t ieee80211_if_parse_uapsd_max_sp_len(
__IEEE80211_IF_FILE_W(uapsd_max_sp_len);
/* AP attributes */
-IEEE80211_IF_FILE(num_sta_authorized, u.ap.num_sta_authorized, ATOMIC);
+IEEE80211_IF_FILE(num_mcast_sta, u.ap.num_mcast_sta, ATOMIC);
IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC);
IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC);
@@ -424,6 +424,7 @@ static ssize_t ieee80211_if_parse_tsf(
struct ieee80211_local *local = sdata->local;
unsigned long long tsf;
int ret;
+ int tsf_is_delta = 0;
if (strncmp(buf, "reset", 5) == 0) {
if (local->ops->reset_tsf) {
@@ -431,9 +432,20 @@ static ssize_t ieee80211_if_parse_tsf(
wiphy_info(local->hw.wiphy, "debugfs reset TSF\n");
}
} else {
+ if (buflen > 10 && buf[1] == '=') {
+ if (buf[0] == '+')
+ tsf_is_delta = 1;
+ else if (buf[0] == '-')
+ tsf_is_delta = -1;
+ else
+ return -EINVAL;
+ buf += 2;
+ }
ret = kstrtoull(buf, 10, &tsf);
if (ret < 0)
return -EINVAL;
+ if (tsf_is_delta)
+ tsf = drv_get_tsf(local, sdata) + tsf_is_delta * tsf;
if (local->ops->set_tsf) {
drv_set_tsf(local, sdata, tsf);
wiphy_info(local->hw.wiphy,
@@ -497,28 +509,26 @@ IEEE80211_IF_FILE(dot11MeshHWMPRannInterval,
u.mesh.mshcfg.dot11MeshHWMPRannInterval, DEC);
IEEE80211_IF_FILE(dot11MeshForwarding, u.mesh.mshcfg.dot11MeshForwarding, DEC);
IEEE80211_IF_FILE(rssi_threshold, u.mesh.mshcfg.rssi_threshold, DEC);
+IEEE80211_IF_FILE(ht_opmode, u.mesh.mshcfg.ht_opmode, DEC);
#endif
-
-#define DEBUGFS_ADD(name) \
- debugfs_create_file(#name, 0400, sdata->debugfs.dir, \
- sdata, &name##_ops);
-
#define DEBUGFS_ADD_MODE(name, mode) \
debugfs_create_file(#name, mode, sdata->debugfs.dir, \
sdata, &name##_ops);
-static void add_sta_files(struct ieee80211_sub_if_data *sdata)
+#define DEBUGFS_ADD(name) DEBUGFS_ADD_MODE(name, 0400)
+
+static void add_common_files(struct ieee80211_sub_if_data *sdata)
{
DEBUGFS_ADD(drop_unencrypted);
- DEBUGFS_ADD(flags);
- DEBUGFS_ADD(state);
- DEBUGFS_ADD(channel_type);
DEBUGFS_ADD(rc_rateidx_mask_2ghz);
DEBUGFS_ADD(rc_rateidx_mask_5ghz);
DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
+}
+static void add_sta_files(struct ieee80211_sub_if_data *sdata)
+{
DEBUGFS_ADD(bssid);
DEBUGFS_ADD(aid);
DEBUGFS_ADD(last_beacon);
@@ -531,16 +541,7 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata)
static void add_ap_files(struct ieee80211_sub_if_data *sdata)
{
- DEBUGFS_ADD(drop_unencrypted);
- DEBUGFS_ADD(flags);
- DEBUGFS_ADD(state);
- DEBUGFS_ADD(channel_type);
- DEBUGFS_ADD(rc_rateidx_mask_2ghz);
- DEBUGFS_ADD(rc_rateidx_mask_5ghz);
- DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
- DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
-
- DEBUGFS_ADD(num_sta_authorized);
+ DEBUGFS_ADD(num_mcast_sta);
DEBUGFS_ADD(num_sta_ps);
DEBUGFS_ADD(dtim_count);
DEBUGFS_ADD(num_buffered_multicast);
@@ -549,48 +550,14 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata)
static void add_ibss_files(struct ieee80211_sub_if_data *sdata)
{
- DEBUGFS_ADD(channel_type);
- DEBUGFS_ADD(rc_rateidx_mask_2ghz);
- DEBUGFS_ADD(rc_rateidx_mask_5ghz);
- DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
- DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
-
DEBUGFS_ADD_MODE(tsf, 0600);
}
static void add_wds_files(struct ieee80211_sub_if_data *sdata)
{
- DEBUGFS_ADD(drop_unencrypted);
- DEBUGFS_ADD(flags);
- DEBUGFS_ADD(state);
- DEBUGFS_ADD(channel_type);
- DEBUGFS_ADD(rc_rateidx_mask_2ghz);
- DEBUGFS_ADD(rc_rateidx_mask_5ghz);
- DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
- DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
-
DEBUGFS_ADD(peer);
}
-static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
-{
- DEBUGFS_ADD(drop_unencrypted);
- DEBUGFS_ADD(flags);
- DEBUGFS_ADD(state);
- DEBUGFS_ADD(channel_type);
- DEBUGFS_ADD(rc_rateidx_mask_2ghz);
- DEBUGFS_ADD(rc_rateidx_mask_5ghz);
- DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
- DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
-}
-
-static void add_monitor_files(struct ieee80211_sub_if_data *sdata)
-{
- DEBUGFS_ADD(flags);
- DEBUGFS_ADD(state);
- DEBUGFS_ADD(channel_type);
-}
-
#ifdef CONFIG_MAC80211_MESH
static void add_mesh_files(struct ieee80211_sub_if_data *sdata)
@@ -642,6 +609,7 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
MESHPARAMS_ADD(dot11MeshHWMPRannInterval);
MESHPARAMS_ADD(dot11MeshGateAnnouncementProtocol);
MESHPARAMS_ADD(rssi_threshold);
+ MESHPARAMS_ADD(ht_opmode);
#undef MESHPARAMS_ADD
}
#endif
@@ -651,6 +619,13 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
if (!sdata->debugfs.dir)
return;
+ DEBUGFS_ADD(flags);
+ DEBUGFS_ADD(state);
+ DEBUGFS_ADD(channel_type);
+
+ if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
+ add_common_files(sdata);
+
switch (sdata->vif.type) {
case NL80211_IFTYPE_MESH_POINT:
#ifdef CONFIG_MAC80211_MESH
@@ -671,12 +646,6 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
case NL80211_IFTYPE_WDS:
add_wds_files(sdata);
break;
- case NL80211_IFTYPE_MONITOR:
- add_monitor_files(sdata);
- break;
- case NL80211_IFTYPE_AP_VLAN:
- add_vlan_files(sdata);
- break;
default:
break;
}
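
With the parser change above, the per-interface tsf debugfs file accepts a relative adjustment ("+=<n>" or "-=<n>", applied to drv_get_tsf()) in addition to an absolute value or "reset". Note the buflen > 10 guard: the written string must be longer than ten characters for the prefix to be recognized, so short deltas need zero padding. Illustrative usage, assuming the driver provides both get_tsf and set_tsf:

	# advance the TSF by one TU (1024 us); path is illustrative
	echo '+=0000001024' > /sys/kernel/debug/ieee80211/phy0/netdev:wlan0/tsf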
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 832b2da5e4cd..5ccec2c1e9f6 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -63,7 +63,7 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : ""
int res = scnprintf(buf, sizeof(buf),
- "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+ "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
TEST(AUTH), TEST(ASSOC), TEST(PS_STA),
TEST(PS_DRIVER), TEST(AUTHORIZED),
TEST(SHORT_PREAMBLE),
@@ -71,7 +71,8 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL),
TEST(UAPSD), TEST(SP), TEST(TDLS_PEER),
TEST(TDLS_PEER_AUTH), TEST(4ADDR_EVENT),
- TEST(INSERTED), TEST(RATE_CONTROL));
+ TEST(INSERTED), TEST(RATE_CONTROL),
+ TEST(TOFFSET_KNOWN));
#undef TEST
return simple_read_from_buffer(userbuf, count, ppos, buf, res);
}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index af4691fed645..6d33a0c743ab 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -7,7 +7,9 @@
static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata)
{
- WARN_ON(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER));
+ WARN(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER),
+ "%s: Failed check-sdata-in-driver check, flags: 0x%x\n",
+ sdata->dev->name, sdata->flags);
}
static inline struct ieee80211_sub_if_data *
@@ -33,6 +35,43 @@ static inline void drv_tx_frags(struct ieee80211_local *local,
local->ops->tx_frags(&local->hw, vif, sta, skbs);
}
+static inline void drv_get_et_strings(struct ieee80211_sub_if_data *sdata,
+ u32 sset, u8 *data)
+{
+ struct ieee80211_local *local = sdata->local;
+ if (local->ops->get_et_strings) {
+ trace_drv_get_et_strings(local, sset);
+ local->ops->get_et_strings(&local->hw, &sdata->vif, sset, data);
+ trace_drv_return_void(local);
+ }
+}
+
+static inline void drv_get_et_stats(struct ieee80211_sub_if_data *sdata,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct ieee80211_local *local = sdata->local;
+ if (local->ops->get_et_stats) {
+ trace_drv_get_et_stats(local);
+ local->ops->get_et_stats(&local->hw, &sdata->vif, stats, data);
+ trace_drv_return_void(local);
+ }
+}
+
+static inline int drv_get_et_sset_count(struct ieee80211_sub_if_data *sdata,
+ int sset)
+{
+ struct ieee80211_local *local = sdata->local;
+ int rv = 0;
+ if (local->ops->get_et_sset_count) {
+ trace_drv_get_et_sset_count(local, sset);
+ rv = local->ops->get_et_sset_count(&local->hw, &sdata->vif,
+ sset);
+ trace_drv_return_int(local, rv);
+ }
+ return rv;
+}
+
static inline int drv_start(struct ieee80211_local *local)
{
int ret;
@@ -89,6 +128,19 @@ static inline int drv_resume(struct ieee80211_local *local)
trace_drv_return_int(local, ret);
return ret;
}
+
+static inline void drv_set_wakeup(struct ieee80211_local *local,
+ bool enabled)
+{
+ might_sleep();
+
+ if (!local->ops->set_wakeup)
+ return;
+
+ trace_drv_set_wakeup(local, enabled);
+ local->ops->set_wakeup(&local->hw, enabled);
+ trace_drv_return_void(local);
+}
#endif
static inline int drv_add_interface(struct ieee80211_local *local,
@@ -99,7 +151,8 @@ static inline int drv_add_interface(struct ieee80211_local *local,
might_sleep();
if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
- sdata->vif.type == NL80211_IFTYPE_MONITOR))
+ (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
+ !(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))))
return -EINVAL;
trace_drv_add_interface(local, sdata);
@@ -474,8 +527,23 @@ int drv_sta_state(struct ieee80211_local *local,
return ret;
}
+static inline void drv_sta_rc_update(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta, u32 changed)
+{
+ sdata = get_bss_sdata(sdata);
+ check_sdata_in_driver(sdata);
+
+ trace_drv_sta_rc_update(local, sdata, sta, changed);
+ if (local->ops->sta_rc_update)
+ local->ops->sta_rc_update(&local->hw, &sdata->vif,
+ sta, changed);
+
+ trace_drv_return_void(local);
+}
+
static inline int drv_conf_tx(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata, u16 queue,
+ struct ieee80211_sub_if_data *sdata, u16 ac,
const struct ieee80211_tx_queue_params *params)
{
int ret = -EOPNOTSUPP;
@@ -484,10 +552,10 @@ static inline int drv_conf_tx(struct ieee80211_local *local,
check_sdata_in_driver(sdata);
- trace_drv_conf_tx(local, sdata, queue, params);
+ trace_drv_conf_tx(local, sdata, ac, params);
if (local->ops->conf_tx)
ret = local->ops->conf_tx(&local->hw, &sdata->vif,
- queue, params);
+ ac, params);
trace_drv_return_int(local, ret);
return ret;
}
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 21d6f5290a1c..6de00b2c268c 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -161,6 +161,21 @@ DEFINE_EVENT(local_only_evt, drv_start,
TP_ARGS(local)
);
+DEFINE_EVENT(local_u32_evt, drv_get_et_strings,
+ TP_PROTO(struct ieee80211_local *local, u32 sset),
+ TP_ARGS(local, sset)
+);
+
+DEFINE_EVENT(local_u32_evt, drv_get_et_sset_count,
+ TP_PROTO(struct ieee80211_local *local, u32 sset),
+ TP_ARGS(local, sset)
+);
+
+DEFINE_EVENT(local_only_evt, drv_get_et_stats,
+ TP_PROTO(struct ieee80211_local *local),
+ TP_ARGS(local)
+);
+
DEFINE_EVENT(local_only_evt, drv_suspend,
TP_PROTO(struct ieee80211_local *local),
TP_ARGS(local)
@@ -171,6 +186,20 @@ DEFINE_EVENT(local_only_evt, drv_resume,
TP_ARGS(local)
);
+TRACE_EVENT(drv_set_wakeup,
+ TP_PROTO(struct ieee80211_local *local, bool enabled),
+ TP_ARGS(local, enabled),
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(bool, enabled)
+ ),
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->enabled = enabled;
+ ),
+ TP_printk(LOCAL_PR_FMT " enabled:%d", LOCAL_PR_ARG, __entry->enabled)
+);
+
DEFINE_EVENT(local_only_evt, drv_stop,
TP_PROTO(struct ieee80211_local *local),
TP_ARGS(local)
@@ -624,6 +653,34 @@ TRACE_EVENT(drv_sta_state,
)
);
+TRACE_EVENT(drv_sta_rc_update,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta,
+ u32 changed),
+
+ TP_ARGS(local, sdata, sta, changed),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ STA_ENTRY
+ __field(u32, changed)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ STA_ASSIGN;
+ __entry->changed = changed;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " changed: 0x%x",
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->changed
+ )
+);
+
TRACE_EVENT(drv_sta_add,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
@@ -677,15 +734,14 @@ TRACE_EVENT(drv_sta_remove,
TRACE_EVENT(drv_conf_tx,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
- u16 queue,
- const struct ieee80211_tx_queue_params *params),
+ u16 ac, const struct ieee80211_tx_queue_params *params),
- TP_ARGS(local, sdata, queue, params),
+ TP_ARGS(local, sdata, ac, params),
TP_STRUCT__entry(
LOCAL_ENTRY
VIF_ENTRY
- __field(u16, queue)
+ __field(u16, ac)
__field(u16, txop)
__field(u16, cw_min)
__field(u16, cw_max)
@@ -696,7 +752,7 @@ TRACE_EVENT(drv_conf_tx,
TP_fast_assign(
LOCAL_ASSIGN;
VIF_ASSIGN;
- __entry->queue = queue;
+ __entry->ac = ac;
__entry->txop = params->txop;
__entry->cw_max = params->cw_max;
__entry->cw_min = params->cw_min;
@@ -705,8 +761,8 @@ TRACE_EVENT(drv_conf_tx,
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT " queue:%d",
- LOCAL_PR_ARG, VIF_PR_ARG, __entry->queue
+ LOCAL_PR_FMT VIF_PR_FMT " AC:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->ac
)
);
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index f25fff7607d8..6f8615c54b22 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -19,15 +19,6 @@
#include "ieee80211_i.h"
#include "rate.h"
-bool ieee80111_cfg_override_disables_ht40(struct ieee80211_sub_if_data *sdata)
-{
- const __le16 flg = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40);
- if ((sdata->u.mgd.ht_capa_mask.cap_info & flg) &&
- !(sdata->u.mgd.ht_capa.cap_info & flg))
- return true;
- return false;
-}
-
static void __check_htcap_disable(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta_ht_cap *ht_cap,
u16 flag)
@@ -315,10 +306,10 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11;
#ifdef CONFIG_MAC80211_HT_DEBUG
- if (net_ratelimit())
- printk(KERN_DEBUG "delba from %pM (%s) tid %d reason code %d\n",
- mgmt->sa, initiator ? "initiator" : "recipient", tid,
- le16_to_cpu(mgmt->u.action.u.delba.reason_code));
+ net_dbg_ratelimited("delba from %pM (%s) tid %d reason code %d\n",
+ mgmt->sa, initiator ? "initiator" : "recipient",
+ tid,
+ le16_to_cpu(mgmt->u.action.u.delba.reason_code));
#endif /* CONFIG_MAC80211_HT_DEBUG */
if (initiator == WLAN_BACK_INITIATOR)
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 33fd8d9f714e..33d9d0c3e3d0 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -66,7 +66,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
skb_reset_tail_pointer(skb);
skb_reserve(skb, sdata->local->hw.extra_tx_headroom);
- if (compare_ether_addr(ifibss->bssid, bssid))
+ if (!ether_addr_equal(ifibss->bssid, bssid))
sta_info_flush(sdata->local, sdata);
/* if merging, indicate to driver that we leave the old IBSS */
@@ -160,16 +160,19 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
if (channel_type && sband->ht_cap.ht_supported) {
pos = skb_put(skb, 4 +
sizeof(struct ieee80211_ht_cap) +
- sizeof(struct ieee80211_ht_info));
+ sizeof(struct ieee80211_ht_operation));
pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
sband->ht_cap.cap);
- pos = ieee80211_ie_build_ht_info(pos,
- &sband->ht_cap,
- chan,
- channel_type);
+ /*
+ * Note: According to 802.11n-2009 9.13.3.1, HT Protection
+ * field and RIFS Mode are reserved in IBSS mode, therefore
+ * keep them at 0
+ */
+ pos = ieee80211_ie_build_ht_oper(pos, &sband->ht_cap,
+ chan, channel_type, 0);
}
- if (local->hw.queues >= 4) {
+ if (local->hw.queues >= IEEE80211_NUM_ACS) {
pos = skb_put(skb, 9);
*pos++ = WLAN_EID_VENDOR_SPECIFIC;
*pos++ = 7; /* len */
@@ -305,9 +308,8 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
* allow new one to be added.
*/
if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
- if (net_ratelimit())
- printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n",
- sdata->name, addr);
+ net_dbg_ratelimited("%s: No room for a new IBSS STA entry %pM\n",
+ sdata->name, addr);
rcu_read_lock();
return NULL;
}
@@ -317,7 +319,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
return NULL;
}
- if (compare_ether_addr(bssid, sdata->u.ibss.bssid)) {
+ if (!ether_addr_equal(bssid, sdata->u.ibss.bssid)) {
rcu_read_lock();
return NULL;
}
@@ -403,14 +405,14 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
return;
if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
- compare_ether_addr(mgmt->bssid, sdata->u.ibss.bssid) == 0) {
+ ether_addr_equal(mgmt->bssid, sdata->u.ibss.bssid)) {
rcu_read_lock();
sta = sta_info_get(sdata, mgmt->sa);
if (elems->supp_rates) {
supp_rates = ieee80211_sta_get_rates(local, elems,
- band);
+ band, NULL);
if (sta) {
u32 prev_rates;
@@ -441,13 +443,13 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
if (sta && elems->wmm_info)
set_sta_flag(sta, WLAN_STA_WME);
- if (sta && elems->ht_info_elem && elems->ht_cap_elem &&
+ if (sta && elems->ht_operation && elems->ht_cap_elem &&
sdata->u.ibss.channel_type != NL80211_CHAN_NO_HT) {
/* we both use HT */
struct ieee80211_sta_ht_cap sta_ht_cap_new;
enum nl80211_channel_type channel_type =
- ieee80211_ht_info_to_channel_type(
- elems->ht_info_elem);
+ ieee80211_ht_oper_to_channel_type(
+ elems->ht_operation);
ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
elems->ht_cap_elem,
@@ -457,8 +459,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
* fall back to HT20 if we don't use or use
* the other extension channel
*/
- if ((channel_type == NL80211_CHAN_HT40MINUS ||
- channel_type == NL80211_CHAN_HT40PLUS) &&
+ if (!(channel_type == NL80211_CHAN_HT40MINUS ||
+ channel_type == NL80211_CHAN_HT40PLUS) ||
channel_type != sdata->u.ibss.channel_type)
sta_ht_cap_new.cap &=
~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
@@ -508,7 +510,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
goto put_bss;
/* same BSSID */
- if (compare_ether_addr(cbss->bssid, sdata->u.ibss.bssid) == 0)
+ if (ether_addr_equal(cbss->bssid, sdata->u.ibss.bssid))
goto put_bss;
if (rx_status->flag & RX_FLAG_MACTIME_MPDU) {
@@ -560,7 +562,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
sdata->name, mgmt->bssid);
#endif
ieee80211_sta_join_ibss(sdata, bss);
- supp_rates = ieee80211_sta_get_rates(local, elems, band);
+ supp_rates = ieee80211_sta_get_rates(local, elems, band, NULL);
ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
supp_rates, true);
rcu_read_unlock();
@@ -584,16 +586,15 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
* allow new one to be added.
*/
if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
- if (net_ratelimit())
- printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n",
- sdata->name, addr);
+ net_dbg_ratelimited("%s: No room for a new IBSS STA entry %pM\n",
+ sdata->name, addr);
return;
}
if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH)
return;
- if (compare_ether_addr(bssid, sdata->u.ibss.bssid))
+ if (!ether_addr_equal(bssid, sdata->u.ibss.bssid))
return;
sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
@@ -831,7 +832,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
return;
- if (compare_ether_addr(mgmt->bssid, ifibss->bssid) != 0 &&
+ if (!ether_addr_equal(mgmt->bssid, ifibss->bssid) &&
!is_broadcast_ether_addr(mgmt->bssid))
return;
@@ -1063,7 +1064,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
4 /* IBSS params */ +
2 + (IEEE80211_MAX_SUPP_RATES - 8) +
2 + sizeof(struct ieee80211_ht_cap) +
- 2 + sizeof(struct ieee80211_ht_info) +
+ 2 + sizeof(struct ieee80211_ht_operation) +
params->ie_len);
if (!skb)
return -ENOMEM;
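All of the address-comparison hunks in this file follow the same tree-wide
conversion: compare_ether_addr() has memcmp() semantics (0 on match), while
ether_addr_equal() is a bool that is true on match, which is why every
"== 0" test drops its comparison and every bare truth test gains a "!". A
hedged sketch of the equivalence (the real helper lives in
include/linux/etherdevice.h; the name below is a stand-in):

	/* Illustrative only:
	 *   compare_ether_addr(a, b) == 0   <=>   ether_addr_equal(a, b)
	 *   compare_ether_addr(a, b) != 0   <=>   !ether_addr_equal(a, b)
	 */
	static inline bool ether_addr_equal_sketch(const u8 *a, const u8 *b)
	{
		return memcmp(a, b, ETH_ALEN) == 0;
	}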
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index d9798a307f20..3f3cd50fff16 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -52,7 +52,8 @@ struct ieee80211_local;
* increased memory use (about 2 kB of RAM per entry). */
#define IEEE80211_FRAGMENT_MAX 4
-#define TU_TO_EXP_TIME(x) (jiffies + usecs_to_jiffies((x) * 1024))
+#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024))
+#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x))
#define IEEE80211_DEFAULT_UAPSD_QUEUES \
(IEEE80211_WMM_IE_STA_QOSINFO_AC_BK | \
@@ -281,7 +282,7 @@ struct ieee80211_if_ap {
u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)];
struct sk_buff_head ps_bc_buf;
atomic_t num_sta_ps; /* number of stations in PS mode */
- atomic_t num_sta_authorized; /* number of authorized stations */
+ atomic_t num_mcast_sta; /* number of stations receiving multicast */
int dtim_count;
bool dtim_bc_mc;
};
@@ -378,6 +379,7 @@ enum ieee80211_sta_flags {
IEEE80211_STA_UAPSD_ENABLED = BIT(7),
IEEE80211_STA_NULLFUNC_ACKED = BIT(8),
IEEE80211_STA_RESET_SIGNAL_AVE = BIT(9),
+ IEEE80211_STA_DISABLE_40MHZ = BIT(10),
};
struct ieee80211_mgd_auth_data {
@@ -397,7 +399,7 @@ struct ieee80211_mgd_auth_data {
struct ieee80211_mgd_assoc_data {
struct cfg80211_bss *bss;
const u8 *supp_rates;
- const u8 *ht_information_ie;
+ const u8 *ht_operation_ie;
unsigned long timeout;
int tries;
@@ -552,6 +554,24 @@ struct ieee80211_if_ibss {
} state;
};
+/**
+ * struct ieee80211_mesh_sync_ops - Extensible synchronization framework interface
+ *
+ * These declarations define the interface that enables
+ * vendor-specific mesh synchronization.
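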
+ *
+ */
+struct ieee802_11_elems;
+struct ieee80211_mesh_sync_ops {
+ void (*rx_bcn_presp)(struct ieee80211_sub_if_data *sdata,
+ u16 stype,
+ struct ieee80211_mgmt *mgmt,
+ struct ieee802_11_elems *elems,
+ struct ieee80211_rx_status *rx_status);
+ void (*adjust_tbtt)(struct ieee80211_sub_if_data *sdata);
+ /* add other framework functions here */
+};
+
struct ieee80211_if_mesh {
struct timer_list housekeeping_timer;
struct timer_list mesh_path_timer;
@@ -600,6 +620,11 @@ struct ieee80211_if_mesh {
IEEE80211_MESH_SEC_AUTHED = 0x1,
IEEE80211_MESH_SEC_SECURED = 0x2,
} security;
+ /* Extensible Synchronization Framework */
+ struct ieee80211_mesh_sync_ops *sync_ops;
+ s64 sync_offset_clockdrift_max;
+ spinlock_t sync_offset_lock;
+ bool adjusting_tbtt;
};
#ifdef CONFIG_MAC80211_MESH
@@ -666,12 +691,6 @@ struct ieee80211_sub_if_data {
char name[IFNAMSIZ];
- /*
- * keep track of whether the HT opmode (stored in
- * vif.bss_info.ht_operation_mode) is valid.
- */
- bool ht_opmode_valid;
-
/* to detect idle changes */
bool old_idle;
@@ -691,7 +710,7 @@ struct ieee80211_sub_if_data {
__be16 control_port_protocol;
bool control_port_no_encrypt;
- struct ieee80211_tx_queue_params tx_conf[IEEE80211_MAX_QUEUES];
+ struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS];
struct work_struct work;
struct sk_buff_head skb_queue;
@@ -761,7 +780,6 @@ enum queue_stop_reason {
IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
IEEE80211_QUEUE_STOP_REASON_SUSPEND,
IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
- IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE,
};
#ifdef CONFIG_MAC80211_LEDS
@@ -785,6 +803,8 @@ struct tpt_led_trigger {
* well be on the operating channel
* @SCAN_HW_SCANNING: The hardware is scanning for us, we have no way to
* determine if we are on the operating channel or not
+ * @SCAN_ONCHANNEL_SCANNING: Do a software scan on only the current operating
+ * channel. This should not interrupt normal traffic.
* @SCAN_COMPLETED: Set for our scan work function when the driver reported
* that the scan completed.
* @SCAN_ABORTED: Set for our scan work function when the driver reported
@@ -793,6 +813,7 @@ struct tpt_led_trigger {
enum {
SCAN_SW_SCANNING,
SCAN_HW_SCANNING,
+ SCAN_ONCHANNEL_SCANNING,
SCAN_COMPLETED,
SCAN_ABORTED,
};
@@ -1082,6 +1103,9 @@ struct ieee80211_local {
struct net_device napi_dev;
struct napi_struct napi;
+
+ /* virtual monitor interface */
+ struct ieee80211_sub_if_data __rcu *monitor_sdata;
};
static inline struct ieee80211_sub_if_data *
@@ -1117,7 +1141,7 @@ struct ieee802_11_elems {
u8 *wmm_info;
u8 *wmm_param;
struct ieee80211_ht_cap *ht_cap_elem;
- struct ieee80211_ht_info *ht_info_elem;
+ struct ieee80211_ht_operation *ht_operation;
struct ieee80211_meshconf_ie *mesh_config;
u8 *mesh_id;
u8 *peering;
@@ -1171,7 +1195,7 @@ static inline struct ieee80211_local *hw_to_local(
static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
{
- return compare_ether_addr(raddr, addr) == 0 ||
+ return ether_addr_equal(raddr, addr) ||
is_broadcast_ether_addr(raddr);
}
@@ -1210,7 +1234,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata);
void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata);
-void ieee80211_mgd_teardown(struct ieee80211_sub_if_data *sdata);
+void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata);
/* IBSS code */
void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
@@ -1239,6 +1263,7 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
struct cfg80211_scan_request *req);
void ieee80211_scan_cancel(struct ieee80211_local *local);
+void ieee80211_run_deferred_scan(struct ieee80211_local *local);
ieee80211_rx_result
ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
@@ -1251,9 +1276,6 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
struct ieee802_11_elems *elems,
struct ieee80211_channel *channel,
bool beacon);
-struct ieee80211_bss *
-ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
- u8 *ssid, u8 ssid_len);
void ieee80211_rx_bss_put(struct ieee80211_local *local,
struct ieee80211_bss *bss);
@@ -1299,7 +1321,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
struct net_device *dev);
/* HT */
-bool ieee80111_cfg_override_disables_ht40(struct ieee80211_sub_if_data *sdata);
void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta_ht_cap *ht_cap);
void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
@@ -1383,7 +1404,7 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw)
extern void *mac80211_wiphy_privid; /* for wiphy privid */
u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
enum nl80211_iftype type);
-int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
+int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
int rate, int erp, int short_preamble);
void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
struct ieee80211_hdr *hdr, const u8 *tsc,
@@ -1429,13 +1450,17 @@ void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
enum queue_stop_reason reason);
void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
enum queue_stop_reason reason);
+void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue);
void ieee80211_add_pending_skb(struct ieee80211_local *local,
struct sk_buff *skb);
-void ieee80211_add_pending_skbs(struct ieee80211_local *local,
- struct sk_buff_head *skbs);
void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
struct sk_buff_head *skbs,
void (*fn)(void *data), void *data);
+static inline void ieee80211_add_pending_skbs(struct ieee80211_local *local,
+ struct sk_buff_head *skbs)
+{
+ ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL);
+}
void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
u16 transaction, u16 auth_alg,
@@ -1460,7 +1485,7 @@ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
const u8 *supp_rates);
u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
struct ieee802_11_elems *elems,
- enum ieee80211_band band);
+ enum ieee80211_band band, u32 *basic_rates);
int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
enum ieee80211_smps_mode smps_mode);
void ieee80211_recalc_smps(struct ieee80211_local *local);
@@ -1470,10 +1495,10 @@ size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset);
u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
u16 cap);
-u8 *ieee80211_ie_build_ht_info(u8 *pos,
- struct ieee80211_sta_ht_cap *ht_cap,
- struct ieee80211_channel *channel,
- enum nl80211_channel_type channel_type);
+u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
+ struct ieee80211_channel *channel,
+ enum nl80211_channel_type channel_type,
+ u16 prot_mode);
/* internal work items */
void ieee80211_work_init(struct ieee80211_local *local);
@@ -1501,10 +1526,7 @@ bool ieee80211_set_channel_type(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
enum nl80211_channel_type chantype);
enum nl80211_channel_type
-ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info);
-enum nl80211_channel_type ieee80211_get_tx_channel_type(
- struct ieee80211_local *local,
- enum nl80211_channel_type channel_type);
+ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper);
#ifdef CONFIG_MAC80211_NOINLINE
#define debug_noinline noinline
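The TU_TO_JIFFIES() helper factored out near the top of this header encodes
the 802.11 time unit (1 TU = 1024 us). A worked example of the two macros,
assuming usecs_to_jiffies() from include/linux/jiffies.h; the helper name
below is hypothetical:

	/* Illustrative only: a 100 TU beacon interval is 102400 us, so
	 * TU_TO_JIFFIES(100) == usecs_to_jiffies(102400), i.e. ~102 ms
	 * worth of jiffies on any CONFIG_HZ. TU_TO_EXP_TIME() anchors that
	 * interval at the current jiffies value to build an expiry time. */
	static unsigned long beacon_expiry(u16 beacon_int_tu)
	{
		return TU_TO_EXP_TIME(beacon_int_tu);
	}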
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 401c01f0731e..d4c19a7773db 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -127,7 +127,7 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
* The remaining checks are only performed for interfaces
* with the same MAC address.
*/
- if (compare_ether_addr(dev->dev_addr, ndev->dev_addr))
+ if (!ether_addr_equal(dev->dev_addr, ndev->dev_addr))
continue;
/*
@@ -149,6 +149,35 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
return 0;
}
+static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata)
+{
+ int n_queues = sdata->local->hw.queues;
+ int i;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ if (WARN_ON_ONCE(sdata->vif.hw_queue[i] ==
+ IEEE80211_INVAL_HW_QUEUE))
+ return -EINVAL;
+ if (WARN_ON_ONCE(sdata->vif.hw_queue[i] >=
+ n_queues))
+ return -EINVAL;
+ }
+
+ if ((sdata->vif.type != NL80211_IFTYPE_AP) ||
+ !(sdata->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)) {
+ sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE;
+ return 0;
+ }
+
+ if (WARN_ON_ONCE(sdata->vif.cab_queue == IEEE80211_INVAL_HW_QUEUE))
+ return -EINVAL;
+
+ if (WARN_ON_ONCE(sdata->vif.cab_queue >= n_queues))
+ return -EINVAL;
+
+ return 0;
+}
+
void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
const int offset)
{
@@ -169,6 +198,83 @@ void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
#undef ADJUST
}
+static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ int i;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
+ sdata->vif.hw_queue[i] = IEEE80211_INVAL_HW_QUEUE;
+ else if (local->hw.queues >= IEEE80211_NUM_ACS)
+ sdata->vif.hw_queue[i] = i;
+ else
+ sdata->vif.hw_queue[i] = 0;
+ }
+ sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE;
+}
+
+static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
+{
+ struct ieee80211_sub_if_data *sdata;
+ int ret;
+
+ if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))
+ return 0;
+
+ if (local->monitor_sdata)
+ return 0;
+
+ sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL);
+ if (!sdata)
+ return -ENOMEM;
+
+ /* set up data */
+ sdata->local = local;
+ sdata->vif.type = NL80211_IFTYPE_MONITOR;
+ snprintf(sdata->name, IFNAMSIZ, "%s-monitor",
+ wiphy_name(local->hw.wiphy));
+
+ ieee80211_set_default_queues(sdata);
+
+ ret = drv_add_interface(local, sdata);
+ if (WARN_ON(ret)) {
+ /* ok .. stupid driver, it asked for this! */
+ kfree(sdata);
+ return ret;
+ }
+
+ ret = ieee80211_check_queues(sdata);
+ if (ret) {
+ kfree(sdata);
+ return ret;
+ }
+
+ rcu_assign_pointer(local->monitor_sdata, sdata);
+
+ return 0;
+}
+
+static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
+{
+ struct ieee80211_sub_if_data *sdata;
+
+ if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))
+ return;
+
+ sdata = rtnl_dereference(local->monitor_sdata);
+
+ if (!sdata)
+ return;
+
+ rcu_assign_pointer(local->monitor_sdata, NULL);
+ synchronize_net();
+
+ drv_remove_interface(local, sdata);
+
+ kfree(sdata);
+}
+
/*
* NOTE: Be very careful when changing this function, it must NOT return
* an error on interface type changes that have been pre-checked, so most
@@ -246,15 +352,18 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
if (!is_valid_ether_addr(dev->dev_addr)) {
- if (!local->open_count)
- drv_stop(local);
- return -EADDRNOTAVAIL;
+ res = -EADDRNOTAVAIL;
+ goto err_stop;
}
}
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP_VLAN:
- /* no need to tell driver */
+ /* no need to tell driver, but set carrier */
+ if (rtnl_dereference(sdata->bss->beacon))
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
break;
case NL80211_IFTYPE_MONITOR:
if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
@@ -262,6 +371,12 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
break;
}
+ if (local->monitors == 0 && local->open_count == 0) {
+ res = ieee80211_add_virtual_monitor(local);
+ if (res)
+ goto err_stop;
+ }
+
/* must be before the call to ieee80211_configure_filter */
local->monitors++;
if (local->monitors == 1) {
@@ -276,9 +391,14 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
break;
default:
if (coming_up) {
+ ieee80211_del_virtual_monitor(local);
+
res = drv_add_interface(local, sdata);
if (res)
goto err_stop;
+ res = ieee80211_check_queues(sdata);
+ if (res)
+ goto err_del_interface;
}
if (sdata->vif.type == NL80211_IFTYPE_AP) {
@@ -294,7 +414,8 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
ieee80211_bss_info_change_notify(sdata, changed);
if (sdata->vif.type == NL80211_IFTYPE_STATION ||
- sdata->vif.type == NL80211_IFTYPE_ADHOC)
+ sdata->vif.type == NL80211_IFTYPE_ADHOC ||
+ sdata->vif.type == NL80211_IFTYPE_AP)
netif_carrier_off(dev);
else
netif_carrier_on(dev);
@@ -366,6 +487,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
sdata->bss = NULL;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
list_del(&sdata->u.vlan.list);
+ /* might already be clear but that doesn't matter */
clear_bit(SDATA_STATE_RUNNING, &sdata->state);
return res;
}
@@ -486,6 +608,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
/* free all potentially still buffered bcast frames */
local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps_bc_buf);
skb_queue_purge(&sdata->u.ap.ps_bc_buf);
+ } else if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ ieee80211_mgd_stop(sdata);
}
if (going_down)
@@ -506,6 +630,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
if (local->monitors == 0) {
local->hw.conf.flags &= ~IEEE80211_CONF_MONITOR;
hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR;
+ ieee80211_del_virtual_monitor(local);
}
ieee80211_adjust_monitor_flags(sdata, -1);
@@ -579,6 +704,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
}
}
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+
+ if (local->monitors == local->open_count && local->monitors > 0)
+ ieee80211_add_virtual_monitor(local);
}
static int ieee80211_stop(struct net_device *dev)
@@ -644,8 +772,6 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
if (ieee80211_vif_is_mesh(&sdata->vif))
mesh_rmc_free(sdata);
- else if (sdata->vif.type == NL80211_IFTYPE_STATION)
- ieee80211_mgd_teardown(sdata);
flushed = sta_info_flush(local, sdata);
WARN_ON(flushed);
@@ -676,7 +802,7 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev,
struct ieee80211_hdr *hdr;
struct ieee80211_radiotap_header *rtap = (void *)skb->data;
- if (local->hw.queues < 4)
+ if (local->hw.queues < IEEE80211_NUM_ACS)
return 0;
if (skb->len < 4 ||
@@ -907,6 +1033,18 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
ieee80211_debugfs_add_netdev(sdata);
}
+static void ieee80211_clean_sdata(struct ieee80211_sub_if_data *sdata)
+{
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ mesh_path_flush_by_iface(sdata);
+ break;
+
+ default:
+ break;
+ }
+}
+
static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
enum nl80211_iftype type)
{
@@ -970,6 +1108,13 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
if (ret)
type = sdata->vif.type;
+ /*
+ * Ignore return value here, there's not much we can do since
+ * the driver changed the interface type internally already.
+ * The warnings will hopefully make driver authors fix it :-)
+ */
+ ieee80211_check_queues(sdata);
+
ieee80211_setup_sdata(sdata, type);
err = ieee80211_do_open(sdata->dev, false);
@@ -1133,11 +1278,15 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
struct net_device *ndev;
struct ieee80211_sub_if_data *sdata = NULL;
int ret, i;
+ int txqs = 1;
ASSERT_RTNL();
+ if (local->hw.queues >= IEEE80211_NUM_ACS)
+ txqs = IEEE80211_NUM_ACS;
+
ndev = alloc_netdev_mqs(sizeof(*sdata) + local->hw.vif_data_size,
- name, ieee80211_if_setup, local->hw.queues, 1);
+ name, ieee80211_if_setup, txqs, 1);
if (!ndev)
return -ENOMEM;
dev_net_set(ndev, wiphy_net(local->hw.wiphy));
@@ -1192,6 +1341,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
sizeof(sdata->rc_rateidx_mcs_mask[i]));
}
+ ieee80211_set_default_queues(sdata);
+
/* setup type-dependent data */
ieee80211_setup_sdata(sdata, type);
@@ -1227,8 +1378,8 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
list_del_rcu(&sdata->list);
mutex_unlock(&sdata->local->iflist_mtx);
- if (ieee80211_vif_is_mesh(&sdata->vif))
- mesh_path_flush_by_iface(sdata);
+ /* clean up type-dependent data */
+ ieee80211_clean_sdata(sdata);
synchronize_rcu();
unregister_netdevice(sdata->dev);
@@ -1249,8 +1400,7 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
list_del(&sdata->list);
- if (ieee80211_vif_is_mesh(&sdata->vif))
- mesh_path_flush_by_iface(sdata);
+ ieee80211_clean_sdata(sdata);
unregister_netdevice_queue(sdata->dev, &unreg_list);
}
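The queue plumbing added in this file reduces to a small decision table:
ieee80211_set_default_queues() pre-fills the per-AC mapping and
ieee80211_check_queues() later rejects anything a driver left invalid. A
condensed restatement (illustrative only; the constants are the ones this
patch uses):

	/* Per-AC hw_queue defaults:
	 *
	 *   IEEE80211_HW_QUEUE_CONTROL set  -> INVAL_HW_QUEUE (driver fills in)
	 *   hw.queues >= IEEE80211_NUM_ACS  -> hw_queue[ac] = ac (1:1 mapping)
	 *   otherwise                       -> hw_queue[ac] = 0 (shared queue)
	 *
	 * ieee80211_check_queues() then fails the interface if any mapping
	 * is still INVAL_HW_QUEUE or points beyond hw.queues, and under
	 * queue control additionally requires a valid cab_queue for APs.
	 */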
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 16336480c631..f5548e953259 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -47,7 +47,8 @@ void ieee80211_configure_filter(struct ieee80211_local *local)
if (atomic_read(&local->iff_allmultis))
new_flags |= FIF_ALLMULTI;
- if (local->monitors || test_bit(SCAN_SW_SCANNING, &local->scanning))
+ if (local->monitors || test_bit(SCAN_SW_SCANNING, &local->scanning) ||
+ test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning))
new_flags |= FIF_BCN_PRBRESP_PROMISC;
if (local->fif_probe_req || local->probe_req_reg)
@@ -148,6 +149,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
}
if (test_bit(SCAN_SW_SCANNING, &local->scanning) ||
+ test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) ||
test_bit(SCAN_HW_SCANNING, &local->scanning))
power = chan->max_power;
else
@@ -557,8 +559,10 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
WIPHY_FLAG_4ADDR_AP |
WIPHY_FLAG_4ADDR_STATION |
WIPHY_FLAG_REPORTS_OBSS |
- WIPHY_FLAG_OFFCHAN_TX |
- WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ WIPHY_FLAG_OFFCHAN_TX;
+
+ if (ops->remain_on_channel)
+ wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
wiphy->features = NL80211_FEATURE_SK_TX_STATUS |
NL80211_FEATURE_HT_IBSS;
@@ -589,8 +593,12 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
local->hw.max_report_rates = 0;
local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
local->hw.max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+ local->hw.offchannel_tx_hw_queue = IEEE80211_INVAL_HW_QUEUE;
local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
+ local->hw.radiotap_mcs_details = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
+ IEEE80211_RADIOTAP_MCS_HAVE_GI |
+ IEEE80211_RADIOTAP_MCS_HAVE_BW;
local->user_power_level = -1;
wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask;
@@ -685,6 +693,11 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
WLAN_CIPHER_SUITE_AES_CMAC
};
+ if (hw->flags & IEEE80211_HW_QUEUE_CONTROL &&
+ (local->hw.offchannel_tx_hw_queue == IEEE80211_INVAL_HW_QUEUE ||
+ local->hw.offchannel_tx_hw_queue >= local->hw.queues))
+ return -EINVAL;
+
if ((hw->wiphy->wowlan.flags || hw->wiphy->wowlan.n_patterns)
#ifdef CONFIG_PM
&& (!local->ops->suspend || !local->ops->resume)
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index e5fbb7cf3562..2913113c5833 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -13,9 +13,6 @@
#include "ieee80211_i.h"
#include "mesh.h"
-#define MESHCONF_CAPAB_ACCEPT_PLINKS 0x01
-#define MESHCONF_CAPAB_FORWARDING 0x08
-
#define TMR_RUNNING_HK 0
#define TMR_RUNNING_MP 1
#define TMR_RUNNING_MPR 2
@@ -67,16 +64,19 @@ static void ieee80211_mesh_housekeeping_timer(unsigned long data)
/**
* mesh_matches_local - check if the config of a mesh point matches ours
*
- * @ie: information elements of a management frame from the mesh peer
* @sdata: local mesh subif
+ * @ie: information elements of a management frame from the mesh peer
*
* This function checks if the mesh configuration of a mesh point matches the
* local mesh configuration, i.e. if both nodes belong to the same mesh network.
*/
-bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_data *sdata)
+bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
+ struct ieee802_11_elems *ie)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct ieee80211_local *local = sdata->local;
+ u32 basic_rates = 0;
+ enum nl80211_channel_type sta_channel_type = NL80211_CHAN_NO_HT;
/*
* As support for each feature is added, check for matching
@@ -97,10 +97,23 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_dat
(ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth)))
goto mismatch;
- /* disallow peering with mismatched channel types for now */
- if (ie->ht_info_elem &&
- (local->_oper_channel_type !=
- ieee80211_ht_info_to_channel_type(ie->ht_info_elem)))
+ ieee80211_sta_get_rates(local, ie, local->oper_channel->band,
+ &basic_rates);
+
+ if (sdata->vif.bss_conf.basic_rates != basic_rates)
+ goto mismatch;
+
+ if (ie->ht_operation)
+ sta_channel_type =
+ ieee80211_ht_oper_to_channel_type(ie->ht_operation);
+
+ /* Disallow HT40+/- mismatch */
+ if (ie->ht_operation &&
+ (local->_oper_channel_type == NL80211_CHAN_HT40MINUS ||
+ local->_oper_channel_type == NL80211_CHAN_HT40PLUS) &&
+ (sta_channel_type == NL80211_CHAN_HT40MINUS ||
+ sta_channel_type == NL80211_CHAN_HT40PLUS) &&
+ local->_oper_channel_type != sta_channel_type)
goto mismatch;
return true;
@@ -204,7 +217,7 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
kmem_cache_free(rm_cache, p);
--entries;
} else if ((seqnum == p->seqnum) &&
- (compare_ether_addr(sa, p->sa) == 0))
+ (ether_addr_equal(sa, p->sa)))
return -1;
}
@@ -251,8 +264,10 @@ mesh_add_meshconf_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
/* Mesh capability */
ifmsh->accepting_plinks = mesh_plink_availables(sdata);
*pos = MESHCONF_CAPAB_FORWARDING;
- *pos++ |= ifmsh->accepting_plinks ?
+ *pos |= ifmsh->accepting_plinks ?
MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
+ *pos++ |= ifmsh->adjusting_tbtt ?
+ MESHCONF_CAPAB_TBTT_ADJUSTING : 0x00;
*pos++ = 0x00;
return 0;
@@ -371,7 +386,7 @@ int mesh_add_ht_cap_ie(struct sk_buff *skb,
return 0;
}
-int mesh_add_ht_info_ie(struct sk_buff *skb,
+int mesh_add_ht_oper_ie(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
@@ -385,11 +400,12 @@ int mesh_add_ht_info_ie(struct sk_buff *skb,
if (!ht_cap->ht_supported || channel_type == NL80211_CHAN_NO_HT)
return 0;
- if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_info))
+ if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_operation))
return -ENOMEM;
- pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_info));
- ieee80211_ie_build_ht_info(pos, ht_cap, channel, channel_type);
+ pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation));
+ ieee80211_ie_build_ht_oper(pos, ht_cap, channel, channel_type,
+ sdata->vif.bss_conf.ht_operation_mode);
return 0;
}
@@ -573,14 +589,24 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
ieee80211_configure_filter(local);
ifmsh->mesh_cc_id = 0; /* Disabled */
- ifmsh->mesh_sp_id = 0; /* Neighbor Offset */
ifmsh->mesh_auth_id = 0; /* Disabled */
+ /* register sync ops from extensible synchronization framework */
+ ifmsh->sync_ops = ieee80211_mesh_sync_ops_get(ifmsh->mesh_sp_id);
+ ifmsh->adjusting_tbtt = false;
+ ifmsh->sync_offset_clockdrift_max = 0;
set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
ieee80211_mesh_root_setup(ifmsh);
ieee80211_queue_work(&local->hw, &sdata->work);
+ sdata->vif.bss_conf.ht_operation_mode =
+ ifmsh->mshcfg.ht_opmode;
sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL;
+ sdata->vif.bss_conf.basic_rates =
+ ieee80211_mandatory_rates(sdata->local,
+ sdata->local->hw.conf.channel->band);
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON |
BSS_CHANGED_BEACON_ENABLED |
+ BSS_CHANGED_HT |
+ BSS_CHANGED_BASIC_RATES |
BSS_CHANGED_BEACON_INT);
}
@@ -616,16 +642,16 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
struct ieee80211_rx_status *rx_status)
{
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct ieee802_11_elems elems;
struct ieee80211_channel *channel;
- u32 supp_rates = 0;
size_t baselen;
int freq;
enum ieee80211_band band = rx_status->band;
/* ignore ProbeResp to foreign address */
if (stype == IEEE80211_STYPE_PROBE_RESP &&
- compare_ether_addr(mgmt->da, sdata->vif.addr))
+ !ether_addr_equal(mgmt->da, sdata->vif.addr))
return;
baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
@@ -650,10 +676,12 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
return;
if (elems.mesh_id && elems.mesh_config &&
- mesh_matches_local(&elems, sdata)) {
- supp_rates = ieee80211_sta_get_rates(local, &elems, band);
- mesh_neighbour_update(mgmt->sa, supp_rates, sdata, &elems);
- }
+ mesh_matches_local(sdata, &elems))
+ mesh_neighbour_update(sdata, mgmt->sa, &elems);
+
+ if (ifmsh->sync_ops)
+ ifmsh->sync_ops->rx_bcn_presp(sdata,
+ stype, mgmt, &elems, rx_status);
}
static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
@@ -721,6 +749,9 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata)
if (test_and_clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags))
ieee80211_mesh_rootpath(sdata);
+
+ if (test_and_clear_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags))
+ mesh_sync_adjust_tbtt(sdata);
}
void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
@@ -761,4 +792,5 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
(unsigned long) sdata);
INIT_LIST_HEAD(&ifmsh->preq_queue.list);
spin_lock_init(&ifmsh->mesh_preq_queue_lock);
+ spin_lock_init(&ifmsh->sync_offset_lock);
}
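The mesh_add_meshconf_ie() hunk above also reworks the write-cursor handling:
the old "*pos++ |=" advanced the cursor while OR-ing in ACCEPT_PLINKS, so the
TBTT_ADJUSTING flag added by this series would otherwise have landed in the
next byte of the IE. A minimal sketch of the corrected capability-byte
assembly, using the flag names this series adds to mesh.h (the helper name is
hypothetical):

	/* Illustrative only: all capability flags must be OR'ed into the
	 * same byte before the write cursor advances. */
	static void build_meshconf_capab(u8 *pos, bool accepting_plinks,
					 bool adjusting_tbtt)
	{
		*pos = MESHCONF_CAPAB_FORWARDING;
		*pos |= accepting_plinks ? MESHCONF_CAPAB_ACCEPT_PLINKS : 0;
		*pos |= adjusting_tbtt ? MESHCONF_CAPAB_TBTT_ADJUSTING : 0;
	}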
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 8d53b71378e3..e3642756f8f4 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -19,6 +19,20 @@
/* Data structures */
/**
+ * enum mesh_config_capab_flags - mesh config IE capability flags
+ *
+ * @MESHCONF_CAPAB_ACCEPT_PLINKS: STA is willing to establish
+ * additional mesh peerings with other mesh STAs
+ * @MESHCONF_CAPAB_FORWARDING: the STA forwards MSDUs
+ * @MESHCONF_CAPAB_TBTT_ADJUSTING: TBTT adjustment procedure is ongoing
+ */
+enum mesh_config_capab_flags {
+ MESHCONF_CAPAB_ACCEPT_PLINKS = BIT(0),
+ MESHCONF_CAPAB_FORWARDING = BIT(3),
+ MESHCONF_CAPAB_TBTT_ADJUSTING = BIT(5),
+};
+
+/**
* enum mesh_path_flags - mac80211 mesh path flags
*
*
@@ -56,12 +70,15 @@ enum mesh_path_flags {
* @MESH_WORK_GROW_MPP_TABLE: the mesh portals table is full and needs to
* grow
* @MESH_WORK_ROOT: the mesh root station needs to send a frame
+ * @MESH_WORK_DRIFT_ADJUST: time to compensate for clock drift relative to other
+ * mesh nodes
*/
enum mesh_deferred_task_flags {
MESH_WORK_HOUSEKEEPING,
MESH_WORK_GROW_MPATH_TABLE,
MESH_WORK_GROW_MPP_TABLE,
MESH_WORK_ROOT,
+ MESH_WORK_DRIFT_ADJUST,
};
/**
@@ -86,6 +103,7 @@ enum mesh_deferred_task_flags {
* mpath itself. No need to take this lock when adding or removing
* an mpath to a hash bucket on a path table.
* @rann_snd_addr: the RANN sender address
+ * @rann_metric: the aggregated path metric towards the root node
* @is_root: the destination station of this path is a root node
* @is_gate: the destination station of this path is a mesh gate
*
@@ -112,6 +130,7 @@ struct mesh_path {
enum mesh_path_flags flags;
spinlock_t state_lock;
u8 rann_snd_addr[ETH_ALEN];
+ u32 rann_metric;
bool is_root;
bool is_gate;
};
@@ -203,8 +222,8 @@ int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
char *addr6);
int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr,
struct ieee80211_sub_if_data *sdata);
-bool mesh_matches_local(struct ieee802_11_elems *ie,
- struct ieee80211_sub_if_data *sdata);
+bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
+ struct ieee802_11_elems *ie);
void mesh_ids_set_default(struct ieee80211_if_mesh *mesh);
void mesh_mgmt_ies_add(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata);
@@ -220,7 +239,7 @@ int mesh_add_ds_params_ie(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata);
int mesh_add_ht_cap_ie(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata);
-int mesh_add_ht_info_ie(struct sk_buff *skb,
+int mesh_add_ht_oper_ie(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata);
void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
@@ -232,6 +251,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata);
void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh);
+struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method);
/* Mesh paths */
int mesh_nexthop_lookup(struct sk_buff *skb,
@@ -256,9 +276,9 @@ int mesh_path_add_gate(struct mesh_path *mpath);
int mesh_path_send_to_gates(struct mesh_path *mpath);
int mesh_gate_num(struct ieee80211_sub_if_data *sdata);
/* Mesh plinks */
-void mesh_neighbour_update(u8 *hw_addr, u32 rates,
- struct ieee80211_sub_if_data *sdata,
- struct ieee802_11_elems *ie);
+void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
+ u8 *hw_addr,
+ struct ieee802_11_elems *ie);
bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie);
void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
void mesh_plink_broken(struct sta_info *sta);
@@ -284,7 +304,6 @@ void mesh_pathtbl_unregister(void);
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata);
void mesh_path_timer(unsigned long data);
void mesh_path_flush_by_nexthop(struct sta_info *sta);
-void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
void mesh_path_discard_frame(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata);
void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata);
@@ -325,6 +344,8 @@ void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata);
void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata);
void mesh_plink_quiesce(struct sta_info *sta);
void mesh_plink_restart(struct sta_info *sta);
+void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
+void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata);
#else
#define mesh_allocated 0
static inline void
@@ -337,6 +358,8 @@ static inline void mesh_plink_quiesce(struct sta_info *sta) {}
static inline void mesh_plink_restart(struct sta_info *sta) {}
static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
{ return false; }
+static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
+{}
#endif
#endif /* IEEE80211S_H */
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 1c6f3d02aebf..9b59658e8650 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -86,8 +86,8 @@ static inline u32 u16_field_get(u8 *preq_elem, int offset, bool ae)
#define PERR_IE_TARGET_RCODE(x) u16_field_get(x, 13, 0)
#define MSEC_TO_TU(x) (x*1000/1024)
-#define SN_GT(x, y) ((long) (y) - (long) (x) < 0)
-#define SN_LT(x, y) ((long) (x) - (long) (y) < 0)
+#define SN_GT(x, y) ((s32)(y - x) < 0)
+#define SN_LT(x, y) ((s32)(x - y) < 0)
#define net_traversal_jiffies(s) \
msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
@@ -422,7 +422,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
new_metric = MAX_METRIC;
exp_time = TU_TO_EXP_TIME(orig_lifetime);
- if (compare_ether_addr(orig_addr, sdata->vif.addr) == 0) {
+ if (ether_addr_equal(orig_addr, sdata->vif.addr)) {
/* This MP is the originator, we are not interested in this
* frame, except for updating transmitter's path info.
*/
@@ -472,7 +472,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
/* Update and check transmitter routing info */
ta = mgmt->sa;
- if (compare_ether_addr(orig_addr, ta) == 0)
+ if (ether_addr_equal(orig_addr, ta))
fresh_info = false;
else {
fresh_info = true;
@@ -533,7 +533,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
mhwmp_dbg("received PREQ from %pM", orig_addr);
- if (compare_ether_addr(target_addr, sdata->vif.addr) == 0) {
+ if (ether_addr_equal(target_addr, sdata->vif.addr)) {
mhwmp_dbg("PREQ is for us");
forward = false;
reply = true;
@@ -603,7 +603,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
hopcount, ttl, cpu_to_le32(lifetime),
cpu_to_le32(metric), cpu_to_le32(preq_id),
sdata);
- ifmsh->mshstats.fwded_mcast++;
+ if (!is_multicast_ether_addr(da))
+ ifmsh->mshstats.fwded_unicast++;
+ else
+ ifmsh->mshstats.fwded_mcast++;
ifmsh->mshstats.fwded_frames++;
}
}
@@ -631,7 +634,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem));
orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
- if (compare_ether_addr(orig_addr, sdata->vif.addr) == 0)
+ if (ether_addr_equal(orig_addr, sdata->vif.addr))
/* destination, no forwarding required */
return;
@@ -709,7 +712,7 @@ static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
spin_lock_bh(&mpath->state_lock);
sta = next_hop_deref_protected(mpath);
if (mpath->flags & MESH_PATH_ACTIVE &&
- compare_ether_addr(ta, sta->sta.addr) == 0 &&
+ ether_addr_equal(ta, sta->sta.addr) &&
(!(mpath->flags & MESH_PATH_SN_VALID) ||
SN_GT(target_sn, mpath->sn))) {
mpath->flags &= ~MESH_PATH_ACTIVE;
@@ -732,11 +735,12 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
struct ieee80211_rann_ie *rann)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
struct mesh_path *mpath;
u8 ttl, flags, hopcount;
u8 *orig_addr;
- u32 orig_sn, metric;
- u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
+ u32 orig_sn, metric, metric_txsta, interval;
bool root_is_gate;
ttl = rann->rann_ttl;
@@ -748,19 +752,28 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
flags = rann->rann_flags;
root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
orig_addr = rann->rann_addr;
- orig_sn = rann->rann_seq;
+ orig_sn = le32_to_cpu(rann->rann_seq);
+ interval = le32_to_cpu(rann->rann_interval);
hopcount = rann->rann_hopcount;
hopcount++;
- metric = rann->rann_metric;
+ metric = le32_to_cpu(rann->rann_metric);
/* Ignore our own RANNs */
- if (compare_ether_addr(orig_addr, sdata->vif.addr) == 0)
+ if (ether_addr_equal(orig_addr, sdata->vif.addr))
return;
mhwmp_dbg("received RANN from %pM via neighbour %pM (is_gate=%d)",
orig_addr, mgmt->sa, root_is_gate);
rcu_read_lock();
+ sta = sta_info_get(sdata, mgmt->sa);
+ if (!sta) {
+ rcu_read_unlock();
+ return;
+ }
+
+ metric_txsta = airtime_link_metric_get(local, sta);
+
mpath = mesh_path_lookup(orig_addr, sdata);
if (!mpath) {
mesh_path_add(orig_addr, sdata);
@@ -780,18 +793,21 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
}
- if (mpath->sn < orig_sn && ifmsh->mshcfg.dot11MeshForwarding) {
+ if ((SN_LT(mpath->sn, orig_sn) || (mpath->sn == orig_sn &&
+ metric < mpath->rann_metric)) && ifmsh->mshcfg.dot11MeshForwarding) {
mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
cpu_to_le32(orig_sn),
0, NULL, 0, broadcast_addr,
hopcount, ttl, cpu_to_le32(interval),
- cpu_to_le32(metric + mpath->metric),
+ cpu_to_le32(metric + metric_txsta),
0, sdata);
mpath->sn = orig_sn;
+ mpath->rann_metric = metric + metric_txsta;
+	/* Record the RANN sender's address to send individually
+	 * addressed PREQs destined for the root mesh STA */
+ memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);
}
- /* Using individually addressed PREQ for root node */
- memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);
mpath->is_root = true;
if (root_is_gate)
@@ -1086,7 +1102,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
if (time_after(jiffies,
mpath->exp_time -
msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
- !compare_ether_addr(sdata->vif.addr, hdr->addr4) &&
+ ether_addr_equal(sdata->vif.addr, hdr->addr4) &&
!(mpath->flags & MESH_PATH_RESOLVING) &&
!(mpath->flags & MESH_PATH_FIXED))
mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
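The SN_GT()/SN_LT() rewrite at the top of this file matters for sequence
number wraparound: casting the unsigned difference to s32 keeps the comparison
correct when a u32 HWMP sequence number rolls over, whereas the old
"(long)(y) - (long)(x)" form breaks on targets with a 64-bit long. A worked
example under that assumption:

	/* Illustrative only: serial-number comparison across wraparound.
	 * With x = 0x00000001 (just wrapped) and y = 0xfffffffe,
	 * (s32)(y - x) == (s32)0xfffffffd == -3 < 0, so SN_GT(x, y) is
	 * true -- x is "newer" -- while a plain integer comparison (or the
	 * old form on a 64-bit long) would conclude the opposite. */
	#define SN_GT_SKETCH(x, y)	((s32)((y) - (x)) < 0)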
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 49aaefd99635..b39224d8255c 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -348,7 +348,7 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst,
hlist_for_each_entry_rcu(node, n, bucket, list) {
mpath = node->mpath;
if (mpath->sdata == sdata &&
- compare_ether_addr(dst, mpath->dst) == 0) {
+ ether_addr_equal(dst, mpath->dst)) {
if (MPATH_EXPIRED(mpath)) {
spin_lock_bh(&mpath->state_lock);
mpath->flags &= ~MESH_PATH_ACTIVE;
@@ -517,7 +517,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
int err = 0;
u32 hash_idx;
- if (compare_ether_addr(dst, sdata->vif.addr) == 0)
+ if (ether_addr_equal(dst, sdata->vif.addr))
/* never add ourselves as neighbours */
return -ENOTSUPP;
@@ -538,6 +538,8 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
read_lock_bh(&pathtbl_resize_lock);
memcpy(new_mpath->dst, dst, ETH_ALEN);
+ memset(new_mpath->rann_snd_addr, 0xff, ETH_ALEN);
+ new_mpath->is_root = false;
new_mpath->sdata = sdata;
new_mpath->flags = 0;
skb_queue_head_init(&new_mpath->frame_queue);
@@ -559,7 +561,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
hlist_for_each_entry(node, n, bucket, list) {
mpath = node->mpath;
if (mpath->sdata == sdata &&
- compare_ether_addr(dst, mpath->dst) == 0)
+ ether_addr_equal(dst, mpath->dst))
goto err_exists;
}
@@ -650,7 +652,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
int err = 0;
u32 hash_idx;
- if (compare_ether_addr(dst, sdata->vif.addr) == 0)
+ if (ether_addr_equal(dst, sdata->vif.addr))
/* never add ourselves as neighbours */
return -ENOTSUPP;
@@ -688,7 +690,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
hlist_for_each_entry(node, n, bucket, list) {
mpath = node->mpath;
if (mpath->sdata == sdata &&
- compare_ether_addr(dst, mpath->dst) == 0)
+ ether_addr_equal(dst, mpath->dst))
goto err_exists;
}
@@ -882,7 +884,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
hlist_for_each_entry(node, n, bucket, list) {
mpath = node->mpath;
if (mpath->sdata == sdata &&
- compare_ether_addr(addr, mpath->dst) == 0) {
+ ether_addr_equal(addr, mpath->dst)) {
__mesh_path_del(tbl, node);
goto enddel;
}
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 4e53c4cbca9e..60ef235c9d9b 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -82,20 +82,14 @@ static inline void mesh_plink_fsm_restart(struct sta_info *sta)
}
/*
- * NOTE: This is just an alias for sta_info_alloc(), see notes
- * on it in the lifecycle management section!
+ * Allocate a mesh sta entry; the caller inserts it into the station table
*/
static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
- u8 *hw_addr, u32 rates,
- struct ieee802_11_elems *elems)
+ u8 *hw_addr)
{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_supported_band *sband;
struct sta_info *sta;
- sband = local->hw.wiphy->bands[local->oper_channel->band];
-
- if (local->num_sta >= MESH_MAX_PLINKS)
+ if (sdata->local->num_sta >= MESH_MAX_PLINKS)
return NULL;
sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL);
@@ -108,16 +102,71 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
set_sta_flag(sta, WLAN_STA_WME);
- sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
- if (elems->ht_cap_elem)
- ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
- elems->ht_cap_elem,
- &sta->sta.ht_cap);
- rate_control_rate_init(sta);
-
return sta;
}
+/*
+ * mesh_set_ht_prot_mode - set correct HT protection mode
+ *
+ * Section 9.23.3.5 of IEEE 802.11-2012 describes the protection rules for HT
+ * mesh STAs in an MBSS. Three HT protection modes are supported for now:
+ * non-HT mixed mode, 20 MHz protection mode and no-protection mode. Non-HT
+ * mixed mode is selected if any non-HT peers are present in our MBSS.
+ * 20 MHz protection mode is selected if all peers in our 20/40 MHz MBSS
+ * support HT and at least one HT20 peer is present. Otherwise no-protection
+ * mode is selected.
+ */
+static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
+ u32 changed = 0;
+ u16 ht_opmode;
+ bool non_ht_sta = false, ht20_sta = false;
+
+ if (local->_oper_channel_type == NL80211_CHAN_NO_HT)
+ return 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ if (sdata != sta->sdata ||
+ sta->plink_state != NL80211_PLINK_ESTAB)
+ continue;
+
+ switch (sta->ch_type) {
+ case NL80211_CHAN_NO_HT:
+ mpl_dbg("mesh_plink %pM: nonHT sta (%pM) is present",
+ sdata->vif.addr, sta->sta.addr);
+ non_ht_sta = true;
+ goto out;
+ case NL80211_CHAN_HT20:
+ mpl_dbg("mesh_plink %pM: HT20 sta (%pM) is present",
+ sdata->vif.addr, sta->sta.addr);
+ ht20_sta = true;
+ default:
+ break;
+ }
+ }
+out:
+ rcu_read_unlock();
+
+ if (non_ht_sta)
+ ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED;
+ else if (ht20_sta && local->_oper_channel_type > NL80211_CHAN_HT20)
+ ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_20MHZ;
+ else
+ ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
+
+ if (sdata->vif.bss_conf.ht_operation_mode != ht_opmode) {
+ sdata->vif.bss_conf.ht_operation_mode = ht_opmode;
+ sdata->u.mesh.mshcfg.ht_opmode = ht_opmode;
+ changed = BSS_CHANGED_HT;
+ mpl_dbg("mesh_plink %pM: protection mode changed to %d",
+ sdata->vif.addr, ht_opmode);
+ }
+
+ return changed;
+}
+
/**
* __mesh_plink_deactivate - deactivate mesh peer link
*
@@ -187,7 +236,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
2 + sdata->u.mesh.mesh_id_len +
2 + sizeof(struct ieee80211_meshconf_ie) +
2 + sizeof(struct ieee80211_ht_cap) +
- 2 + sizeof(struct ieee80211_ht_info) +
+ 2 + sizeof(struct ieee80211_ht_operation) +
2 + 8 + /* peering IE */
sdata->u.mesh.ie_len);
if (!skb)
@@ -212,8 +261,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
pos = skb_put(skb, 2);
memcpy(pos + 2, &plid, 2);
}
- if (ieee80211_add_srates_ie(&sdata->vif, skb) ||
- ieee80211_add_ext_srates_ie(&sdata->vif, skb) ||
+ if (ieee80211_add_srates_ie(&sdata->vif, skb, true) ||
+ ieee80211_add_ext_srates_ie(&sdata->vif, skb, true) ||
mesh_add_rsn_ie(skb, sdata) ||
mesh_add_meshid_ie(skb, sdata) ||
mesh_add_meshconf_ie(skb, sdata))
@@ -263,7 +312,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
if (action != WLAN_SP_MESH_PEERING_CLOSE) {
if (mesh_add_ht_cap_ie(skb, sdata) ||
- mesh_add_ht_info_ie(skb, sdata))
+ mesh_add_ht_oper_ie(skb, sdata))
return -1;
}
@@ -274,43 +323,93 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
return 0;
}
-void mesh_neighbour_update(u8 *hw_addr, u32 rates,
- struct ieee80211_sub_if_data *sdata,
- struct ieee802_11_elems *elems)
+/* mesh_peer_init - initialize new mesh peer and return resulting sta_info
+ *
+ * @sdata: local meshif
+ * @addr: peer's address
+ * @elems: IEs from beacon or mesh peering frame
+ *
+ * call under RCU
+ */
+static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata,
+ u8 *addr,
+ struct ieee802_11_elems *elems)
{
struct ieee80211_local *local = sdata->local;
+ enum ieee80211_band band = local->oper_channel->band;
+ struct ieee80211_supported_band *sband;
+ u32 rates, basic_rates = 0;
struct sta_info *sta;
+ bool insert = false;
- rcu_read_lock();
+ sband = local->hw.wiphy->bands[band];
+ rates = ieee80211_sta_get_rates(local, elems, band, &basic_rates);
- sta = sta_info_get(sdata, hw_addr);
+ sta = sta_info_get(sdata, addr);
if (!sta) {
- rcu_read_unlock();
- /* Userspace handles peer allocation when security is enabled
- * */
- if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED)
- cfg80211_notify_new_peer_candidate(sdata->dev, hw_addr,
- elems->ie_start, elems->total_len,
- GFP_KERNEL);
- else
- sta = mesh_plink_alloc(sdata, hw_addr, rates, elems);
- if (!sta)
- return;
- if (sta_info_insert_rcu(sta)) {
- rcu_read_unlock();
- return;
+ /* Userspace handles peer allocation when security is enabled */
+ if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) {
+ cfg80211_notify_new_peer_candidate(sdata->dev, addr,
+ elems->ie_start,
+ elems->total_len,
+ GFP_ATOMIC);
+ return NULL;
}
+
+ sta = mesh_plink_alloc(sdata, addr);
+ if (!sta)
+ return NULL;
+ insert = true;
}
+ spin_lock_bh(&sta->lock);
sta->last_rx = jiffies;
- sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
+ sta->sta.supp_rates[band] = rates;
+ if (elems->ht_cap_elem &&
+ sdata->local->_oper_channel_type != NL80211_CHAN_NO_HT)
+ ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
+ elems->ht_cap_elem,
+ &sta->sta.ht_cap);
+ else
+ memset(&sta->sta.ht_cap, 0, sizeof(sta->sta.ht_cap));
+
+ if (elems->ht_operation) {
+ if (!(elems->ht_operation->ht_param &
+ IEEE80211_HT_PARAM_CHAN_WIDTH_ANY))
+ sta->sta.ht_cap.cap &=
+ ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ sta->ch_type =
+ ieee80211_ht_oper_to_channel_type(elems->ht_operation);
+ }
+
+ rate_control_rate_init(sta);
+ spin_unlock_bh(&sta->lock);
+
+ if (insert && sta_info_insert(sta))
+ return NULL;
+
+ return sta;
+}
+
+void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
+ u8 *hw_addr,
+ struct ieee802_11_elems *elems)
+{
+ struct sta_info *sta;
+
+ rcu_read_lock();
+ sta = mesh_peer_init(sdata, hw_addr, elems);
+ if (!sta)
+ goto out;
+
if (mesh_peer_accepts_plinks(elems) &&
- sta->plink_state == NL80211_PLINK_LISTEN &&
- sdata->u.mesh.accepting_plinks &&
- sdata->u.mesh.mshcfg.auto_open_plinks &&
- rssi_threshold_check(sta, sdata))
+ sta->plink_state == NL80211_PLINK_LISTEN &&
+ sdata->u.mesh.accepting_plinks &&
+ sdata->u.mesh.mshcfg.auto_open_plinks &&
+ rssi_threshold_check(sta, sdata))
mesh_plink_open(sta);
+out:
rcu_read_unlock();
}
@@ -456,15 +555,15 @@ void mesh_plink_block(struct sta_info *sta)
void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt,
size_t len, struct ieee80211_rx_status *rx_status)
{
- struct ieee80211_local *local = sdata->local;
struct ieee802_11_elems elems;
struct sta_info *sta;
enum plink_event event;
enum ieee80211_self_protected_actioncode ftype;
size_t baselen;
- bool deactivated, matches_local = true;
+ bool matches_local = true;
u8 ie_len;
u8 *baseaddr;
+ u32 changed = 0;
__le16 plid, llid, reason;
#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG
static const char *mplstates[] = {
@@ -560,7 +659,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
/* Now we will figure out the appropriate event... */
event = PLINK_UNDEFINED;
if (ftype != WLAN_SP_MESH_PEERING_CLOSE &&
- (!mesh_matches_local(&elems, sdata))) {
+ !mesh_matches_local(sdata, &elems)) {
matches_local = false;
switch (ftype) {
case WLAN_SP_MESH_PEERING_OPEN:
@@ -583,29 +682,13 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
return;
} else if (!sta) {
/* ftype == WLAN_SP_MESH_PEERING_OPEN */
- u32 rates;
-
- rcu_read_unlock();
-
if (!mesh_plink_free_count(sdata)) {
mpl_dbg("Mesh plink error: no more free plinks\n");
- return;
- }
-
- rates = ieee80211_sta_get_rates(local, &elems, rx_status->band);
- sta = mesh_plink_alloc(sdata, mgmt->sa, rates, &elems);
- if (!sta) {
- mpl_dbg("Mesh plink error: plink table full\n");
- return;
- }
- if (sta_info_insert_rcu(sta)) {
rcu_read_unlock();
return;
}
event = OPN_ACPT;
- spin_lock_bh(&sta->lock);
} else if (matches_local) {
- spin_lock_bh(&sta->lock);
switch (ftype) {
case WLAN_SP_MESH_PEERING_OPEN:
if (!mesh_plink_free_count(sdata) ||
@@ -642,12 +725,19 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
break;
default:
mpl_dbg("Mesh plink: unknown frame subtype\n");
- spin_unlock_bh(&sta->lock);
rcu_read_unlock();
return;
}
- } else {
- spin_lock_bh(&sta->lock);
+ }
+
+ if (event == OPN_ACPT) {
+ /* allocate sta entry if necessary and update info */
+ sta = mesh_peer_init(sdata, mgmt->sa, &elems);
+ if (!sta) {
+ mpl_dbg("Mesh plink: failed to init peer!\n");
+ rcu_read_unlock();
+ return;
+ }
}
mpl_dbg("Mesh plink (peer, state, llid, plid, event): %pM %s %d %d %d\n",
@@ -655,6 +745,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
le16_to_cpu(sta->llid), le16_to_cpu(sta->plid),
event);
reason = 0;
+ spin_lock_bh(&sta->lock);
switch (sta->plink_state) {
/* spin_unlock as soon as state is updated at each case */
case NL80211_PLINK_LISTEN:
@@ -758,7 +849,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
sta->plink_state = NL80211_PLINK_ESTAB;
spin_unlock_bh(&sta->lock);
mesh_plink_inc_estab_count(sdata);
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+ changed |= mesh_set_ht_prot_mode(sdata);
+ changed |= BSS_CHANGED_BEACON;
mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
sta->sta.addr);
break;
@@ -793,7 +885,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
sta->plink_state = NL80211_PLINK_ESTAB;
spin_unlock_bh(&sta->lock);
mesh_plink_inc_estab_count(sdata);
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+ changed |= mesh_set_ht_prot_mode(sdata);
+ changed |= BSS_CHANGED_BEACON;
mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
sta->sta.addr);
mesh_plink_frame_tx(sdata,
@@ -811,13 +904,13 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
case CLS_ACPT:
reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE);
sta->reason = reason;
- deactivated = __mesh_plink_deactivate(sta);
+ __mesh_plink_deactivate(sta);
sta->plink_state = NL80211_PLINK_HOLDING;
llid = sta->llid;
mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
spin_unlock_bh(&sta->lock);
- if (deactivated)
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+ changed |= mesh_set_ht_prot_mode(sdata);
+ changed |= BSS_CHANGED_BEACON;
mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
sta->sta.addr, llid, plid, reason);
break;
@@ -864,4 +957,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
}
rcu_read_unlock();
+
+ if (changed)
+ ieee80211_bss_info_change_notify(sdata, changed);
}
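mesh_set_ht_prot_mode() above boils down to a three-way choice over the
established peers. A condensed restatement of the selection (the opmode
constants come from include/linux/ieee80211.h; the helper below is a
hypothetical sketch, not the patch's function):

	static u16 pick_ht_opmode(bool non_ht_peer, bool ht20_peer,
				  bool mbss_is_40mhz)
	{
		if (non_ht_peer)		/* any non-HT peer present */
			return IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED;
		if (ht20_peer && mbss_is_40mhz)	/* HT20 peer, 40 MHz MBSS */
			return IEEE80211_HT_OP_MODE_PROTECTION_20MHZ;
		return IEEE80211_HT_OP_MODE_PROTECTION_NONE;
	}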
diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c
new file mode 100644
index 000000000000..38d30e8ce6dc
--- /dev/null
+++ b/net/mac80211/mesh_sync.c
@@ -0,0 +1,316 @@
+/*
+ * Copyright 2011-2012, Pavel Zubarev <pavel.zubarev@gmail.com>
+ * Copyright 2011-2012, Marco Porsch <marco.porsch@s2005.tu-chemnitz.de>
+ * Copyright 2011-2012, cozybit Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "ieee80211_i.h"
+#include "mesh.h"
+#include "driver-ops.h"
+
+#ifdef CONFIG_MAC80211_VERBOSE_MESH_SYNC_DEBUG
+#define msync_dbg(fmt, args...) \
+ printk(KERN_DEBUG "Mesh sync (%s): " fmt "\n", sdata->name, ##args)
+#else
+#define msync_dbg(fmt, args...) do { (void)(0); } while (0)
+#endif
+
+/* This is not in the standard. It represents a tolerable TBTT drift below
+ * which we do no TSF adjustment.
+ */
+#define TOFFSET_MINIMUM_ADJUSTMENT 10
+
+/* This is not in the standard. It is a margin added to the
+ * Toffset setpoint to mitigate TSF overcorrection
+ * introduced by TSF adjustment latency.
+ */
+#define TOFFSET_SET_MARGIN 20
+
+/* This is not in the standard. It represents the maximum Toffset jump above
+ * which we'll invalidate the Toffset setpoint and choose a new setpoint. This
+ * could happen, for instance, if a neighbor is restarted and its TSF counter
+ * is reset.
+ */
+#define TOFFSET_MAXIMUM_ADJUSTMENT 30000 /* 30 ms */
+
+struct sync_method {
+ u8 method;
+ struct ieee80211_mesh_sync_ops ops;
+};
+
+/**
+ * mesh_peer_tbtt_adjusting - check if an mp is currently adjusting its TBTT
+ *
+ * @ie: information elements of a management frame from the mesh peer
+ */
+static bool mesh_peer_tbtt_adjusting(struct ieee802_11_elems *ie)
+{
+ return (ie->mesh_config->meshconf_cap &
+ MESHCONF_CAPAB_TBTT_ADJUSTING) != 0;
+}
+
+void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+	/* sdata->vif.bss_conf.beacon_int is in TU (1024 us) units;
+	 * 1024/2500 of it is 0.04% of the beacon interval, in us */
+ u64 beacon_int_fraction = sdata->vif.bss_conf.beacon_int * 1024 / 2500;
+ u64 tsf;
+ u64 tsfdelta;
+
+ spin_lock_bh(&ifmsh->sync_offset_lock);
+
+ if (ifmsh->sync_offset_clockdrift_max < beacon_int_fraction) {
+ msync_dbg("TBTT : max clockdrift=%lld; adjusting",
+ (long long) ifmsh->sync_offset_clockdrift_max);
+ tsfdelta = -ifmsh->sync_offset_clockdrift_max;
+ ifmsh->sync_offset_clockdrift_max = 0;
+ } else {
+ msync_dbg("TBTT : max clockdrift=%lld; adjusting by %llu",
+ (long long) ifmsh->sync_offset_clockdrift_max,
+ (unsigned long long) beacon_int_fraction);
+ tsfdelta = -beacon_int_fraction;
+ ifmsh->sync_offset_clockdrift_max -= beacon_int_fraction;
+ }
+
+ tsf = drv_get_tsf(local, sdata);
+ if (tsf != -1ULL)
+ drv_set_tsf(local, sdata, tsf + tsfdelta);
+ spin_unlock_bh(&ifmsh->sync_offset_lock);
+}
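
An illustrative, standalone sketch of the clamping arithmetic above:
beacon_int is in TU (1024 us), and the per-beacon TSF correction is
capped at beacon_int * 1024 / 2500, i.e. 0.04% of the beacon interval,
with the remainder carried over to the next beacon.

	#include <stdio.h>
	#include <stdint.h>

	/* Largest TSF step (in us) applied within one beacon interval. */
	static int64_t clamp_tsf_step(int64_t drift_max_us, uint16_t beacon_int_tu)
	{
		int64_t cap = (int64_t)beacon_int_tu * 1024 / 2500;

		return drift_max_us < cap ? drift_max_us : cap;
	}

	int main(void)
	{
		/* 150 us of accumulated drift at a 100 TU beacon interval:
		 * only 40 us may be corrected now; the rest carries over. */
		printf("step = %lld us\n", (long long)clamp_tsf_step(150, 100));
		return 0;
	}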
+
+static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
+ u16 stype,
+ struct ieee80211_mgmt *mgmt,
+ struct ieee802_11_elems *elems,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
+ u64 t_t, t_r;
+
+ WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET);
+
+ /* standard mentions only beacons */
+ if (stype != IEEE80211_STYPE_BEACON)
+ return;
+
+ /* The current tsf is a first approximation for the timestamp
+ * for the received beacon. Further down we try to get a
+ * better value from the rx_status->mactime field if
+ * available. Also we have to call drv_get_tsf() before
+ * entering the RCU read-side critical section. */
+ t_r = drv_get_tsf(local, sdata);
+
+ rcu_read_lock();
+ sta = sta_info_get(sdata, mgmt->sa);
+ if (!sta)
+ goto no_sync;
+
+ /* check offset sync conditions (13.13.2.2.1)
+ *
+ * TODO also sync to
+ * dot11MeshNbrOffsetMaxNeighbor non-peer non-MBSS neighbors
+ */
+
+ if (elems->mesh_config && mesh_peer_tbtt_adjusting(elems)) {
+ clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
+ msync_dbg("STA %pM : is adjusting TBTT", sta->sta.addr);
+ goto no_sync;
+ }
+
+ if (rx_status->flag & RX_FLAG_MACTIME_MPDU && rx_status->mactime) {
+ /*
+ * The mactime is defined as the time the first data symbol
+ * of the frame hits the PHY, and the timestamp of the beacon
+ * is defined as "the time that the data symbol containing the
+ * first bit of the timestamp is transmitted to the PHY plus
+ * the transmitting STA's delays through its local PHY from the
+ * MAC-PHY interface to its interface with the WM" (802.11
+ * 11.1.2)
+ *
+ * T_r, in 13.13.2.2.2, is just defined as "the frame reception
+ * time" but we unless we interpret that time to be the same
+ * time of the beacon timestamp, the offset calculation will be
+ * off. Below we adjust t_r to be "the time at which the first
+ * symbol of the timestamp element in the beacon is received".
+ * This correction depends on the rate.
+ *
+ * Based on similar code in ibss.c
+ */
+ int rate;
+
+ if (rx_status->flag & RX_FLAG_HT) {
+ /* TODO:
+ * In principle there could be HT-beacons (Dual Beacon
+ * HT Operation options), but for now ignore them and
+ * just use the primary (i.e. non-HT) beacons for
+ * synchronization.
+ */
+ goto no_sync;
+ } else {
+ rate = local->hw.wiphy->bands[rx_status->band]->
+ bitrates[rx_status->rate_idx].bitrate;
+ }
+
+ /* Header airtime in us: 24 bytes of header * 8 bits/byte * 10,
+ * divided by the rate in 100 kbps units */
+ t_r = rx_status->mactime + (24 * 8 * 10 / rate);
+ }
+
+ /* Timing offset calculation (see 13.13.2.2.2) */
+ t_t = le64_to_cpu(mgmt->u.beacon.timestamp);
+ sta->t_offset = t_t - t_r;
+
+ if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
+ s64 t_clockdrift = sta->t_offset_setpoint - sta->t_offset;
+ msync_dbg("STA %pM : sta->t_offset=%lld, sta->t_offset_setpoint=%lld, t_clockdrift=%lld",
+ sta->sta.addr,
+ (long long) sta->t_offset,
+ (long long) sta->t_offset_setpoint,
+ (long long) t_clockdrift);
+
+ if (t_clockdrift > TOFFSET_MAXIMUM_ADJUSTMENT ||
+ t_clockdrift < -TOFFSET_MAXIMUM_ADJUSTMENT) {
+ msync_dbg("STA %pM : t_clockdrift=%lld too large, setpoint reset",
+ sta->sta.addr,
+ (long long) t_clockdrift);
+ clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
+ goto no_sync;
+ }
+
+ rcu_read_unlock();
+
+ spin_lock_bh(&ifmsh->sync_offset_lock);
+ if (t_clockdrift > ifmsh->sync_offset_clockdrift_max)
+ ifmsh->sync_offset_clockdrift_max = t_clockdrift;
+ spin_unlock_bh(&ifmsh->sync_offset_lock);
+
+ } else {
+ sta->t_offset_setpoint = sta->t_offset - TOFFSET_SET_MARGIN;
+ set_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
+ msync_dbg("STA %pM : offset was invalid, "
+ " sta->t_offset=%lld",
+ sta->sta.addr,
+ (long long) sta->t_offset);
+ rcu_read_unlock();
+ }
+ return;
+
+no_sync:
+ rcu_read_unlock();
+}
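
An illustrative, standalone sketch of the mactime correction above:
bitrates are stored in 100 kbps units (as in struct ieee80211_rate),
so the 24-byte MAC header preceding the timestamp field occupies
24 * 8 * 10 / bitrate microseconds on the air.

	#include <stdio.h>
	#include <stdint.h>

	/* Advance mactime past the 24-byte header to the timestamp field. */
	static uint64_t timestamp_rx_time(uint64_t mactime_us, uint16_t bitrate)
	{
		return mactime_us + (24 * 8 * 10 / bitrate);
	}

	int main(void)
	{
		/* 1 Mbps (bitrate 10) adds 192 us; 6 Mbps (bitrate 60) adds 32 us */
		printf("%llu\n", (unsigned long long)timestamp_rx_time(1000, 10));
		printf("%llu\n", (unsigned long long)timestamp_rx_time(1000, 60));
		return 0;
	}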
+
+static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+
+ WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET);
+ BUG_ON(!rcu_read_lock_held());
+
+ spin_lock_bh(&ifmsh->sync_offset_lock);
+
+ if (ifmsh->sync_offset_clockdrift_max > TOFFSET_MINIMUM_ADJUSTMENT) {
+ /* Since adjusting the TSF here would require a possibly
+ * blocking call to the driver TSF setter, we punt the TSF
+ * adjustment to the mesh tasklet
+ */
+ msync_dbg("TBTT : kicking off TBTT "
+ "adjustment with "
+ "clockdrift_max=%lld",
+ ifmsh->sync_offset_clockdrift_max);
+ set_bit(MESH_WORK_DRIFT_ADJUST,
+ &ifmsh->wrkq_flags);
+ } else {
+ msync_dbg("TBTT : max clockdrift=%lld; "
+ "too small to adjust",
+ (long long)
+ ifmsh->sync_offset_clockdrift_max);
+ ifmsh->sync_offset_clockdrift_max = 0;
+ }
+ spin_unlock_bh(&ifmsh->sync_offset_lock);
+}
+
+static const u8 *mesh_get_vendor_oui(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ u8 offset;
+
+ if (!ifmsh->ie || !ifmsh->ie_len)
+ return NULL;
+
+ offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0);
+
+ if (!offset)
+ return NULL;
+
+ return ifmsh->ie + offset + 2;
+}
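
What the "+ 2" above skips, shown as a hypothetical flat parse of an
IE buffer: element ID 221 is Vendor Specific and its payload begins
with a 3-byte OUI, so the OUI sits two bytes (element ID plus length)
past the element's offset.

	#include <stddef.h>
	#include <stdint.h>

	static const uint8_t *find_vendor_oui(const uint8_t *ies, size_t len)
	{
		size_t pos = 0;

		while (pos + 2 <= len && pos + 2 + ies[pos + 1] <= len) {
			if (ies[pos] == 221 && ies[pos + 1] >= 3)
				return ies + pos + 2;	/* skip ID and length byte */
			pos += 2 + ies[pos + 1];
		}
		return NULL;
	}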
+
+static void mesh_sync_vendor_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
+ u16 stype,
+ struct ieee80211_mgmt *mgmt,
+ struct ieee802_11_elems *elems,
+ struct ieee80211_rx_status *rx_status)
+{
+ const u8 *oui;
+
+ WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR);
+ msync_dbg("called mesh_sync_vendor_rx_bcn_presp");
+ oui = mesh_get_vendor_oui(sdata);
+ /* here you would implement the vendor offset tracking for this oui */
+}
+
+static void mesh_sync_vendor_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
+{
+ const u8 *oui;
+
+ WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR);
+ msync_dbg("called mesh_sync_vendor_adjust_tbtt");
+ oui = mesh_get_vendor_oui(sdata);
+ /* here you would implement the vendor tsf adjustment for this oui */
+}
+
+/* global table of supported synchronization methods */
+static struct sync_method sync_methods[] = {
+ {
+ .method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET,
+ .ops = {
+ .rx_bcn_presp = &mesh_sync_offset_rx_bcn_presp,
+ .adjust_tbtt = &mesh_sync_offset_adjust_tbtt,
+ }
+ },
+ {
+ .method = IEEE80211_SYNC_METHOD_VENDOR,
+ .ops = {
+ .rx_bcn_presp = &mesh_sync_vendor_rx_bcn_presp,
+ .adjust_tbtt = &mesh_sync_vendor_adjust_tbtt,
+ }
+ },
+};
+
+struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method)
+{
+ struct ieee80211_mesh_sync_ops *ops = NULL;
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(sync_methods); ++i) {
+ if (sync_methods[i].method == method) {
+ ops = &sync_methods[i].ops;
+ break;
+ }
+ }
+ return ops;
+}
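
A hypothetical call site for the lookup above, to show the intended
use of the returned ops (the function and field names are taken from
this patch; the call site itself is illustrative):

	struct ieee80211_mesh_sync_ops *ops;

	ops = ieee80211_mesh_sync_ops_get(ifmsh->mesh_sp_id);
	if (ops && ops->adjust_tbtt)
		ops->adjust_tbtt(sdata);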
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 576fb25456dd..04c306308987 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -171,122 +171,64 @@ static int ecw2cw(int ecw)
return (1 << ecw) - 1;
}
-/*
- * ieee80211_enable_ht should be called only after the operating band
- * has been determined as ht configuration depends on the hw's
- * HT abilities for a specific band.
- */
-static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_ht_info *hti,
- const u8 *bssid, u16 ap_ht_cap_flags,
- bool beacon_htcap_ie)
+static u32 ieee80211_config_ht_tx(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_ht_operation *ht_oper,
+ const u8 *bssid, bool reconfig)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
struct sta_info *sta;
u32 changed = 0;
- int hti_cfreq;
u16 ht_opmode;
- bool enable_ht = true;
- enum nl80211_channel_type prev_chantype;
- enum nl80211_channel_type rx_channel_type = NL80211_CHAN_NO_HT;
- enum nl80211_channel_type tx_channel_type;
+ bool disable_40 = false;
sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
- prev_chantype = sdata->vif.bss_conf.channel_type;
-
- hti_cfreq = ieee80211_channel_to_frequency(hti->control_chan,
- sband->band);
- /* check that channel matches the right operating channel */
- if (local->hw.conf.channel->center_freq != hti_cfreq) {
- /* Some APs mess this up, evidently.
- * Netgear WNDR3700 sometimes reports 4 higher than
- * the actual channel, for instance.
- */
- printk(KERN_DEBUG
- "%s: Wrong control channel in association"
- " response: configured center-freq: %d"
- " hti-cfreq: %d hti->control_chan: %d"
- " band: %d. Disabling HT.\n",
- sdata->name,
- local->hw.conf.channel->center_freq,
- hti_cfreq, hti->control_chan,
- sband->band);
- enable_ht = false;
- }
-
- if (enable_ht) {
- rx_channel_type = NL80211_CHAN_HT20;
-
- if (!(ap_ht_cap_flags & IEEE80211_HT_CAP_40MHZ_INTOLERANT) &&
- !ieee80111_cfg_override_disables_ht40(sdata) &&
- (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) &&
- (hti->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) {
- switch(hti->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
- case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
- rx_channel_type = NL80211_CHAN_HT40PLUS;
- break;
- case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
- rx_channel_type = NL80211_CHAN_HT40MINUS;
- break;
- }
- }
+ switch (sdata->vif.bss_conf.channel_type) {
+ case NL80211_CHAN_HT40PLUS:
+ if (local->hw.conf.channel->flags & IEEE80211_CHAN_NO_HT40PLUS)
+ disable_40 = true;
+ break;
+ case NL80211_CHAN_HT40MINUS:
+ if (local->hw.conf.channel->flags & IEEE80211_CHAN_NO_HT40MINUS)
+ disable_40 = true;
+ break;
+ default:
+ break;
}
- tx_channel_type = ieee80211_get_tx_channel_type(local, rx_channel_type);
+ /* This can change during the lifetime of the BSS */
+ if (!(ht_oper->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY))
+ disable_40 = true;
- if (local->tmp_channel)
- local->tmp_channel_type = rx_channel_type;
-
- if (!ieee80211_set_channel_type(local, sdata, rx_channel_type)) {
- /* can only fail due to HT40+/- mismatch */
- rx_channel_type = NL80211_CHAN_HT20;
- WARN_ON(!ieee80211_set_channel_type(local, sdata,
- rx_channel_type));
- }
-
- if (beacon_htcap_ie && (prev_chantype != rx_channel_type)) {
- /*
- * Whenever the AP announces the HT mode change that can be
- * 40MHz intolerant or etc., it would be safer to stop tx
- * queues before doing hw config to avoid buffer overflow.
- */
- ieee80211_stop_queues_by_reason(&sdata->local->hw,
- IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE);
+ mutex_lock(&local->sta_mtx);
+ sta = sta_info_get(sdata, bssid);
- /* flush out all packets */
- synchronize_net();
+ WARN_ON_ONCE(!sta);
- drv_flush(local, false);
- }
+ if (sta && !sta->supports_40mhz)
+ disable_40 = true;
- /* channel_type change automatically detected */
- ieee80211_hw_config(local, 0);
+ if (sta && (!reconfig ||
+ (disable_40 != !(sta->sta.ht_cap.cap &
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40)))) {
- if (prev_chantype != tx_channel_type) {
- rcu_read_lock();
- sta = sta_info_get(sdata, bssid);
- if (sta)
- rate_control_rate_update(local, sband, sta,
- IEEE80211_RC_HT_CHANGED,
- tx_channel_type);
- rcu_read_unlock();
+ if (disable_40)
+ sta->sta.ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ else
+ sta->sta.ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- if (beacon_htcap_ie)
- ieee80211_wake_queues_by_reason(&sdata->local->hw,
- IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE);
+ rate_control_rate_update(local, sband, sta,
+ IEEE80211_RC_BW_CHANGED);
}
+ mutex_unlock(&local->sta_mtx);
- ht_opmode = le16_to_cpu(hti->operation_mode);
+ ht_opmode = le16_to_cpu(ht_oper->operation_mode);
/* if bss configuration changed store the new one */
- if (sdata->ht_opmode_valid != enable_ht ||
- sdata->vif.bss_conf.ht_operation_mode != ht_opmode ||
- prev_chantype != rx_channel_type) {
+ if (!reconfig || (sdata->vif.bss_conf.ht_operation_mode != ht_opmode)) {
changed |= BSS_CHANGED_HT;
sdata->vif.bss_conf.ht_operation_mode = ht_opmode;
- sdata->ht_opmode_valid = enable_ht;
}
return changed;
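
The disable_40 decision above, reduced to a standalone predicate (the
flag values are illustrative stand-ins for the corresponding
mac80211/nl80211 constants):

	#include <stdbool.h>
	#include <stdint.h>

	#define CHAN_NO_HT40PLUS	0x1	/* stand-in for IEEE80211_CHAN_NO_HT40PLUS */
	#define CHAN_NO_HT40MINUS	0x2	/* stand-in for IEEE80211_CHAN_NO_HT40MINUS */
	#define HT_PARAM_CHAN_WIDTH_ANY	0x4	/* stand-in for IEEE80211_HT_PARAM_CHAN_WIDTH_ANY */

	enum chan_type { HT20, HT40PLUS, HT40MINUS };

	static bool disable_40mhz(enum chan_type conf, uint32_t chan_flags,
				  uint8_t ht_param, bool sta_supports_40)
	{
		/* local restrictions on the configured channel type */
		if (conf == HT40PLUS && (chan_flags & CHAN_NO_HT40PLUS))
			return true;
		if (conf == HT40MINUS && (chan_flags & CHAN_NO_HT40MINUS))
			return true;
		/* the AP may clear this during the lifetime of the BSS */
		if (!(ht_param & HT_PARAM_CHAN_WIDTH_ANY))
			return true;
		/* finally, the peer must be 40 MHz capable */
		return !sta_supports_40;
	}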
@@ -316,12 +258,12 @@ static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len,
}
static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, const u8 *ht_info_ie,
+ struct sk_buff *skb, const u8 *ht_oper_ie,
struct ieee80211_supported_band *sband,
struct ieee80211_channel *channel,
enum ieee80211_smps_mode smps)
{
- struct ieee80211_ht_info *ht_info;
+ struct ieee80211_ht_operation *ht_oper;
u8 *pos;
u32 flags = channel->flags;
u16 cap;
@@ -329,21 +271,21 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap));
- if (!ht_info_ie)
+ if (!ht_oper_ie)
return;
- if (ht_info_ie[1] < sizeof(struct ieee80211_ht_info))
+ if (ht_oper_ie[1] < sizeof(struct ieee80211_ht_operation))
return;
memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
ieee80211_apply_htcap_overrides(sdata, &ht_cap);
- ht_info = (struct ieee80211_ht_info *)(ht_info_ie + 2);
+ ht_oper = (struct ieee80211_ht_operation *)(ht_oper_ie + 2);
/* determine capability flags */
cap = ht_cap.cap;
- switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
+ switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
@@ -358,6 +300,16 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
break;
}
+ /*
+ * If 40 MHz was disabled associate as though we weren't
+ * capable of 40 MHz -- some broken APs will never fall
+ * back to trying to transmit in 20 MHz.
+ */
+ if (sdata->u.mgd.flags & IEEE80211_STA_DISABLE_40MHZ) {
+ cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ cap &= ~IEEE80211_HT_CAP_SGI_40;
+ }
+
/* set SM PS mode properly */
cap &= ~IEEE80211_HT_CAP_SM_PS;
switch (smps) {
@@ -557,7 +509,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
}
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
- ieee80211_add_ht_ie(sdata, skb, assoc_data->ht_information_ie,
+ ieee80211_add_ht_ie(sdata, skb, assoc_data->ht_operation_ie,
sband, local->oper_channel, ifmgd->ap_smps);
/* if present, add any custom non-vendor IEs that go after HT */
@@ -1182,7 +1134,7 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
if (!local->ops->conf_tx)
return;
- if (local->hw.queues < 4)
+ if (local->hw.queues < IEEE80211_NUM_ACS)
return;
if (!wmm_param)
@@ -1435,7 +1387,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
sdata->vif.bss_conf.assoc = false;
/* on the next assoc, re-program HT parameters */
- sdata->ht_opmode_valid = false;
memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa));
memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask));
@@ -1496,19 +1447,24 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct ieee80211_local *local = sdata->local;
+ mutex_lock(&local->mtx);
if (!(ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
- IEEE80211_STA_CONNECTION_POLL)))
- return;
+ IEEE80211_STA_CONNECTION_POLL))) {
+ mutex_unlock(&local->mtx);
+ return;
+ }
ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
IEEE80211_STA_BEACON_POLL);
- mutex_lock(&sdata->local->iflist_mtx);
- ieee80211_recalc_ps(sdata->local, -1);
- mutex_unlock(&sdata->local->iflist_mtx);
+
+ mutex_lock(&local->iflist_mtx);
+ ieee80211_recalc_ps(local, -1);
+ mutex_unlock(&local->iflist_mtx);
if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
- return;
+ goto out;
/*
* We've received a probe response, but are not sure whether
@@ -1520,6 +1476,9 @@ static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
mod_timer(&ifmgd->conn_mon_timer,
round_jiffies_up(jiffies +
IEEE80211_CONNECTION_IDLE_TIME));
+out:
+ ieee80211_run_deferred_scan(local);
+ mutex_unlock(&local->mtx);
}
void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
@@ -1563,18 +1522,28 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
* anymore. The timeout will be reset if the frame is ACKed by
* the AP.
*/
+ ifmgd->probe_send_count++;
+
if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
ifmgd->nullfunc_failed = false;
ieee80211_send_nullfunc(sdata->local, sdata, 0);
} else {
+ int ssid_len;
+
ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
- ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid[1], NULL, 0,
- (u32) -1, true, false);
+ if (WARN_ON_ONCE(ssid == NULL))
+ ssid_len = 0;
+ else
+ ssid_len = ssid[1];
+
+ ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid_len, NULL,
+ 0, (u32) -1, true, false);
}
- ifmgd->probe_send_count++;
ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
run_again(ifmgd, ifmgd->probe_timeout);
+ if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
+ drv_flush(sdata->local, false);
}
static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
@@ -1586,21 +1555,22 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
if (!ieee80211_sdata_running(sdata))
return;
- if (sdata->local->scanning)
- return;
-
- if (sdata->local->tmp_channel)
- return;
-
mutex_lock(&ifmgd->mtx);
if (!ifmgd->associated)
goto out;
+ mutex_lock(&sdata->local->mtx);
+
+ if (sdata->local->tmp_channel || sdata->local->scanning) {
+ mutex_unlock(&sdata->local->mtx);
+ goto out;
+ }
+
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- if (beacon && net_ratelimit())
- printk(KERN_DEBUG "%s: detected beacon loss from AP "
- "- sending probe request\n", sdata->name);
+ if (beacon)
+ net_dbg_ratelimited("%s: detected beacon loss from AP - sending probe request\n",
+ sdata->name);
#endif
/*
@@ -1623,6 +1593,8 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
else
ifmgd->flags |= IEEE80211_STA_CONNECTION_POLL;
+ mutex_unlock(&sdata->local->mtx);
+
if (already)
goto out;
@@ -1643,6 +1615,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct sk_buff *skb;
const u8 *ssid;
+ int ssid_len;
if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
return NULL;
@@ -1653,8 +1626,13 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
return NULL;
ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
+ if (WARN_ON_ONCE(ssid == NULL))
+ ssid_len = 0;
+ else
+ ssid_len = ssid[1];
+
skb = ieee80211_build_probe_req(sdata, ifmgd->associated->bssid,
- (u32) -1, ssid + 2, ssid[1],
+ (u32) -1, ssid + 2, ssid_len,
NULL, 0, true);
return skb;
@@ -1799,7 +1777,7 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
memcpy(bssid, ifmgd->auth_data->bss->bssid, ETH_ALEN);
- if (compare_ether_addr(bssid, mgmt->bssid))
+ if (!ether_addr_equal(bssid, mgmt->bssid))
return RX_MGMT_NONE;
auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
@@ -1876,7 +1854,7 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
return RX_MGMT_NONE;
if (!ifmgd->associated ||
- compare_ether_addr(mgmt->bssid, ifmgd->associated->bssid))
+ !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
return RX_MGMT_NONE;
bssid = ifmgd->associated->bssid;
@@ -1909,7 +1887,7 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
return RX_MGMT_NONE;
if (!ifmgd->associated ||
- compare_ether_addr(mgmt->bssid, ifmgd->associated->bssid))
+ !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
return RX_MGMT_NONE;
reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
@@ -2000,7 +1978,6 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
u32 changed = 0;
int err;
- u16 ap_ht_cap_flags;
/* AssocResp and ReassocResp have identical structure */
@@ -2051,7 +2028,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
elems.ht_cap_elem, &sta->sta.ht_cap);
- ap_ht_cap_flags = sta->sta.ht_cap.cap;
+ sta->supports_40mhz =
+ sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
rate_control_rate_init(sta);
@@ -2092,11 +2070,10 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
ieee80211_set_wmm_default(sdata, false);
changed |= BSS_CHANGED_QOS;
- if (elems.ht_info_elem && elems.wmm_param &&
+ if (elems.ht_operation && elems.wmm_param &&
!(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
- changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem,
- cbss->bssid, ap_ht_cap_flags,
- false);
+ changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation,
+ cbss->bssid, false);
/* set AID and assoc capability,
* ieee80211_set_associated() will tell the driver */
@@ -2137,7 +2114,7 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
if (!assoc_data)
return RX_MGMT_NONE;
- if (compare_ether_addr(assoc_data->bss->bssid, mgmt->bssid))
+ if (!ether_addr_equal(assoc_data->bss->bssid, mgmt->bssid))
return RX_MGMT_NONE;
/*
@@ -2217,8 +2194,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
bool need_ps = false;
if (sdata->u.mgd.associated &&
- compare_ether_addr(mgmt->bssid, sdata->u.mgd.associated->bssid)
- == 0) {
+ ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) {
bss = (void *)sdata->u.mgd.associated->priv;
/* not previously set so we may need to recalc */
need_ps = !bss->dtim_period;
@@ -2273,7 +2249,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
ASSERT_MGD_MTX(ifmgd);
- if (compare_ether_addr(mgmt->da, sdata->vif.addr))
+ if (!ether_addr_equal(mgmt->da, sdata->vif.addr))
return; /* ignore ProbeResp to foreign address */
baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
@@ -2286,12 +2262,11 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false);
if (ifmgd->associated &&
- compare_ether_addr(mgmt->bssid, ifmgd->associated->bssid) == 0)
+ ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
ieee80211_reset_ap_probe(sdata);
if (ifmgd->auth_data && !ifmgd->auth_data->bss->proberesp_ies &&
- compare_ether_addr(mgmt->bssid, ifmgd->auth_data->bss->bssid)
- == 0) {
+ ether_addr_equal(mgmt->bssid, ifmgd->auth_data->bss->bssid)) {
/* got probe response, continue with auth */
printk(KERN_DEBUG "%s: direct probe responded\n", sdata->name);
ifmgd->auth_data->tries = 0;
@@ -2319,7 +2294,7 @@ static const u64 care_about_ies =
(1ULL << WLAN_EID_CHANNEL_SWITCH) |
(1ULL << WLAN_EID_PWR_CONSTRAINT) |
(1ULL << WLAN_EID_HT_CAPABILITY) |
- (1ULL << WLAN_EID_HT_INFORMATION);
+ (1ULL << WLAN_EID_HT_OPERATION);
static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt,
@@ -2348,8 +2323,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
return;
if (ifmgd->assoc_data && !ifmgd->assoc_data->have_beacon &&
- compare_ether_addr(mgmt->bssid, ifmgd->assoc_data->bss->bssid)
- == 0) {
+ ether_addr_equal(mgmt->bssid, ifmgd->assoc_data->bss->bssid)) {
ieee802_11_parse_elems(mgmt->u.beacon.variable,
len - baselen, &elems);
@@ -2364,7 +2338,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
}
if (!ifmgd->associated ||
- compare_ether_addr(mgmt->bssid, ifmgd->associated->bssid))
+ !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
return;
bssid = ifmgd->associated->bssid;
@@ -2431,10 +2405,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- if (net_ratelimit()) {
- printk(KERN_DEBUG "%s: cancelling probereq poll due "
- "to a received beacon\n", sdata->name);
- }
+ net_dbg_ratelimited("%s: cancelling probereq poll due to a received beacon\n",
+ sdata->name);
#endif
ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
mutex_lock(&local->iflist_mtx);
@@ -2468,11 +2440,13 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) {
if (directed_tim) {
if (local->hw.conf.dynamic_ps_timeout > 0) {
- local->hw.conf.flags &= ~IEEE80211_CONF_PS;
- ieee80211_hw_config(local,
- IEEE80211_CONF_CHANGE_PS);
+ if (local->hw.conf.flags & IEEE80211_CONF_PS) {
+ local->hw.conf.flags &= ~IEEE80211_CONF_PS;
+ ieee80211_hw_config(local,
+ IEEE80211_CONF_CHANGE_PS);
+ }
ieee80211_send_nullfunc(local, sdata, 0);
- } else {
+ } else if (!local->pspolling && sdata->u.mgd.powersave) {
local->pspolling = true;
/*
@@ -2504,31 +2478,14 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
erp_valid, erp_value);
- if (elems.ht_cap_elem && elems.ht_info_elem && elems.wmm_param &&
+ if (elems.ht_cap_elem && elems.ht_operation && elems.wmm_param &&
!(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) {
- struct sta_info *sta;
struct ieee80211_supported_band *sband;
- u16 ap_ht_cap_flags;
-
- rcu_read_lock();
-
- sta = sta_info_get(sdata, bssid);
- if (WARN_ON(!sta)) {
- rcu_read_unlock();
- return;
- }
sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
- ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
- elems.ht_cap_elem, &sta->sta.ht_cap);
-
- ap_ht_cap_flags = sta->sta.ht_cap.cap;
-
- rcu_read_unlock();
-
- changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem,
- bssid, ap_ht_cap_flags, true);
+ changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation,
+ bssid, true);
}
/* Note: country IE parsing is done for us by cfg80211 */
@@ -3060,6 +3017,11 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta;
bool have_sta = false;
int err;
+ int ht_cfreq;
+ enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
+ const u8 *ht_oper_ie;
+ const struct ieee80211_ht_operation *ht_oper = NULL;
+ struct ieee80211_supported_band *sband;
if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data))
return -EINVAL;
@@ -3081,17 +3043,76 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
mutex_unlock(&local->mtx);
/* switch to the right channel */
+ sband = local->hw.wiphy->bands[cbss->channel->band];
+
+ ifmgd->flags &= ~IEEE80211_STA_DISABLE_40MHZ;
+
+ if (sband->ht_cap.ht_supported) {
+ ht_oper_ie = cfg80211_find_ie(WLAN_EID_HT_OPERATION,
+ cbss->information_elements,
+ cbss->len_information_elements);
+ if (ht_oper_ie && ht_oper_ie[1] >= sizeof(*ht_oper))
+ ht_oper = (void *)(ht_oper_ie + 2);
+ }
+
+ if (ht_oper) {
+ ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
+ cbss->channel->band);
+ /* check that channel matches the right operating channel */
+ if (cbss->channel->center_freq != ht_cfreq) {
+ /*
+ * It's possible that some APs are confused here;
+ * Netgear WNDR3700 sometimes reports 4 higher than
+ * the actual channel in association responses, but
+ * since we look at probe response/beacon data here
+ * it should be OK.
+ */
+ printk(KERN_DEBUG
+ "%s: Wrong control channel: center-freq: %d"
+ " ht-cfreq: %d ht->primary_chan: %d"
+ " band: %d. Disabling HT.\n",
+ sdata->name, cbss->channel->center_freq,
+ ht_cfreq, ht_oper->primary_chan,
+ cbss->channel->band);
+ ht_oper = NULL;
+ }
+ }
+
+ if (ht_oper) {
+ channel_type = NL80211_CHAN_HT20;
+
+ if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
+ switch (ht_oper->ht_param &
+ IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
+ case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ channel_type = NL80211_CHAN_HT40PLUS;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ channel_type = NL80211_CHAN_HT40MINUS;
+ break;
+ }
+ }
+ }
+
+ if (!ieee80211_set_channel_type(local, sdata, channel_type)) {
+ /* can only fail due to HT40+/- mismatch */
+ channel_type = NL80211_CHAN_HT20;
+ printk(KERN_DEBUG
+ "%s: disabling 40 MHz due to multi-vif mismatch\n",
+ sdata->name);
+ ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ;
+ WARN_ON(!ieee80211_set_channel_type(local, sdata,
+ channel_type));
+ }
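
The channel-type selection above, in isolation: the secondary channel
offset field of the HT operation element maps to HT40+/-, falling back
to HT20 (the constant values below match the IEEE80211_HT_PARAM_*
definitions but are redefined here to keep the sketch self-contained):

	enum chan_type { CHAN_NO_HT, CHAN_HT20, CHAN_HT40PLUS, CHAN_HT40MINUS };

	#define SEC_OFFSET_MASK	0x3
	#define SEC_ABOVE	1	/* secondary channel 20 MHz above primary */
	#define SEC_BELOW	3	/* secondary channel 20 MHz below primary */

	static enum chan_type ht_oper_chan_type(int have_ht_oper,
						unsigned char ht_param,
						int we_support_40mhz)
	{
		if (!have_ht_oper)
			return CHAN_NO_HT;
		if (!we_support_40mhz)
			return CHAN_HT20;

		switch (ht_param & SEC_OFFSET_MASK) {
		case SEC_ABOVE:
			return CHAN_HT40PLUS;
		case SEC_BELOW:
			return CHAN_HT40MINUS;
		default:
			return CHAN_HT20;
		}
	}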
+
local->oper_channel = cbss->channel;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+ ieee80211_hw_config(local, 0);
if (!have_sta) {
- struct ieee80211_supported_band *sband;
u32 rates = 0, basic_rates = 0;
bool have_higher_than_11mbit;
int min_rate = INT_MAX, min_rate_index = -1;
- sband = sdata->local->hw.wiphy->bands[cbss->channel->band];
-
ieee80211_get_rates(sband, bss->supp_rates,
bss->supp_rates_len,
&rates, &basic_rates,
@@ -3141,7 +3162,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
return err;
}
} else
- WARN_ON_ONCE(compare_ether_addr(ifmgd->bssid, cbss->bssid));
+ WARN_ON_ONCE(!ether_addr_equal(ifmgd->bssid, cbss->bssid));
return 0;
}
@@ -3281,7 +3302,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
bool match;
/* keep sta info, bssid if matching */
- match = compare_ether_addr(ifmgd->bssid, req->bss->bssid) == 0;
+ match = ether_addr_equal(ifmgd->bssid, req->bss->bssid);
ieee80211_destroy_auth_data(sdata, match);
}
@@ -3311,7 +3332,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
/* Also disable HT if we don't support it or the AP doesn't use WMM */
sband = local->hw.wiphy->bands[req->bss->channel->band];
if (!sband->ht_cap.ht_supported ||
- local->hw.queues < 4 || !bss->wmm_used)
+ local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used)
ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
@@ -3334,11 +3355,12 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
ifmgd->ap_smps = ifmgd->req_smps;
assoc_data->capability = req->bss->capability;
- assoc_data->wmm = bss->wmm_used && (local->hw.queues >= 4);
+ assoc_data->wmm = bss->wmm_used &&
+ (local->hw.queues >= IEEE80211_NUM_ACS);
assoc_data->supp_rates = bss->supp_rates;
assoc_data->supp_rates_len = bss->supp_rates_len;
- assoc_data->ht_information_ie =
- ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_INFORMATION);
+ assoc_data->ht_operation_ie =
+ ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_OPERATION);
if (bss->wmm_used && bss->uapsd_supported &&
(sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
@@ -3387,8 +3409,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
*/
printk(KERN_DEBUG "%s: waiting for beacon from %pM\n",
sdata->name, ifmgd->bssid);
- assoc_data->timeout = jiffies +
- TU_TO_EXP_TIME(req->bss->beacon_interval);
+ assoc_data->timeout = TU_TO_EXP_TIME(req->bss->beacon_interval);
} else {
assoc_data->have_beacon = true;
assoc_data->sent_assoc = false;
@@ -3441,7 +3462,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
sdata->name, req->bssid, req->reason_code);
if (ifmgd->associated &&
- compare_ether_addr(ifmgd->associated->bssid, req->bssid) == 0)
+ ether_addr_equal(ifmgd->associated->bssid, req->bssid))
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
req->reason_code, true, frame_buf);
else
@@ -3498,7 +3519,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
return 0;
}
-void ieee80211_mgd_teardown(struct ieee80211_sub_if_data *sdata)
+void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index ef8eba1d736d..af1c4e26e965 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -127,6 +127,10 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
drv_remove_interface(local, sdata);
}
+ sdata = rtnl_dereference(local->monitor_sdata);
+ if (sdata)
+ drv_remove_interface(local, sdata);
+
/* stop hardware - this must stop RX */
if (local->open_count)
ieee80211_stop_device(local);
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index fbb1efdc4d04..6e4fd32c6617 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -17,6 +17,7 @@
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "sta_info.h"
+#include "driver-ops.h"
struct rate_control_ref {
struct ieee80211_local *local;
@@ -63,8 +64,7 @@ static inline void rate_control_rate_init(struct sta_info *sta)
static inline void rate_control_rate_update(struct ieee80211_local *local,
struct ieee80211_supported_band *sband,
- struct sta_info *sta, u32 changed,
- enum nl80211_channel_type oper_chan_type)
+ struct sta_info *sta, u32 changed)
{
struct rate_control_ref *ref = local->rate_ctrl;
struct ieee80211_sta *ista = &sta->sta;
@@ -72,7 +72,8 @@ static inline void rate_control_rate_update(struct ieee80211_local *local,
if (ref && ref->ops->rate_update)
ref->ops->rate_update(ref->priv, sband, ista,
- priv_sta, changed, oper_chan_type);
+ priv_sta, changed);
+ drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
}
static inline void *rate_control_alloc_sta(struct rate_control_ref *ref,
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index b39dda523f39..79633ae06fd6 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -334,14 +334,15 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
static void
-calc_rate_durations(struct ieee80211_local *local, struct minstrel_rate *d,
+calc_rate_durations(enum ieee80211_band band,
+ struct minstrel_rate *d,
struct ieee80211_rate *rate)
{
int erp = !!(rate->flags & IEEE80211_RATE_ERP_G);
- d->perfect_tx_time = ieee80211_frame_duration(local, 1200,
+ d->perfect_tx_time = ieee80211_frame_duration(band, 1200,
rate->bitrate, erp, 1);
- d->ack_time = ieee80211_frame_duration(local, 10,
+ d->ack_time = ieee80211_frame_duration(band, 10,
rate->bitrate, erp, 1);
}
@@ -379,14 +380,14 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
{
struct minstrel_sta_info *mi = priv_sta;
struct minstrel_priv *mp = priv;
- struct ieee80211_local *local = hw_to_local(mp->hw);
struct ieee80211_rate *ctl_rate;
unsigned int i, n = 0;
unsigned int t_slot = 9; /* FIXME: get real slot time */
mi->lowest_rix = rate_lowest_index(sband, sta);
ctl_rate = &sband->bitrates[mi->lowest_rix];
- mi->sp_ack_dur = ieee80211_frame_duration(local, 10, ctl_rate->bitrate,
+ mi->sp_ack_dur = ieee80211_frame_duration(sband->band, 10,
+ ctl_rate->bitrate,
!!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1);
for (i = 0; i < sband->n_bitrates; i++) {
@@ -402,7 +403,7 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
mr->rix = i;
mr->bitrate = sband->bitrates[i].bitrate / 5;
- calc_rate_durations(local, mr, &sband->bitrates[i]);
+ calc_rate_durations(sband->band, mr, &sband->bitrates[i]);
/* calculate maximum number of retransmissions before
* fallback (based on maximum segment size) */
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 16e0b277b9a8..2d1acc6c5445 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -686,14 +686,12 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
static void
minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta,
- enum nl80211_channel_type oper_chan_type)
+ struct ieee80211_sta *sta, void *priv_sta)
{
struct minstrel_priv *mp = priv;
struct minstrel_ht_sta_priv *msp = priv_sta;
struct minstrel_ht_sta *mi = &msp->ht;
struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs;
- struct ieee80211_local *local = hw_to_local(mp->hw);
u16 sta_cap = sta->ht_cap.cap;
int n_supported = 0;
int ack_dur;
@@ -712,8 +710,8 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
memset(mi, 0, sizeof(*mi));
mi->stats_update = jiffies;
- ack_dur = ieee80211_frame_duration(local, 10, 60, 1, 1);
- mi->overhead = ieee80211_frame_duration(local, 0, 60, 1, 1) + ack_dur;
+ ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1);
+ mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1) + ack_dur;
mi->overhead_rtscts = mi->overhead + 2 * ack_dur;
mi->avg_ampdu_len = MINSTREL_FRAC(1, 1);
@@ -735,10 +733,6 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING)
mi->tx_flags |= IEEE80211_TX_CTL_LDPC;
- if (oper_chan_type != NL80211_CHAN_HT40MINUS &&
- oper_chan_type != NL80211_CHAN_HT40PLUS)
- sta_cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
-
smps = (sta_cap & IEEE80211_HT_CAP_SM_PS) >>
IEEE80211_HT_CAP_SM_PS_SHIFT;
@@ -788,17 +782,15 @@ static void
minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta, void *priv_sta)
{
- struct minstrel_priv *mp = priv;
-
- minstrel_ht_update_caps(priv, sband, sta, priv_sta, mp->hw->conf.channel_type);
+ minstrel_ht_update_caps(priv, sband, sta, priv_sta);
}
static void
minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta, void *priv_sta,
- u32 changed, enum nl80211_channel_type oper_chan_type)
+ u32 changed)
{
- minstrel_ht_update_caps(priv, sband, sta, priv_sta, oper_chan_type);
+ minstrel_ht_update_caps(priv, sband, sta, priv_sta);
}
static void *
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index bcfe8c77c839..7bcecf73aafb 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -103,7 +103,7 @@ static void
ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
struct sk_buff *skb,
struct ieee80211_rate *rate,
- int rtap_len)
+ int rtap_len, bool has_fcs)
{
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_radiotap_header *rthdr;
@@ -134,7 +134,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
}
/* IEEE80211_RADIOTAP_FLAGS */
- if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
+ if (has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS))
*pos |= IEEE80211_RADIOTAP_F_FCS;
if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
*pos |= IEEE80211_RADIOTAP_F_BADFCS;
@@ -204,14 +204,14 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
if (status->flag & RX_FLAG_HT) {
rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
- *pos++ = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
- IEEE80211_RADIOTAP_MCS_HAVE_GI |
- IEEE80211_RADIOTAP_MCS_HAVE_BW;
+ *pos++ = local->hw.radiotap_mcs_details;
*pos = 0;
if (status->flag & RX_FLAG_SHORT_GI)
*pos |= IEEE80211_RADIOTAP_MCS_SGI;
if (status->flag & RX_FLAG_40MHZ)
*pos |= IEEE80211_RADIOTAP_MCS_BW_40;
+ if (status->flag & RX_FLAG_HT_GF)
+ *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
pos++;
*pos++ = status->rate_idx;
}
@@ -294,7 +294,8 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
}
/* prepend radiotap information */
- ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom);
+ ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
+ true);
skb_reset_mac_header(skb);
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -425,6 +426,7 @@ ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
if (test_bit(SCAN_HW_SCANNING, &local->scanning) ||
test_bit(SCAN_SW_SCANNING, &local->scanning) ||
+ test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) ||
local->sched_scanning)
return ieee80211_scan_rx(rx->sdata, skb);
@@ -490,12 +492,12 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
if (ieee80211_has_tods(hdr->frame_control) ||
!ieee80211_has_fromds(hdr->frame_control))
return RX_DROP_MONITOR;
- if (compare_ether_addr(hdr->addr3, dev_addr) == 0)
+ if (ether_addr_equal(hdr->addr3, dev_addr))
return RX_DROP_MONITOR;
} else {
if (!ieee80211_has_a4(hdr->frame_control))
return RX_DROP_MONITOR;
- if (compare_ether_addr(hdr->addr4, dev_addr) == 0)
+ if (ether_addr_equal(hdr->addr4, dev_addr))
return RX_DROP_MONITOR;
}
}
@@ -793,8 +795,7 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
/* reset session timer */
if (tid_agg_rx->timeout)
- mod_timer(&tid_agg_rx->session_timer,
- TU_TO_EXP_TIME(tid_agg_rx->timeout));
+ tid_agg_rx->last_rx = jiffies;
/* if this mpdu is fragmented - terminate rx aggregation session */
sc = le16_to_cpu(hdr->seq_ctrl);
@@ -1274,7 +1275,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
NL80211_IFTYPE_ADHOC);
- if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0) {
+ if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid)) {
sta->last_rx = jiffies;
if (ieee80211_is_data(hdr->frame_control)) {
sta->last_rx_rate_idx = status->rate_idx;
@@ -1437,8 +1438,8 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
*/
if (((hdr->frame_control ^ f_hdr->frame_control) &
cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
- compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
- compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
+ !ether_addr_equal(hdr->addr1, f_hdr->addr1) ||
+ !ether_addr_equal(hdr->addr2, f_hdr->addr2))
continue;
if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
@@ -1713,8 +1714,8 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
* of whether the frame was encrypted or not.
*/
if (ehdr->h_proto == rx->sdata->control_port_protocol &&
- (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 ||
- compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
+ (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
+ ether_addr_equal(ehdr->h_dest, pae_group_addr)))
return true;
if (ieee80211_802_1x_port_control(rx) ||
@@ -1751,9 +1752,9 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
* local net stack and back to the wireless medium
*/
xmit_skb = skb_copy(skb, GFP_ATOMIC);
- if (!xmit_skb && net_ratelimit())
- printk(KERN_DEBUG "%s: failed to clone "
- "multicast frame\n", dev->name);
+ if (!xmit_skb)
+ net_dbg_ratelimited("%s: failed to clone multicast frame\n",
+ dev->name);
} else {
dsta = sta_info_get(sdata, skb->data);
if (dsta) {
@@ -1924,7 +1925,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
mpp_path_add(proxied_addr, mpp_addr, sdata);
} else {
spin_lock_bh(&mppath->state_lock);
- if (compare_ether_addr(mppath->mpp, mpp_addr) != 0)
+ if (!ether_addr_equal(mppath->mpp, mpp_addr))
memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
spin_unlock_bh(&mppath->state_lock);
}
@@ -1933,7 +1934,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
/* Frame has reached destination. Don't forward */
if (!is_multicast_ether_addr(hdr->addr1) &&
- compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0)
+ ether_addr_equal(sdata->vif.addr, hdr->addr3))
return RX_CONTINUE;
q = ieee80211_select_queue_80211(local, skb, hdr);
@@ -1956,9 +1957,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
fwd_skb = skb_copy(skb, GFP_ATOMIC);
if (!fwd_skb) {
- if (net_ratelimit())
- printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
- sdata->name);
+ net_dbg_ratelimited("%s: failed to clone mesh frame\n",
+ sdata->name);
goto out;
}
@@ -2121,13 +2121,13 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb;
struct ieee80211_mgmt *resp;
- if (compare_ether_addr(mgmt->da, sdata->vif.addr) != 0) {
+ if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) {
/* Not to own unicast address */
return;
}
- if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 ||
- compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) {
+ if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) ||
+ !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) {
/* Not from the current AP or not associated yet. */
return;
}
@@ -2269,11 +2269,8 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
sband = rx->local->hw.wiphy->bands[status->band];
- rate_control_rate_update(
- local, sband, rx->sta,
- IEEE80211_RC_SMPS_CHANGED,
- ieee80211_get_tx_channel_type(
- local, local->_oper_channel_type));
+ rate_control_rate_update(local, sband, rx->sta,
+ IEEE80211_RC_SMPS_CHANGED);
goto handled;
}
default:
@@ -2340,7 +2337,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
if (sdata->vif.type != NL80211_IFTYPE_STATION)
break;
- if (compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid))
+ if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid))
break;
goto queue;
@@ -2571,7 +2568,8 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
goto out_free_skb;
/* prepend radiotap information */
- ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom);
+ ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
+ false);
skb_set_mac_header(skb, 0);
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -2773,7 +2771,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
if (!bssid && !sdata->u.mgd.use_4addr)
return 0;
if (!multicast &&
- compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) {
+ !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
if (!(sdata->dev->flags & IFF_PROMISC) ||
sdata->u.mgd.use_4addr)
return 0;
@@ -2791,8 +2789,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
return 0;
status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
} else if (!multicast &&
- compare_ether_addr(sdata->vif.addr,
- hdr->addr1) != 0) {
+ !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
if (!(sdata->dev->flags & IFF_PROMISC))
return 0;
status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
@@ -2808,8 +2805,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
break;
case NL80211_IFTYPE_MESH_POINT:
if (!multicast &&
- compare_ether_addr(sdata->vif.addr,
- hdr->addr1) != 0) {
+ !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
if (!(sdata->dev->flags & IFF_PROMISC))
return 0;
@@ -2819,8 +2815,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_AP:
if (!bssid) {
- if (compare_ether_addr(sdata->vif.addr,
- hdr->addr1))
+ if (!ether_addr_equal(sdata->vif.addr, hdr->addr1))
return 0;
} else if (!ieee80211_bssid_match(bssid,
sdata->vif.addr)) {
@@ -2842,7 +2837,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
case NL80211_IFTYPE_WDS:
if (bssid || !ieee80211_is_data(hdr->frame_control))
return 0;
- if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
+ if (!ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2))
return 0;
break;
default:
@@ -2919,6 +2914,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
local->dot11ReceivedFragmentCount++;
if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
+ test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) ||
test_bit(SCAN_SW_SCANNING, &local->scanning)))
status->rx_flags |= IEEE80211_RX_IN_SCAN;
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index c70e17677135..169da0742c81 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -29,20 +29,6 @@
#define IEEE80211_CHANNEL_TIME (HZ / 33)
#define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 8)
-struct ieee80211_bss *
-ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
- u8 *ssid, u8 ssid_len)
-{
- struct cfg80211_bss *cbss;
-
- cbss = cfg80211_get_bss(local->hw.wiphy,
- ieee80211_get_channel(local->hw.wiphy, freq),
- bssid, ssid, ssid_len, 0, 0);
- if (!cbss)
- return NULL;
- return (void *)cbss->priv;
-}
-
static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss)
{
struct ieee80211_bss *bss = (void *)cbss->priv;
@@ -208,7 +194,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
presp = ieee80211_is_probe_resp(fc);
if (presp) {
/* ignore ProbeResp to foreign address */
- if (compare_ether_addr(mgmt->da, sdata->vif.addr))
+ if (!ether_addr_equal(mgmt->da, sdata->vif.addr))
return RX_DROP_MONITOR;
presp = true;
@@ -387,6 +373,57 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
return 0;
}
+static bool ieee80211_can_scan(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata)
+{
+ if (!list_empty(&local->work_list))
+ return false;
+
+ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+ sdata->u.mgd.flags & (IEEE80211_STA_BEACON_POLL |
+ IEEE80211_STA_CONNECTION_POLL))
+ return false;
+
+ return true;
+}
+
+void ieee80211_run_deferred_scan(struct ieee80211_local *local)
+{
+ lockdep_assert_held(&local->mtx);
+
+ if (!local->scan_req || local->scanning)
+ return;
+
+ if (!ieee80211_can_scan(local, local->scan_sdata))
+ return;
+
+ ieee80211_queue_delayed_work(&local->hw, &local->scan_work,
+ round_jiffies_relative(0));
+}
+
+static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
+ unsigned long *next_delay)
+{
+ int i;
+ struct ieee80211_sub_if_data *sdata = local->scan_sdata;
+ enum ieee80211_band band = local->hw.conf.channel->band;
+
+ for (i = 0; i < local->scan_req->n_ssids; i++)
+ ieee80211_send_probe_req(
+ sdata, NULL,
+ local->scan_req->ssids[i].ssid,
+ local->scan_req->ssids[i].ssid_len,
+ local->scan_req->ie, local->scan_req->ie_len,
+ local->scan_req->rates[band], false,
+ local->scan_req->no_cck);
+
+ /*
+ * After sending probe requests, wait for probe responses
+ * on the channel.
+ */
+ *next_delay = IEEE80211_CHANNEL_TIME;
+ local->next_scan_state = SCAN_DECISION;
+}
static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
struct cfg80211_scan_request *req)
@@ -399,7 +436,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
if (local->scan_req)
return -EBUSY;
- if (!list_empty(&local->work_list)) {
+ if (!ieee80211_can_scan(local, sdata)) {
/* wait for the work to finish/time out */
local->scan_req = req;
local->scan_sdata = sdata;
@@ -438,10 +475,47 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
local->scan_req = req;
local->scan_sdata = sdata;
- if (local->ops->hw_scan)
+ if (local->ops->hw_scan) {
__set_bit(SCAN_HW_SCANNING, &local->scanning);
- else
+ } else if ((req->n_channels == 1) &&
+ (req->channels[0]->center_freq ==
+ local->hw.conf.channel->center_freq)) {
+
+ /* If we are scanning only on the current channel, then
+ * we do not need to stop normal activities
+ */
+ unsigned long next_delay;
+
+ __set_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning);
+
+ ieee80211_recalc_idle(local);
+
+ /* Notify the driver that the scan is starting; keep the order of
+ * operations the same as for a normal software scan, in case that
+ * matters. */
+ drv_sw_scan_start(local);
+
+ ieee80211_configure_filter(local); /* accept probe-responses */
+
+ /* We need to ensure power level is at max for scanning. */
+ ieee80211_hw_config(local, 0);
+
+ if ((req->channels[0]->flags &
+ IEEE80211_CHAN_PASSIVE_SCAN) ||
+ !local->scan_req->n_ssids) {
+ next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
+ } else {
+ ieee80211_scan_state_send_probe(local, &next_delay);
+ next_delay = IEEE80211_CHANNEL_TIME;
+ }
+
+ /* Now, just wait a bit and we are all done! */
+ ieee80211_queue_delayed_work(&local->hw, &local->scan_work,
+ next_delay);
+ return 0;
+ } else {
+ /* Do normal software scan */
__set_bit(SCAN_SW_SCANNING, &local->scanning);
+ }
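
The three-way scan-mode decision above as a standalone helper: a
hardware scan wins if the driver offers one; a single-channel request
for the operating channel becomes an on-channel scan that leaves
normal traffic running; everything else is a full software scan.

	enum scan_mode { MODE_HW_SCAN, MODE_ONCHANNEL, MODE_SW_SCAN };

	static enum scan_mode pick_scan_mode(int have_hw_scan, int n_channels,
					     int req_freq, int oper_freq)
	{
		if (have_hw_scan)
			return MODE_HW_SCAN;
		if (n_channels == 1 && req_freq == oper_freq)
			return MODE_ONCHANNEL;
		return MODE_SW_SCAN;
	}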
ieee80211_recalc_idle(local);
@@ -598,30 +672,6 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
local->next_scan_state = SCAN_SEND_PROBE;
}
-static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
- unsigned long *next_delay)
-{
- int i;
- struct ieee80211_sub_if_data *sdata = local->scan_sdata;
- enum ieee80211_band band = local->hw.conf.channel->band;
-
- for (i = 0; i < local->scan_req->n_ssids; i++)
- ieee80211_send_probe_req(
- sdata, NULL,
- local->scan_req->ssids[i].ssid,
- local->scan_req->ssids[i].ssid_len,
- local->scan_req->ie, local->scan_req->ie_len,
- local->scan_req->rates[band], false,
- local->scan_req->no_cck);
-
- /*
- * After sending probe requests, wait for probe responses
- * on the channel.
- */
- *next_delay = IEEE80211_CHANNEL_TIME;
- local->next_scan_state = SCAN_DECISION;
-}
-
static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
unsigned long *next_delay)
{
@@ -672,6 +722,12 @@ void ieee80211_scan_work(struct work_struct *work)
sdata = local->scan_sdata;
+ /* When scanning on-channel, the first callback means the scan completed. */
+ if (test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning)) {
+ aborted = test_and_clear_bit(SCAN_ABORTED, &local->scanning);
+ goto out_complete;
+ }
+
if (test_and_clear_bit(SCAN_COMPLETED, &local->scanning)) {
aborted = test_and_clear_bit(SCAN_ABORTED, &local->scanning);
goto out_complete;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 38137cb5f6f0..f5b1638fbf80 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -102,7 +102,7 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
lockdep_is_held(&local->sta_mtx));
while (sta) {
if (sta->sdata == sdata &&
- compare_ether_addr(sta->sta.addr, addr) == 0)
+ ether_addr_equal(sta->sta.addr, addr))
break;
sta = rcu_dereference_check(sta->hnext,
lockdep_is_held(&local->sta_mtx));
@@ -125,7 +125,7 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
while (sta) {
if ((sta->sdata == sdata ||
(sta->sdata->bss && sta->sdata->bss == sdata->bss)) &&
- compare_ether_addr(sta->sta.addr, addr) == 0)
+ ether_addr_equal(sta->sta.addr, addr))
break;
sta = rcu_dereference_check(sta->hnext,
lockdep_is_held(&local->sta_mtx));
@@ -302,7 +302,7 @@ static int sta_info_insert_check(struct sta_info *sta)
if (unlikely(!ieee80211_sdata_running(sdata)))
return -ENETDOWN;
- if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->vif.addr) == 0 ||
+ if (WARN_ON(ether_addr_equal(sta->sta.addr, sdata->vif.addr) ||
is_multicast_ether_addr(sta->sta.addr)))
return -EINVAL;
@@ -912,7 +912,7 @@ struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw,
*/
for_each_sta_info(hw_to_local(hw), addr, sta, nxt) {
if (localaddr &&
- compare_ether_addr(sta->sdata->vif.addr, localaddr) != 0)
+ !ether_addr_equal(sta->sdata->vif.addr, localaddr))
continue;
if (!sta->uploaded)
return NULL;
@@ -1195,13 +1195,15 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
ieee80211_is_qos_nullfunc(hdr->frame_control))
qoshdr = ieee80211_get_qos_ctl(hdr);
- /* set EOSP for the frame */
- if (reason == IEEE80211_FRAME_RELEASE_UAPSD &&
- qoshdr && skb_queue_empty(&frames))
- *qoshdr |= IEEE80211_QOS_CTL_EOSP;
+ /* end service period after last frame */
+ if (skb_queue_empty(&frames)) {
+ if (reason == IEEE80211_FRAME_RELEASE_UAPSD &&
+ qoshdr)
+ *qoshdr |= IEEE80211_QOS_CTL_EOSP;
- info->flags |= IEEE80211_TX_STATUS_EOSP |
- IEEE80211_TX_CTL_REQ_TX_STATUS;
+ info->flags |= IEEE80211_TX_STATUS_EOSP |
+ IEEE80211_TX_CTL_REQ_TX_STATUS;
+ }
if (qoshdr)
tids |= BIT(*qoshdr & IEEE80211_QOS_CTL_TID_MASK);
@@ -1415,15 +1417,19 @@ int sta_info_move_state(struct sta_info *sta,
if (sta->sta_state == IEEE80211_STA_AUTH) {
set_bit(WLAN_STA_ASSOC, &sta->_flags);
} else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
- if (sta->sdata->vif.type == NL80211_IFTYPE_AP)
- atomic_dec(&sta->sdata->u.ap.num_sta_authorized);
+ if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
+ (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
+ !sta->sdata->u.vlan.sta))
+ atomic_dec(&sta->sdata->bss->num_mcast_sta);
clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
}
break;
case IEEE80211_STA_AUTHORIZED:
if (sta->sta_state == IEEE80211_STA_ASSOC) {
- if (sta->sdata->vif.type == NL80211_IFTYPE_AP)
- atomic_inc(&sta->sdata->u.ap.num_sta_authorized);
+ if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
+ (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
+ !sta->sdata->u.vlan.sta))
+ atomic_inc(&sta->sdata->bss->num_mcast_sta);
set_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
}
break;
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index ab0576827baf..3bb24a121c95 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -55,6 +55,7 @@
* @WLAN_STA_4ADDR_EVENT: 4-addr event was already sent for this frame.
* @WLAN_STA_INSERTED: This station is inserted into the hash table.
* @WLAN_STA_RATE_CONTROL: rate control was initialized for this station.
+ * @WLAN_STA_TOFFSET_KNOWN: toffset calculated for this station is valid.
*/
enum ieee80211_sta_info_flags {
WLAN_STA_AUTH,
@@ -76,6 +77,7 @@ enum ieee80211_sta_info_flags {
WLAN_STA_4ADDR_EVENT,
WLAN_STA_INSERTED,
WLAN_STA_RATE_CONTROL,
+ WLAN_STA_TOFFSET_KNOWN,
};
#define STA_TID_NUM 16
@@ -101,6 +103,7 @@ enum ieee80211_sta_info_flags {
* @dialog_token: dialog token for aggregation session
* @timeout: session timeout value to be filled in ADDBA requests
* @state: session state (see above)
+ * @last_tx: jiffies of last tx activity
* @stop_initiator: initiator of a session stop
* @tx_stop: TX DelBA frame when stopping
* @buf_size: reorder buffer size at receiver
@@ -122,6 +125,7 @@ struct tid_ampdu_tx {
struct timer_list addba_resp_timer;
struct sk_buff_head pending;
unsigned long state;
+ unsigned long last_tx;
u16 timeout;
u8 dialog_token;
u8 stop_initiator;
@@ -139,6 +143,7 @@ struct tid_ampdu_tx {
* @reorder_time: jiffies when skb was added
* @session_timer: check if peer keeps Tx-ing on the TID (by timeout value)
* @reorder_timer: releases expired frames from the reorder buffer.
+ * @last_rx: jiffies of last rx activity
* @head_seq_num: head sequence number in reordering buffer.
* @stored_mpdu_num: number of MPDUs in reordering buffer
* @ssn: Starting Sequence Number expected to be aggregated.
@@ -163,6 +168,7 @@ struct tid_ampdu_rx {
unsigned long *reorder_time;
struct timer_list session_timer;
struct timer_list reorder_timer;
+ unsigned long last_rx;
u16 head_seq_num;
u16 stored_mpdu_num;
u16 ssn;
@@ -264,6 +270,7 @@ struct sta_ampdu_mlme {
* @plink_timeout: timeout of peer link
* @plink_timer: peer link watch timer
* @plink_timer_was_running: used by suspend/resume to restore timers
+ * @t_offset: timing offset relative to this host
* @debugfs: debug filesystem info
* @dead: set to true when sta is unlinked
* @uploaded: set to true when sta is uploaded to the driver
@@ -353,6 +360,9 @@ struct sta_info {
enum nl80211_plink_state plink_state;
u32 plink_timeout;
struct timer_list plink_timer;
+ s64 t_offset;
+ s64 t_offset_setpoint;
+ enum nl80211_channel_type ch_type;
#endif
#ifdef CONFIG_MAC80211_DEBUGFS
@@ -365,6 +375,8 @@ struct sta_info {
unsigned int lost_packets;
unsigned int beacon_loss_count;
+ bool supports_40mhz;
+
/* keep last! */
struct ieee80211_sta sta;
};
@@ -490,7 +502,7 @@ void for_each_sta_info_type_check(struct ieee80211_local *local,
nxt = _sta ? rcu_dereference(_sta->hnext) : NULL \
) \
/* compare address and run code only if it matches */ \
- if (compare_ether_addr(_sta->sta.addr, (_addr)) == 0)
+ if (ether_addr_equal(_sta->sta.addr, (_addr)))
/*
* Get STA info by index, BROKEN!
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 5f8f89e89d6b..28cfa981cfb1 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -355,7 +355,13 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
int rtap_len;
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
- if (info->status.rates[i].idx < 0) {
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
+ !(info->flags & IEEE80211_TX_STAT_AMPDU)) {
+ /* only the first frame of an aggregate carries status info */
+ info->status.rates[i].idx = -1;
+ info->status.rates[i].count = 0;
+ break;
+ } else if (info->status.rates[i].idx < 0) {
break;
} else if (i >= hw->max_report_rates) {
/* the HW cannot have attempted that rate */
@@ -378,7 +384,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
for_each_sta_info(local, hdr->addr1, sta, tmp) {
/* skip wrong virtual interface */
- if (compare_ether_addr(hdr->addr2, sta->sdata->vif.addr))
+ if (!ether_addr_equal(hdr->addr2, sta->sdata->vif.addr))
continue;
if (info->flags & IEEE80211_TX_STATUS_EOSP)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 782a60198df4..847215bb2a6f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -153,13 +153,13 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
/* Don't calculate ACKs for QoS Frames with NoAck Policy set */
if (ieee80211_is_data_qos(hdr->frame_control) &&
- *(ieee80211_get_qos_ctl(hdr)) | IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
+ *(ieee80211_get_qos_ctl(hdr)) & IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
dur = 0;
else
/* Time needed to transmit ACK
* (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
* to closest integer */
- dur = ieee80211_frame_duration(local, 10, rate, erp,
+ dur = ieee80211_frame_duration(sband->band, 10, rate, erp,
tx->sdata->vif.bss_conf.use_short_preamble);
if (next_frag_len) {
@@ -167,7 +167,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
* transmit next fragment plus ACK and 2 x SIFS. */
dur *= 2; /* ACK + SIFS */
/* next fragment */
- dur += ieee80211_frame_duration(local, next_frag_len,
+ dur += ieee80211_frame_duration(sband->band, next_frag_len,
txrate->bitrate, erp,
tx->sdata->vif.bss_conf.use_short_preamble);
}
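A worked instance of the ACK duration quoted in the comment above, assuming the OFDM timing that ieee80211_frame_duration (changed later in this patch) applies on 5 GHz/ERP:

/*
 * ACK at 24 Mb/s OFDM (rate = 240 in 100 kb/s units), len = 10 bytes:
 *   bits    = 16 (SERVICE) + 8 * (10 + 4 FCS) + 6 (tail) = 134
 *   symbols = DIV_ROUND_UP(134 * 10, 4 * 240)            = 2
 *   dur     = 16 (SIFS) + 16 (preamble) + 4 (SIGNAL) + 4 * 2 = 44 usec
 */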
@@ -230,9 +230,9 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
* changed via debugfs, user needs to reassociate manually to have
* everything in sync.
*/
- if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED)
- && (ifmgd->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
- && skb_get_queue_mapping(tx->skb) == 0)
+ if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED) &&
+ (ifmgd->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) &&
+ skb_get_queue_mapping(tx->skb) == IEEE80211_AC_VO)
return TX_CONTINUE;
if (local->hw.conf.flags & IEEE80211_CONF_PS) {
@@ -306,7 +306,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
}
} else if (unlikely(tx->sdata->vif.type == NL80211_IFTYPE_AP &&
ieee80211_is_data(hdr->frame_control) &&
- !atomic_read(&tx->sdata->u.ap.num_sta_authorized))) {
+ !atomic_read(&tx->sdata->u.ap.num_mcast_sta))) {
/*
* No associated STAs - no need to send multicast
* frames.
@@ -400,6 +400,8 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
return TX_CONTINUE;
info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
+ if (tx->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
+ info->hw_queue = tx->sdata->vif.cab_queue;
/* device releases frame after DTIM beacon */
if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING))
@@ -411,9 +413,8 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= AP_MAX_BC_BUFFER) {
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- if (net_ratelimit())
- printk(KERN_DEBUG "%s: BC TX buffer full - dropping the oldest frame\n",
- tx->sdata->name);
+ net_dbg_ratelimited("%s: BC TX buffer full - dropping the oldest frame\n",
+ tx->sdata->name);
#endif
dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf));
} else
@@ -474,10 +475,8 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- if (net_ratelimit())
- printk(KERN_DEBUG "%s: STA %pM TX buffer for "
- "AC %d full - dropping oldest frame\n",
- tx->sdata->name, sta->sta.addr, ac);
+ net_dbg_ratelimited("%s: STA %pM TX buffer for AC %d full - dropping oldest frame\n",
+ tx->sdata->name, sta->sta.addr, ac);
#endif
dev_kfree_skb(old);
} else
@@ -1118,8 +1117,7 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
/* reset session timer */
if (reset_agg_timer && tid_tx->timeout)
- mod_timer(&tid_tx->session_timer,
- TU_TO_EXP_TIME(tid_tx->timeout));
+ tid_tx->last_tx = jiffies;
return queued;
}
@@ -1158,7 +1156,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
tx->sta = rcu_dereference(sdata->u.vlan.sta);
if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
return TX_DROP;
- } else if (info->flags & IEEE80211_TX_CTL_INJECTED) {
+ } else if (info->flags & IEEE80211_TX_CTL_INJECTED ||
+ tx->sdata->control_port_protocol == tx->skb->protocol) {
tx->sta = sta_info_get_bss(sdata, hdr->addr1);
}
if (!tx->sta)
@@ -1215,11 +1214,19 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
bool txpending)
{
struct sk_buff *skb, *tmp;
- struct ieee80211_tx_info *info;
unsigned long flags;
skb_queue_walk_safe(skbs, skb, tmp) {
- int q = skb_get_queue_mapping(skb);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ int q = info->hw_queue;
+
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+ if (WARN_ON_ONCE(q >= local->hw.queues)) {
+ __skb_unlink(skb, skbs);
+ dev_kfree_skb(skb);
+ continue;
+ }
+#endif
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
if (local->queue_stop_reasons[q] ||
@@ -1241,7 +1248,6 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
}
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
- info = IEEE80211_SKB_CB(skb);
info->control.vif = vif;
info->control.sta = sta;
@@ -1284,8 +1290,16 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
switch (sdata->vif.type) {
case NL80211_IFTYPE_MONITOR:
- sdata = NULL;
- vif = NULL;
+ sdata = rcu_dereference(local->monitor_sdata);
+ if (sdata) {
+ vif = &sdata->vif;
+ info->hw_queue =
+ vif->hw_queue[skb_get_queue_mapping(skb)];
+ } else if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) {
+ dev_kfree_skb(skb);
+ return true;
+ } else
+ vif = NULL;
break;
case NL80211_IFTYPE_AP_VLAN:
sdata = container_of(sdata->bss,
@@ -1400,6 +1414,12 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
tx.channel = local->hw.conf.channel;
info->band = tx.channel->band;
+ /* set up hw_queue value early */
+ if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
+ !(local->hw.flags & IEEE80211_HW_QUEUE_CONTROL))
+ info->hw_queue =
+ sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
+
if (!invoke_tx_handlers(&tx))
result = __ieee80211_tx(local, &tx.skbs, led_len,
tx.sta, txpending);
@@ -1468,12 +1488,12 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
if (ieee80211_vif_is_mesh(&sdata->vif) &&
ieee80211_is_data(hdr->frame_control) &&
- !is_multicast_ether_addr(hdr->addr1))
- if (mesh_nexthop_resolve(skb, sdata)) {
- /* skb queued: don't free */
- rcu_read_unlock();
- return;
- }
+ !is_multicast_ether_addr(hdr->addr1) &&
+ mesh_nexthop_resolve(skb, sdata)) {
+ /* skb queued: don't free */
+ rcu_read_unlock();
+ return;
+ }
ieee80211_set_qos_hdr(sdata, skb);
ieee80211_tx(sdata, skb, false);
@@ -1642,7 +1662,7 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
skb->len >= len_rthdr + hdrlen + sizeof(rfc1042_header) + 2) {
u8 *payload = (u8 *)hdr + hdrlen;
- if (compare_ether_addr(payload, rfc1042_header) == 0)
+ if (ether_addr_equal(payload, rfc1042_header))
skb->protocol = cpu_to_be16((payload[6] << 8) |
payload[7]);
}
@@ -1675,7 +1695,7 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
tmp_sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
tmp_sdata->vif.type == NL80211_IFTYPE_WDS)
continue;
- if (compare_ether_addr(tmp_sdata->vif.addr, hdr->addr2) == 0) {
+ if (ether_addr_equal(tmp_sdata->vif.addr, hdr->addr2)) {
sdata = tmp_sdata;
break;
}
@@ -1792,9 +1812,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
* is being proxied by a portal (i.e. portal address
* differs from proxied address)
*/
- if (compare_ether_addr(sdata->vif.addr,
- skb->data + ETH_ALEN) == 0 &&
- !(mppath && compare_ether_addr(mppath->mpp, skb->data))) {
+ if (ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN) &&
+ !(mppath && !ether_addr_equal(mppath->mpp, skb->data))) {
hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
skb->data, skb->data + ETH_ALEN);
rcu_read_unlock();
@@ -1929,7 +1948,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
wme_sta = true;
/* receiver and we are QoS enabled, use a QoS type frame */
- if (wme_sta && local->hw.queues >= 4) {
+ if (wme_sta && local->hw.queues >= IEEE80211_NUM_ACS) {
fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
hdrlen += 2;
}
@@ -1941,12 +1960,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
if (unlikely(!ieee80211_vif_is_mesh(&sdata->vif) &&
!is_multicast_ether_addr(hdr.addr1) && !authorized &&
(cpu_to_be16(ethertype) != sdata->control_port_protocol ||
- compare_ether_addr(sdata->vif.addr, skb->data + ETH_ALEN)))) {
+ !ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN)))) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- if (net_ratelimit())
- printk(KERN_DEBUG "%s: dropped frame to %pM"
- " (unauthorized port)\n", dev->name,
- hdr.addr1);
+ net_dbg_ratelimited("%s: dropped frame to %pM (unauthorized port)\n",
+ dev->name, hdr.addr1);
#endif
I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
@@ -2170,7 +2187,6 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
void ieee80211_tx_pending(unsigned long data)
{
struct ieee80211_local *local = (struct ieee80211_local *)data;
- struct ieee80211_sub_if_data *sdata;
unsigned long flags;
int i;
bool txok;
@@ -2207,8 +2223,7 @@ void ieee80211_tx_pending(unsigned long data)
}
if (skb_queue_empty(&local->pending[i]))
- list_for_each_entry_rcu(sdata, &local->interfaces, list)
- netif_wake_subqueue(sdata->dev, i);
+ ieee80211_propagate_queue_wake(local, i);
}
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
@@ -2374,6 +2389,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
IEEE80211_STYPE_BEACON);
} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
struct ieee80211_mgmt *mgmt;
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
u8 *pos;
int hdr_len = offsetof(struct ieee80211_mgmt, u.beacon) +
sizeof(mgmt->u.beacon);
@@ -2383,6 +2399,10 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
goto out;
#endif
+ if (ifmsh->sync_ops)
+ ifmsh->sync_ops->adjust_tbtt(
+ sdata);
+
skb = dev_alloc_skb(local->tx_headroom +
hdr_len +
2 + /* NULL SSID */
@@ -2390,7 +2410,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2 + 3 + /* DS params */
2 + (IEEE80211_MAX_SUPP_RATES - 8) +
2 + sizeof(struct ieee80211_ht_cap) +
- 2 + sizeof(struct ieee80211_ht_info) +
+ 2 + sizeof(struct ieee80211_ht_operation) +
2 + sdata->u.mesh.mesh_id_len +
2 + sizeof(struct ieee80211_meshconf_ie) +
sdata->u.mesh.ie_len);
@@ -2414,12 +2434,12 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
*pos++ = WLAN_EID_SSID;
*pos++ = 0x0;
- if (ieee80211_add_srates_ie(&sdata->vif, skb) ||
+ if (ieee80211_add_srates_ie(&sdata->vif, skb, true) ||
mesh_add_ds_params_ie(skb, sdata) ||
- ieee80211_add_ext_srates_ie(&sdata->vif, skb) ||
+ ieee80211_add_ext_srates_ie(&sdata->vif, skb, true) ||
mesh_add_rsn_ie(skb, sdata) ||
mesh_add_ht_cap_ie(skb, sdata) ||
- mesh_add_ht_info_ie(skb, sdata) ||
+ mesh_add_ht_oper_ie(skb, sdata) ||
mesh_add_meshid_ie(skb, sdata) ||
mesh_add_meshconf_ie(skb, sdata) ||
mesh_add_vendor_ies(skb, sdata)) {
@@ -2603,7 +2623,7 @@ struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
pos = skb_put(skb, ie_ssid_len);
*pos++ = WLAN_EID_SSID;
*pos++ = ssid_len;
- if (ssid)
+ if (ssid_len)
memcpy(pos, ssid, ssid_len);
pos += ssid_len;
@@ -2710,11 +2730,13 @@ EXPORT_SYMBOL(ieee80211_get_buffered_bc);
void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, int tid)
{
+ int ac = ieee802_1d_to_ac[tid];
+
skb_set_mac_header(skb, 0);
skb_set_network_header(skb, 0);
skb_set_transport_header(skb, 0);
- skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]);
+ skb_set_queue_mapping(skb, ac);
skb->priority = tid;
/*
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 32f7a3b3d43c..a44c6807df01 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -106,7 +106,7 @@ void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
}
}
-int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
+int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
int rate, int erp, int short_preamble)
{
int dur;
@@ -120,7 +120,7 @@ int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
* DIV_ROUND_UP() operations.
*/
- if (local->hw.conf.channel->band == IEEE80211_BAND_5GHZ || erp) {
+ if (band == IEEE80211_BAND_5GHZ || erp) {
/*
* OFDM:
*
@@ -162,10 +162,10 @@ int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
/* Exported duration function for driver use */
__le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ enum ieee80211_band band,
size_t frame_len,
struct ieee80211_rate *rate)
{
- struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
u16 dur;
int erp;
@@ -179,7 +179,7 @@ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
erp = rate->flags & IEEE80211_RATE_ERP_G;
}
- dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, erp,
+ dur = ieee80211_frame_duration(band, frame_len, rate->bitrate, erp,
short_preamble);
return cpu_to_le16(dur);
@@ -198,7 +198,7 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
u16 dur;
struct ieee80211_supported_band *sband;
- sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+ sband = local->hw.wiphy->bands[frame_txctl->band];
short_preamble = false;
@@ -213,13 +213,13 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
}
/* CTS duration */
- dur = ieee80211_frame_duration(local, 10, rate->bitrate,
+ dur = ieee80211_frame_duration(sband->band, 10, rate->bitrate,
erp, short_preamble);
/* Data frame duration */
- dur += ieee80211_frame_duration(local, frame_len, rate->bitrate,
+ dur += ieee80211_frame_duration(sband->band, frame_len, rate->bitrate,
erp, short_preamble);
/* ACK duration */
- dur += ieee80211_frame_duration(local, 10, rate->bitrate,
+ dur += ieee80211_frame_duration(sband->band, 10, rate->bitrate,
erp, short_preamble);
return cpu_to_le16(dur);
@@ -239,7 +239,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
u16 dur;
struct ieee80211_supported_band *sband;
- sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+ sband = local->hw.wiphy->bands[frame_txctl->band];
short_preamble = false;
@@ -253,11 +253,11 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
}
/* Data frame duration */
- dur = ieee80211_frame_duration(local, frame_len, rate->bitrate,
+ dur = ieee80211_frame_duration(sband->band, frame_len, rate->bitrate,
erp, short_preamble);
if (!(frame_txctl->flags & IEEE80211_TX_CTL_NO_ACK)) {
/* ACK duration */
- dur += ieee80211_frame_duration(local, 10, rate->bitrate,
+ dur += ieee80211_frame_duration(sband->band, 10, rate->bitrate,
erp, short_preamble);
}
@@ -265,17 +265,45 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(ieee80211_ctstoself_duration);
+void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
+{
+ struct ieee80211_sub_if_data *sdata;
+
+ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ int ac;
+
+ if (test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
+ continue;
+
+ if (sdata->vif.cab_queue != IEEE80211_INVAL_HW_QUEUE &&
+ local->queue_stop_reasons[sdata->vif.cab_queue] != 0)
+ continue;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ int ac_queue = sdata->vif.hw_queue[ac];
+
+ if (ac_queue == queue ||
+ (sdata->vif.cab_queue == queue &&
+ local->queue_stop_reasons[ac_queue] == 0 &&
+ skb_queue_empty(&local->pending[ac_queue])))
+ netif_wake_subqueue(sdata->dev, ac);
+ }
+ }
+}
+
static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
enum queue_stop_reason reason)
{
struct ieee80211_local *local = hw_to_local(hw);
- struct ieee80211_sub_if_data *sdata;
trace_wake_queue(local, queue, reason);
if (WARN_ON(queue >= hw->queues))
return;
+ if (!test_bit(reason, &local->queue_stop_reasons[queue]))
+ return;
+
__clear_bit(reason, &local->queue_stop_reasons[queue]);
if (local->queue_stop_reasons[queue] != 0)
@@ -284,11 +312,7 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
if (skb_queue_empty(&local->pending[queue])) {
rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- if (test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
- continue;
- netif_wake_subqueue(sdata->dev, queue);
- }
+ ieee80211_propagate_queue_wake(local, queue);
rcu_read_unlock();
} else
tasklet_schedule(&local->tx_pending_tasklet);
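The propagation logic above assumes each vif publishes a per-AC hardware queue map plus a CAB (content-after-beacon) queue. A hypothetical driver using IEEE80211_HW_QUEUE_CONTROL might populate them like this (sketch; the identity mapping is just an example):

static void example_assign_vif_queues(struct ieee80211_vif *vif)
{
	int ac;

	/* identity map: AC i is backed by hardware queue i */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
		vif->hw_queue[ac] = ac;

	/* one extra hardware queue for content-after-beacon frames */
	vif->cab_queue = IEEE80211_NUM_ACS;
}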
@@ -323,11 +347,21 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
if (WARN_ON(queue >= hw->queues))
return;
+ if (test_bit(reason, &local->queue_stop_reasons[queue]))
+ return;
+
__set_bit(reason, &local->queue_stop_reasons[queue]);
rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list)
- netif_stop_subqueue(sdata->dev, queue);
+ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ int ac;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ if (sdata->vif.hw_queue[ac] == queue ||
+ sdata->vif.cab_queue == queue)
+ netif_stop_subqueue(sdata->dev, ac);
+ }
+ }
rcu_read_unlock();
}
@@ -354,8 +388,8 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local,
{
struct ieee80211_hw *hw = &local->hw;
unsigned long flags;
- int queue = skb_get_queue_mapping(skb);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ int queue = info->hw_queue;
if (WARN_ON(!info->control.vif)) {
kfree_skb(skb);
@@ -379,10 +413,6 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
int queue, i;
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
- for (i = 0; i < hw->queues; i++)
- __ieee80211_stop_queue(hw, i,
- IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
-
while ((skb = skb_dequeue(skbs))) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -391,7 +421,11 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
continue;
}
- queue = skb_get_queue_mapping(skb);
+ queue = info->hw_queue;
+
+ __ieee80211_stop_queue(hw, queue,
+ IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
+
__skb_queue_tail(&local->pending[queue], skb);
}
@@ -404,12 +438,6 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}
-void ieee80211_add_pending_skbs(struct ieee80211_local *local,
- struct sk_buff_head *skbs)
-{
- ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL);
-}
-
void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
enum queue_stop_reason reason)
{
@@ -684,9 +712,9 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
else
elem_parse_failed = true;
break;
- case WLAN_EID_HT_INFORMATION:
- if (elen >= sizeof(struct ieee80211_ht_info))
- elems->ht_info_elem = (void *)pos;
+ case WLAN_EID_HT_OPERATION:
+ if (elen >= sizeof(struct ieee80211_ht_operation))
+ elems->ht_operation = (void *)pos;
else
elem_parse_failed = true;
break;
@@ -775,19 +803,22 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_tx_queue_params qparam;
- int queue;
+ int ac;
bool use_11b;
int aCWmin, aCWmax;
if (!local->ops->conf_tx)
return;
+ if (local->hw.queues < IEEE80211_NUM_ACS)
+ return;
+
memset(&qparam, 0, sizeof(qparam));
use_11b = (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) &&
!(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE);
- for (queue = 0; queue < local->hw.queues; queue++) {
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
/* Set defaults according to 802.11-2007 Table 7-37 */
aCWmax = 1023;
if (use_11b)
@@ -795,21 +826,21 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
else
aCWmin = 15;
- switch (queue) {
- case 3: /* AC_BK */
+ switch (ac) {
+ case IEEE80211_AC_BK:
qparam.cw_max = aCWmax;
qparam.cw_min = aCWmin;
qparam.txop = 0;
qparam.aifs = 7;
break;
default: /* never happens but let's not leave undefined */
- case 2: /* AC_BE */
+ case IEEE80211_AC_BE:
qparam.cw_max = aCWmax;
qparam.cw_min = aCWmin;
qparam.txop = 0;
qparam.aifs = 3;
break;
- case 1: /* AC_VI */
+ case IEEE80211_AC_VI:
qparam.cw_max = aCWmin;
qparam.cw_min = (aCWmin + 1) / 2 - 1;
if (use_11b)
@@ -818,7 +849,7 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
qparam.txop = 3008/32;
qparam.aifs = 2;
break;
- case 0: /* AC_VO */
+ case IEEE80211_AC_VO:
qparam.cw_max = (aCWmin + 1) / 2 - 1;
qparam.cw_min = (aCWmin + 1) / 4 - 1;
if (use_11b)
@@ -831,8 +862,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
qparam.uapsd = false;
- sdata->tx_conf[queue] = qparam;
- drv_conf_tx(local, sdata, queue, &qparam);
+ sdata->tx_conf[ac] = qparam;
+ drv_conf_tx(local, sdata, ac, &qparam);
}
/* after reinitialize QoS TX queues setting to default,
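A worked instance of the defaults set above for 802.11g operation (aCWmin = 15; the AC_VO txop is quoted from the 802.11-2007 defaults, since that branch is not visible in this hunk):

/*
 * AC_BE: cw_min = 15,               cw_max = 1023, aifs = 3, txop = 0
 * AC_VI: cw_min = (15+1)/2 - 1 = 7, cw_max = 15,   aifs = 2, txop = 3008/32
 * AC_VO: cw_min = (15+1)/4 - 1 = 3, cw_max = 7,    aifs = 2, txop = 1504/32
 *        (txop in 32 usec units)
 */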
@@ -878,10 +909,8 @@ u32 ieee80211_mandatory_rates(struct ieee80211_local *local,
int i;
sband = local->hw.wiphy->bands[band];
- if (!sband) {
- WARN_ON(1);
- sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
- }
+ if (WARN_ON(!sband))
+ return 1;
if (band == IEEE80211_BAND_2GHZ)
mandatory_flag = IEEE80211_RATE_MANDATORY_B;
@@ -1106,7 +1135,7 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
struct ieee802_11_elems *elems,
- enum ieee80211_band band)
+ enum ieee80211_band band, u32 *basic_rates)
{
struct ieee80211_supported_band *sband;
struct ieee80211_rate *bitrates;
@@ -1115,10 +1144,8 @@ u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
int i, j;
sband = local->hw.wiphy->bands[band];
- if (!sband) {
- WARN_ON(1);
- sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
- }
+ if (WARN_ON(!sband))
+ return 1;
bitrates = sband->bitrates;
num_rates = sband->n_bitrates;
@@ -1127,15 +1154,25 @@ u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
elems->ext_supp_rates_len; i++) {
u8 rate = 0;
int own_rate;
+ bool is_basic;
if (i < elems->supp_rates_len)
rate = elems->supp_rates[i];
else if (elems->ext_supp_rates)
rate = elems->ext_supp_rates
[i - elems->supp_rates_len];
own_rate = 5 * (rate & 0x7f);
- for (j = 0; j < num_rates; j++)
- if (bitrates[j].bitrate == own_rate)
+ is_basic = !!(rate & 0x80);
+
+ if (is_basic && (rate & 0x7f) == BSS_MEMBERSHIP_SELECTOR_HT_PHY)
+ continue;
+
+ for (j = 0; j < num_rates; j++) {
+ if (bitrates[j].bitrate == own_rate) {
supp_rates |= BIT(j);
+ if (basic_rates && is_basic)
+ *basic_rates |= BIT(j);
+ }
+ }
}
return supp_rates;
}
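A worked decode of the rate octets handled above:

/*
 * 0x82: bit 7 set -> basic rate; 0x82 & 0x7f = 2, own_rate = 5 * 2 = 10,
 *       i.e. 1 Mb/s in the 100 kb/s bitrate units mac80211 uses
 * 0xff: basic bit plus value 127 (BSS_MEMBERSHIP_SELECTOR_HT_PHY) is a
 *       membership selector, not a rate, hence skipped by the check above
 */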
@@ -1210,6 +1247,16 @@ int ieee80211_reconfig(struct ieee80211_local *local)
IEEE80211_TPT_LEDTRIG_FL_RADIO, 0);
/* add interfaces */
+ sdata = rtnl_dereference(local->monitor_sdata);
+ if (sdata) {
+ res = drv_add_interface(local, sdata);
+ if (WARN_ON(res)) {
+ rcu_assign_pointer(local->monitor_sdata, NULL);
+ synchronize_net();
+ kfree(sdata);
+ }
+ }
+
list_for_each_entry(sdata, &local->interfaces, list) {
if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
sdata->vif.type != NL80211_IFTYPE_MONITOR &&
@@ -1232,14 +1279,17 @@ int ieee80211_reconfig(struct ieee80211_local *local)
mutex_unlock(&local->sta_mtx);
/* reconfigure tx conf */
- list_for_each_entry(sdata, &local->interfaces, list) {
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
- sdata->vif.type == NL80211_IFTYPE_MONITOR ||
- !ieee80211_sdata_running(sdata))
- continue;
+ if (hw->queues >= IEEE80211_NUM_ACS) {
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+ sdata->vif.type == NL80211_IFTYPE_MONITOR ||
+ !ieee80211_sdata_running(sdata))
+ continue;
- for (i = 0; i < hw->queues; i++)
- drv_conf_tx(local, sdata, i, &sdata->tx_conf[i]);
+ for (i = 0; i < IEEE80211_NUM_ACS; i++)
+ drv_conf_tx(local, sdata, i,
+ &sdata->tx_conf[i]);
+ }
}
/* reconfigure hardware */
@@ -1321,6 +1371,12 @@ int ieee80211_reconfig(struct ieee80211_local *local)
}
}
+ /* add back keys */
+ list_for_each_entry(sdata, &local->interfaces, list)
+ if (ieee80211_sdata_running(sdata))
+ ieee80211_enable_keys(sdata);
+
+ wake_up:
/*
* Clear the WLAN_STA_BLOCK_BA flag so new aggregation
* sessions can be established after a resume.
@@ -1342,12 +1398,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
mutex_unlock(&local->sta_mtx);
}
- /* add back keys */
- list_for_each_entry(sdata, &local->interfaces, list)
- if (ieee80211_sdata_running(sdata))
- ieee80211_enable_keys(sdata);
-
- wake_up:
ieee80211_wake_queues_by_reason(hw,
IEEE80211_QUEUE_STOP_REASON_SUSPEND);
@@ -1611,57 +1661,55 @@ u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
return pos;
}
-u8 *ieee80211_ie_build_ht_info(u8 *pos,
- struct ieee80211_sta_ht_cap *ht_cap,
+u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
struct ieee80211_channel *channel,
- enum nl80211_channel_type channel_type)
+ enum nl80211_channel_type channel_type,
+ u16 prot_mode)
{
- struct ieee80211_ht_info *ht_info;
+ struct ieee80211_ht_operation *ht_oper;
/* Build HT Information */
- *pos++ = WLAN_EID_HT_INFORMATION;
- *pos++ = sizeof(struct ieee80211_ht_info);
- ht_info = (struct ieee80211_ht_info *)pos;
- ht_info->control_chan =
+ *pos++ = WLAN_EID_HT_OPERATION;
+ *pos++ = sizeof(struct ieee80211_ht_operation);
+ ht_oper = (struct ieee80211_ht_operation *)pos;
+ ht_oper->primary_chan =
ieee80211_frequency_to_channel(channel->center_freq);
switch (channel_type) {
case NL80211_CHAN_HT40MINUS:
- ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
break;
case NL80211_CHAN_HT40PLUS:
- ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
break;
case NL80211_CHAN_HT20:
default:
- ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE;
break;
}
- if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
- ht_info->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY;
+ if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
+ channel_type != NL80211_CHAN_NO_HT &&
+ channel_type != NL80211_CHAN_HT20)
+ ht_oper->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY;
- /*
- * Note: According to 802.11n-2009 9.13.3.1, HT Protection field and
- * RIFS Mode are reserved in IBSS mode, therefore keep them at 0
- */
- ht_info->operation_mode = 0x0000;
- ht_info->stbc_param = 0x0000;
+ ht_oper->operation_mode = cpu_to_le16(prot_mode);
+ ht_oper->stbc_param = 0x0000;
/* It seems that Basic MCS set and Supported MCS set
are identical for the first 10 bytes */
- memset(&ht_info->basic_set, 0, 16);
- memcpy(&ht_info->basic_set, &ht_cap->mcs, 10);
+ memset(&ht_oper->basic_set, 0, 16);
+ memcpy(&ht_oper->basic_set, &ht_cap->mcs, 10);
- return pos + sizeof(struct ieee80211_ht_info);
+ return pos + sizeof(struct ieee80211_ht_operation);
}
enum nl80211_channel_type
-ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info)
+ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper)
{
enum nl80211_channel_type channel_type;
- if (!ht_info)
+ if (!ht_oper)
return NL80211_CHAN_NO_HT;
- switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
+ switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
case IEEE80211_HT_PARAM_CHA_SEC_NONE:
channel_type = NL80211_CHAN_HT20;
break;
@@ -1678,13 +1726,15 @@ ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info)
return channel_type;
}
-int ieee80211_add_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb)
+int ieee80211_add_srates_ie(struct ieee80211_vif *vif,
+ struct sk_buff *skb, bool need_basic)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
int rate;
u8 i, rates, *pos;
+ u32 basic_rates = vif->bss_conf.basic_rates;
sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
rates = sband->n_bitrates;
@@ -1698,20 +1748,25 @@ int ieee80211_add_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb)
*pos++ = WLAN_EID_SUPP_RATES;
*pos++ = rates;
for (i = 0; i < rates; i++) {
+ u8 basic = 0;
+ if (need_basic && basic_rates & BIT(i))
+ basic = 0x80;
rate = sband->bitrates[i].bitrate;
- *pos++ = (u8) (rate / 5);
+ *pos++ = basic | (u8) (rate / 5);
}
return 0;
}
-int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb)
+int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif,
+ struct sk_buff *skb, bool need_basic)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
int rate;
u8 i, exrates, *pos;
+ u32 basic_rates = vif->bss_conf.basic_rates;
sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
exrates = sband->n_bitrates;
@@ -1728,9 +1783,25 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb)
*pos++ = WLAN_EID_EXT_SUPP_RATES;
*pos++ = exrates;
for (i = 8; i < sband->n_bitrates; i++) {
+ u8 basic = 0;
+ if (need_basic && basic_rates & BIT(i))
+ basic = 0x80;
rate = sband->bitrates[i].bitrate;
- *pos++ = (u8) (rate / 5);
+ *pos++ = basic | (u8) (rate / 5);
}
}
return 0;
}
+
+int ieee80211_ave_rssi(struct ieee80211_vif *vif)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+
+ if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION)) {
+ /* non-managed type interfaces */
+ return 0;
+ }
+ return ifmgd->ave_beacon_signal;
+}
+EXPORT_SYMBOL_GPL(ieee80211_ave_rssi);
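The encode side added to the two srates helpers above mirrors the parser's convention; worked examples of the emitted octets:

/*
 * basic 1 Mb/s  (bitrate = 10):  0x80 | (10 / 5) = 0x82
 * plain 54 Mb/s (bitrate = 540):         540 / 5 = 0x6c
 */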
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 7aa31bbfaa3b..c04d401dae92 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -92,6 +92,7 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
int keylen, int keyidx)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
unsigned int hdrlen;
u8 *newhdr;
@@ -104,6 +105,13 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
hdrlen = ieee80211_hdrlen(hdr->frame_control);
newhdr = skb_push(skb, WEP_IV_LEN);
memmove(newhdr, newhdr + WEP_IV_LEN, hdrlen);
+
+ /* the HW only needs room for the IV, but not the actual IV */
+ if (info->control.hw_key &&
+ (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
+ return newhdr + hdrlen;
+
+ skb_set_network_header(skb, skb_network_offset(skb) + WEP_IV_LEN);
ieee80211_wep_get_iv(local, keylen, keyidx, newhdr + hdrlen);
return newhdr + hdrlen;
}
@@ -313,14 +321,15 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_key_conf *hw_key = info->control.hw_key;
- if (!info->control.hw_key) {
+ if (!hw_key) {
if (ieee80211_wep_encrypt(tx->local, skb, tx->key->conf.key,
tx->key->conf.keylen,
tx->key->conf.keyidx))
return -1;
- } else if (info->control.hw_key->flags &
- IEEE80211_KEY_FLAG_GENERATE_IV) {
+ } else if ((hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+ (hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) {
if (!ieee80211_wep_add_iv(tx->local, skb,
tx->key->conf.keylen,
tx->key->conf.keyidx))
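The IEEE80211_KEY_FLAG_PUT_IV_SPACE handling added above targets hardware that generates the IV itself but still needs headroom reserved for it; a hypothetical driver would request that from its set_key() callback (sketch, foo_set_key is invented):

static int foo_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		       struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		       struct ieee80211_key_conf *key)
{
	if (cmd == SET_KEY)
		/* reserve room for the IV; the device fills it in */
		key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
	return 0;
}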
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 89511be3111e..c3d643a6536c 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -52,6 +52,26 @@ static int wme_downgrade_ac(struct sk_buff *skb)
}
}
+static u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
+ struct sk_buff *skb)
+{
+ /* in case we are a client verify acm is not set for this ac */
+ while (unlikely(local->wmm_acm & BIT(skb->priority))) {
+ if (wme_downgrade_ac(skb)) {
+ /*
+ * This should not really happen. The AP has marked all
+ * lower ACs to require admission control which is not
+ * a reasonable configuration. Allow the frame to be
+ * transmitted using AC_BK as a workaround.
+ */
+ break;
+ }
+ }
+
+ /* look up which queue to use for frames with this 1d tag */
+ return ieee802_1d_to_ac[skb->priority];
+}
+
/* Indicate which queue to use for this fully formed 802.11 frame */
u16 ieee80211_select_queue_80211(struct ieee80211_local *local,
struct sk_buff *skb,
@@ -59,7 +79,7 @@ u16 ieee80211_select_queue_80211(struct ieee80211_local *local,
{
u8 *p;
- if (local->hw.queues < 4)
+ if (local->hw.queues < IEEE80211_NUM_ACS)
return 0;
if (!ieee80211_is_data(hdr->frame_control)) {
@@ -86,9 +106,9 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
const u8 *ra = NULL;
bool qos = false;
- if (local->hw.queues < 4 || skb->len < 6) {
+ if (local->hw.queues < IEEE80211_NUM_ACS || skb->len < 6) {
skb->priority = 0; /* required for correct WPA/11i MIC */
- return min_t(u16, local->hw.queues - 1, IEEE80211_AC_BE);
+ return 0;
}
rcu_read_lock();
@@ -139,26 +159,6 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
return ieee80211_downgrade_queue(local, skb);
}
-u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
- struct sk_buff *skb)
-{
- /* in case we are a client verify acm is not set for this ac */
- while (unlikely(local->wmm_acm & BIT(skb->priority))) {
- if (wme_downgrade_ac(skb)) {
- /*
- * This should not really happen. The AP has marked all
- * lower ACs to require admission control which is not
- * a reasonable configuration. Allow the frame to be
- * transmitted using AC_BK as a workaround.
- */
- break;
- }
- }
-
- /* look up which queue to use for frames with this 1d tag */
- return ieee802_1d_to_ac[skb->priority];
-}
-
void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
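The ieee802_1d_to_ac[] lookup used throughout this file implements the usual 802.1d user-priority to WMM AC mapping (sketch of the standard table), so the ACM downgrade loop above walks VO -> VI -> BE -> BK:

/*
 * skb->priority:  1, 2 -> AC_BK (background)
 *                 0, 3 -> AC_BE (best effort)
 *                 4, 5 -> AC_VI (video)
 *                 6, 7 -> AC_VO (voice)
 */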
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index 94edceb617ff..ca80818b7b66 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -22,8 +22,5 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
-u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
- struct sk_buff *skb);
-
#endif /* _WME_H */
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index c6e230efa049..b2650a9d45ff 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -122,9 +122,6 @@ static void ieee80211_work_work(struct work_struct *work)
enum work_action rma;
bool remain_off_channel = false;
- if (local->scanning)
- return;
-
/*
* ieee80211_queue_work() should have picked up most cases,
* here we'll pick the rest.
@@ -134,6 +131,11 @@ static void ieee80211_work_work(struct work_struct *work)
mutex_lock(&local->mtx);
+ if (local->scanning) {
+ mutex_unlock(&local->mtx);
+ return;
+ }
+
ieee80211_recalc_idle(local);
list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
@@ -226,13 +228,8 @@ static void ieee80211_work_work(struct work_struct *work)
run_again(local, jiffies + HZ/2);
}
- if (list_empty(&local->work_list) && local->scan_req &&
- !local->scanning)
- ieee80211_queue_delayed_work(&local->hw,
- &local->scan_work,
- round_jiffies_relative(0));
-
ieee80211_recalc_idle(local);
+ ieee80211_run_deferred_scan(local);
mutex_unlock(&local->mtx);
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 0ae23c60968c..bdb53aba888e 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -183,7 +183,8 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
u8 *pos;
if (info->control.hw_key &&
- !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
+ !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
+ !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) {
/* hwaccel - with no need for software-generated IV */
return 0;
}
@@ -202,8 +203,14 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
pos = skb_push(skb, TKIP_IV_LEN);
memmove(pos, pos + TKIP_IV_LEN, hdrlen);
+ skb_set_network_header(skb, skb_network_offset(skb) + TKIP_IV_LEN);
pos += hdrlen;
+ /* the HW only needs room for the IV, but not the actual IV */
+ if (info->control.hw_key &&
+ (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
+ return 0;
+
/* Increase IV for the frame */
spin_lock_irqsave(&key->u.tkip.txlock, flags);
key->u.tkip.tx.iv16++;
@@ -422,6 +429,7 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
pos = skb_push(skb, CCMP_HDR_LEN);
memmove(pos, pos + CCMP_HDR_LEN, hdrlen);
+ skb_set_network_header(skb, skb_network_offset(skb) + CCMP_HDR_LEN);
/* the HW only needs room for the IV, but not the actual IV */
if (info->control.hw_key &&
diff --git a/net/mac802154/Kconfig b/net/mac802154/Kconfig
new file mode 100644
index 000000000000..a967ddaa4e2f
--- /dev/null
+++ b/net/mac802154/Kconfig
@@ -0,0 +1,16 @@
+config MAC802154
+ tristate "Generic IEEE 802.15.4 Soft Networking Stack (mac802154)"
+ depends on IEEE802154 && EXPERIMENTAL
+ select CRC_CCITT
+ ---help---
+ This option enables the hardware-independent IEEE 802.15.4
+ networking stack for SoftMAC devices (the ones implementing
+ only the PHY level of the IEEE 802.15.4 standard).
+
+ Note: this implementation is neither certified nor feature
+ complete! Compatibility with other implementations hasn't
+ been tested yet!
+
+ If you plan to use HardMAC IEEE 802.15.4 devices, you can
+ say N here. Alternatively, you can say M to compile it as
+ a module.
diff --git a/net/mac802154/Makefile b/net/mac802154/Makefile
new file mode 100644
index 000000000000..ec1bd3fc1273
--- /dev/null
+++ b/net/mac802154/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MAC802154) += mac802154.o
+mac802154-objs := ieee802154_dev.o rx.o tx.o mac_cmd.o mib.o monitor.o
diff --git a/net/mac802154/ieee802154_dev.c b/net/mac802154/ieee802154_dev.c
new file mode 100644
index 000000000000..e3edfb0661b0
--- /dev/null
+++ b/net/mac802154/ieee802154_dev.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2007-2012 Siemens AG
+ *
+ * Written by:
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ *
+ * Based on the code from 'linux-zigbee.sourceforge.net' project.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+
+#include <net/netlink.h>
+#include <linux/nl802154.h>
+#include <net/mac802154.h>
+#include <net/route.h>
+#include <net/wpan-phy.h>
+
+#include "mac802154.h"
+
+int mac802154_slave_open(struct net_device *dev)
+{
+ struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct mac802154_priv *ipriv = priv->hw;
+ int res = 0;
+
+ if (ipriv->open_count++ == 0) {
+ res = ipriv->ops->start(&ipriv->hw);
+ WARN_ON(res);
+ if (res)
+ goto err;
+ }
+
+ if (ipriv->ops->ieee_addr) {
+ res = ipriv->ops->ieee_addr(&ipriv->hw, dev->dev_addr);
+ WARN_ON(res);
+ if (res)
+ goto err;
+ mac802154_dev_set_ieee_addr(dev);
+ }
+
+ netif_start_queue(dev);
+ return 0;
+err:
+ priv->hw->open_count--;
+
+ return res;
+}
+
+int mac802154_slave_close(struct net_device *dev)
+{
+ struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct mac802154_priv *ipriv = priv->hw;
+
+ netif_stop_queue(dev);
+
+ if (!--ipriv->open_count)
+ ipriv->ops->stop(&ipriv->hw);
+
+ return 0;
+}
+
+static int
+mac802154_netdev_register(struct wpan_phy *phy, struct net_device *dev)
+{
+ struct mac802154_sub_if_data *priv;
+ struct mac802154_priv *ipriv;
+ int err;
+
+ ipriv = wpan_phy_priv(phy);
+
+ priv = netdev_priv(dev);
+ priv->dev = dev;
+ priv->hw = ipriv;
+
+ dev->needed_headroom = ipriv->hw.extra_tx_headroom;
+
+ SET_NETDEV_DEV(dev, &ipriv->phy->dev);
+
+ mutex_lock(&ipriv->slaves_mtx);
+ if (!ipriv->running) {
+ mutex_unlock(&ipriv->slaves_mtx);
+ return -ENODEV;
+ }
+ mutex_unlock(&ipriv->slaves_mtx);
+
+ err = register_netdev(dev);
+ if (err < 0)
+ return err;
+
+ rtnl_lock();
+ mutex_lock(&ipriv->slaves_mtx);
+ list_add_tail_rcu(&priv->list, &ipriv->slaves);
+ mutex_unlock(&ipriv->slaves_mtx);
+ rtnl_unlock();
+
+ return 0;
+}
+
+static void
+mac802154_del_iface(struct wpan_phy *phy, struct net_device *dev)
+{
+ struct mac802154_sub_if_data *sdata;
+ ASSERT_RTNL();
+
+ sdata = netdev_priv(dev);
+
+ BUG_ON(sdata->hw->phy != phy);
+
+ mutex_lock(&sdata->hw->slaves_mtx);
+ list_del_rcu(&sdata->list);
+ mutex_unlock(&sdata->hw->slaves_mtx);
+
+ synchronize_rcu();
+ unregister_netdevice(sdata->dev);
+}
+
+static struct net_device *
+mac802154_add_iface(struct wpan_phy *phy, const char *name, int type)
+{
+ struct net_device *dev;
+ int err = -ENOMEM;
+
+ switch (type) {
+ case IEEE802154_DEV_MONITOR:
+ dev = alloc_netdev(sizeof(struct mac802154_sub_if_data),
+ name, mac802154_monitor_setup);
+ break;
+ default:
+ dev = NULL;
+ err = -EINVAL;
+ break;
+ }
+ if (!dev)
+ goto err;
+
+ err = mac802154_netdev_register(phy, dev);
+ if (err)
+ goto err_free;
+
+ dev_hold(dev); /* we return an incremented device refcount */
+ return dev;
+
+err_free:
+ free_netdev(dev);
+err:
+ return ERR_PTR(err);
+}
+
+struct ieee802154_dev *
+ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops)
+{
+ struct wpan_phy *phy;
+ struct mac802154_priv *priv;
+ size_t priv_size;
+
+ if (!ops || !ops->xmit || !ops->ed || !ops->start ||
+ !ops->stop || !ops->set_channel) {
+ printk(KERN_ERR
+ "undefined IEEE802.15.4 device operations\n");
+ return NULL;
+ }
+
+ /* Ensure 32-byte alignment of our private data and hw private data.
+ * We use the wpan_phy priv data for both our mac802154_priv and for
+ * the driver's private data
+ *
+ * in memory it'll be like this:
+ *
+ * +-----------------------+
+ * | struct wpan_phy |
+ * +-----------------------+
+ * | struct mac802154_priv |
+ * +-----------------------+
+ * | driver's private data |
+ * +-----------------------+
+ *
+ * Because the ieee802154 layer isn't aware of the driver and MAC
+ * structures, align them here.
+ */
+
+ priv_size = ALIGN(sizeof(*priv), NETDEV_ALIGN) + priv_data_len;
+
+ phy = wpan_phy_alloc(priv_size);
+ if (!phy) {
+ printk(KERN_ERR
+ "failure to allocate master IEEE802.15.4 device\n");
+ return NULL;
+ }
+
+ priv = wpan_phy_priv(phy);
+ priv->hw.phy = priv->phy = phy;
+ priv->hw.priv = (char *)priv + ALIGN(sizeof(*priv), NETDEV_ALIGN);
+ priv->ops = ops;
+
+ INIT_LIST_HEAD(&priv->slaves);
+ mutex_init(&priv->slaves_mtx);
+
+ return &priv->hw;
+}
+EXPORT_SYMBOL(ieee802154_alloc_device);
+
+void ieee802154_free_device(struct ieee802154_dev *hw)
+{
+ struct mac802154_priv *priv = mac802154_to_priv(hw);
+
+ BUG_ON(!list_empty(&priv->slaves));
+
+ wpan_phy_free(priv->phy);
+
+ mutex_destroy(&priv->slaves_mtx);
+}
+EXPORT_SYMBOL(ieee802154_free_device);
+
+int ieee802154_register_device(struct ieee802154_dev *dev)
+{
+ struct mac802154_priv *priv = mac802154_to_priv(dev);
+ int rc = -ENOMEM;
+
+ priv->dev_workqueue =
+ create_singlethread_workqueue(wpan_phy_name(priv->phy));
+ if (!priv->dev_workqueue)
+ goto out;
+
+ wpan_phy_set_dev(priv->phy, priv->hw.parent);
+
+ priv->phy->add_iface = mac802154_add_iface;
+ priv->phy->del_iface = mac802154_del_iface;
+
+ rc = wpan_phy_register(priv->phy);
+ if (rc < 0)
+ goto out_wq;
+
+ rtnl_lock();
+
+ mutex_lock(&priv->slaves_mtx);
+ priv->running = MAC802154_DEVICE_RUN;
+ mutex_unlock(&priv->slaves_mtx);
+
+ rtnl_unlock();
+
+ return 0;
+
+out_wq:
+ destroy_workqueue(priv->dev_workqueue);
+out:
+ return rc;
+}
+EXPORT_SYMBOL(ieee802154_register_device);
+
+void ieee802154_unregister_device(struct ieee802154_dev *dev)
+{
+ struct mac802154_priv *priv = mac802154_to_priv(dev);
+ struct mac802154_sub_if_data *sdata, *next;
+
+ flush_workqueue(priv->dev_workqueue);
+ destroy_workqueue(priv->dev_workqueue);
+
+ rtnl_lock();
+
+ mutex_lock(&priv->slaves_mtx);
+ priv->running = MAC802154_DEVICE_STOPPED;
+ mutex_unlock(&priv->slaves_mtx);
+
+ list_for_each_entry_safe(sdata, next, &priv->slaves, list) {
+ mutex_lock(&sdata->hw->slaves_mtx);
+ list_del(&sdata->list);
+ mutex_unlock(&sdata->hw->slaves_mtx);
+
+ unregister_netdevice(sdata->dev);
+ }
+
+ rtnl_unlock();
+
+ wpan_phy_unregister(priv->phy);
+}
+EXPORT_SYMBOL(ieee802154_unregister_device);
+
+MODULE_DESCRIPTION("IEEE 802.15.4 implementation");
+MODULE_LICENSE("GPL v2");
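A minimal sketch of how a SoftMAC driver would consume the API exported above (the foo_* names, struct foo_priv, and the SPI bus choice are invented for illustration; the mandatory ops match the check in ieee802154_alloc_device):

static struct ieee802154_ops foo_ops = {
	.owner       = THIS_MODULE,
	.xmit        = foo_xmit,
	.ed          = foo_ed,
	.start       = foo_start,
	.stop        = foo_stop,
	.set_channel = foo_set_channel,
};

static int foo_probe(struct spi_device *spi)
{
	struct ieee802154_dev *dev;

	dev = ieee802154_alloc_device(sizeof(struct foo_priv), &foo_ops);
	if (!dev)
		return -ENOMEM;

	/* driver data sits after mac802154_priv, NETDEV_ALIGN-aligned,
	 * and is reachable through dev->priv */
	dev->parent = &spi->dev;

	return ieee802154_register_device(dev);
}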
diff --git a/net/mac802154/mac802154.h b/net/mac802154/mac802154.h
new file mode 100644
index 000000000000..789d9c948aec
--- /dev/null
+++ b/net/mac802154/mac802154.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2007-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Pavel Smolenskiy <pavel.smolenskiy@gmail.com>
+ * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+#ifndef MAC802154_H
+#define MAC802154_H
+
+/* mac802154 device private data */
+struct mac802154_priv {
+ struct ieee802154_dev hw;
+ struct ieee802154_ops *ops;
+
+ /* ieee802154 phy */
+ struct wpan_phy *phy;
+
+ int open_count;
+
+ /* As in mac80211, the slaves list is modified:
+ * 1) under the RTNL
+ * 2) protected by slaves_mtx;
+ * 3) in an RCU manner
+ *
+ * So atomic readers can use any of these protection methods.
+ */
+ struct list_head slaves;
+ struct mutex slaves_mtx;
+
+ /* This workqueue is used for scanning and other jobs, so that they
+ * do not interfere with the serial driver.
+ */
+ struct workqueue_struct *dev_workqueue;
+
+ /* The SoftMAC device is registered and running; subinterfaces can be
+ * added. This flag should be modified under slaves_mtx and the RTNL,
+ * so it can be read using either protection method.
+ */
+ bool running;
+};
+
+#define MAC802154_DEVICE_STOPPED 0x00
+#define MAC802154_DEVICE_RUN 0x01
+
+/* Slave interface definition.
+ *
+ * Slaves represent typical network interfaces available from userspace.
+ * Each ieee802154 device/transceiver may have several slaves and may
+ * be associated with several networks at the same time.
+ */
+struct mac802154_sub_if_data {
+ struct list_head list; /* the ieee802154_priv->slaves list */
+
+ struct mac802154_priv *hw;
+ struct net_device *dev;
+
+ int type;
+
+ spinlock_t mib_lock;
+
+ __le16 pan_id;
+ __le16 short_addr;
+
+ u8 chan;
+ u8 page;
+
+ /* MAC BSN field */
+ u8 bsn;
+ /* MAC DSN field */
+ u8 dsn;
+};
+
+#define mac802154_to_priv(_hw) container_of(_hw, struct mac802154_priv, hw)
+
+#define MAC802154_MAX_XMIT_ATTEMPTS 3
+
+#define MAC802154_CHAN_NONE (~(u8)0) /* No channel is assigned */
+
+extern struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced;
+
+int mac802154_slave_open(struct net_device *dev);
+int mac802154_slave_close(struct net_device *dev);
+
+void mac802154_monitors_rx(struct mac802154_priv *priv, struct sk_buff *skb);
+void mac802154_monitor_setup(struct net_device *dev);
+
+netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
+ u8 page, u8 chan);
+
+/* MIB callbacks */
+void mac802154_dev_set_ieee_addr(struct net_device *dev);
+
+#endif /* MAC802154_H */
diff --git a/net/mac802154/mac_cmd.c b/net/mac802154/mac_cmd.c
new file mode 100644
index 000000000000..7a5d0e052cd7
--- /dev/null
+++ b/net/mac802154/mac_cmd.c
@@ -0,0 +1,45 @@
+/*
+ * MAC commands interface
+ *
+ * Copyright 2007-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Sergey Lapin <slapin@ossfans.org>
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+
+#include <net/ieee802154_netdev.h>
+#include <net/wpan-phy.h>
+#include <net/mac802154.h>
+
+#include "mac802154.h"
+
+struct wpan_phy *mac802154_get_phy(const struct net_device *dev)
+{
+ struct mac802154_sub_if_data *priv = netdev_priv(dev);
+
+ BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+ return to_phy(get_device(&priv->hw->phy->dev));
+}
+
+struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced = {
+ .get_phy = mac802154_get_phy,
+};
diff --git a/net/mac802154/mib.c b/net/mac802154/mib.c
new file mode 100644
index 000000000000..ab59821ec729
--- /dev/null
+++ b/net/mac802154/mib.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2007-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Sergey Lapin <slapin@ossfans.org>
+ * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#include <linux/if_arp.h>
+
+#include <net/mac802154.h>
+#include <net/wpan-phy.h>
+
+#include "mac802154.h"
+
+struct hw_addr_filt_notify_work {
+ struct work_struct work;
+ struct net_device *dev;
+ unsigned long changed;
+};
+
+struct mac802154_priv *mac802154_slave_get_priv(struct net_device *dev)
+{
+ struct mac802154_sub_if_data *priv = netdev_priv(dev);
+
+ BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+ return priv->hw;
+}
+
+static void hw_addr_notify(struct work_struct *work)
+{
+ struct hw_addr_filt_notify_work *nw = container_of(work,
+ struct hw_addr_filt_notify_work, work);
+ struct mac802154_priv *hw = mac802154_slave_get_priv(nw->dev);
+ int res;
+
+ res = hw->ops->set_hw_addr_filt(&hw->hw,
+ &hw->hw.hw_filt,
+ nw->changed);
+ if (res)
+ pr_debug("failed changed mask %lx\n", nw->changed);
+
+ kfree(nw);
+
+ return;
+}
+
+static void set_hw_addr_filt(struct net_device *dev, unsigned long changed)
+{
+ struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct hw_addr_filt_notify_work *work;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ INIT_WORK(&work->work, hw_addr_notify);
+ work->dev = dev;
+ work->changed = changed;
+ queue_work(priv->hw->dev_workqueue, &work->work);
+
+ return;
+}
+
+void mac802154_dev_set_ieee_addr(struct net_device *dev)
+{
+ struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct mac802154_priv *mac = priv->hw;
+
+ if (mac->ops->set_hw_addr_filt &&
+ memcmp(mac->hw.hw_filt.ieee_addr,
+ dev->dev_addr, IEEE802154_ADDR_LEN)) {
+ memcpy(mac->hw.hw_filt.ieee_addr,
+ dev->dev_addr, IEEE802154_ADDR_LEN);
+ set_hw_addr_filt(dev, IEEE802515_AFILT_IEEEADDR_CHANGED);
+ }
+}
diff --git a/net/mac802154/monitor.c b/net/mac802154/monitor.c
new file mode 100644
index 000000000000..434a26f76a80
--- /dev/null
+++ b/net/mac802154/monitor.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2007, 2008, 2009 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Sergey Lapin <slapin@ossfans.org>
+ * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/crc-ccitt.h>
+
+#include <net/ieee802154.h>
+#include <net/mac802154.h>
+#include <net/netlink.h>
+#include <net/wpan-phy.h>
+#include <linux/nl802154.h>
+
+#include "mac802154.h"
+
+static netdev_tx_t mac802154_monitor_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct mac802154_sub_if_data *priv;
+ u8 chan, page;
+
+ priv = netdev_priv(dev);
+
+ /* FIXME: locking */
+ chan = priv->hw->phy->current_channel;
+ page = priv->hw->phy->current_page;
+
+ if (chan == MAC802154_CHAN_NONE) /* not initialized */
+ return NETDEV_TX_OK;
+
+ if (WARN_ON(page >= WPAN_NUM_PAGES) ||
+ WARN_ON(chan >= WPAN_NUM_CHANNELS))
+ return NETDEV_TX_OK;
+
+ skb->skb_iif = dev->ifindex;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ return mac802154_tx(priv->hw, skb, page, chan);
+}
+
+
+void mac802154_monitors_rx(struct mac802154_priv *priv, struct sk_buff *skb)
+{
+ struct sk_buff *skb2;
+ struct mac802154_sub_if_data *sdata;
+ u16 crc = crc_ccitt(0, skb->data, skb->len);
+ u8 *data;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdata, &priv->slaves, list) {
+ if (sdata->type != IEEE802154_DEV_MONITOR)
+ continue;
+
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (!skb2)
+ continue;
+ skb2->dev = sdata->dev;
+ skb2->pkt_type = PACKET_HOST;
+ data = skb_put(skb2, 2);
+ data[0] = crc & 0xff;
+ data[1] = crc >> 8;
+
+ netif_rx_ni(skb2);
+ }
+ rcu_read_unlock();
+}
+
+static const struct net_device_ops mac802154_monitor_ops = {
+ .ndo_open = mac802154_slave_open,
+ .ndo_stop = mac802154_slave_close,
+ .ndo_start_xmit = mac802154_monitor_xmit,
+};
+
+void mac802154_monitor_setup(struct net_device *dev)
+{
+ struct mac802154_sub_if_data *priv;
+
+ dev->addr_len = 0;
+ dev->hard_header_len = 0;
+ dev->needed_tailroom = 2; /* room for FCS */
+ dev->mtu = IEEE802154_MTU;
+ dev->tx_queue_len = 10;
+ dev->type = ARPHRD_IEEE802154_MONITOR;
+ dev->flags = IFF_NOARP | IFF_BROADCAST;
+ dev->watchdog_timeo = 0;
+
+ dev->destructor = free_netdev;
+ dev->netdev_ops = &mac802154_monitor_ops;
+ dev->ml_priv = &mac802154_mlme_reduced;
+
+ priv = netdev_priv(dev);
+ priv->type = IEEE802154_DEV_MONITOR;
+
+ priv->chan = MAC802154_CHAN_NONE; /* not initialized */
+ priv->page = 0;
+}
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
new file mode 100644
index 000000000000..4a7d76d4f8bc
--- /dev/null
+++ b/net/mac802154/rx.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2007-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Pavel Smolenskiy <pavel.smolenskiy@gmail.com>
+ * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+#include <linux/crc-ccitt.h>
+
+#include <net/mac802154.h>
+#include <net/ieee802154_netdev.h>
+
+#include "mac802154.h"
+
+/* The IEEE 802.15.4 standard defines four MAC frame types:
+ * - beacon frame
+ * - MAC command frame
+ * - acknowledgement frame
+ * - data frame
+ *
+ * and only the data frame should be pushed to the upper layers; the other
+ * types carry MAC-layer management information only. So only data packets
+ * are handed to the networking queue; all others are processed right here
+ * on the device workqueue.
+ */
+struct rx_work {
+ struct sk_buff *skb;
+ struct work_struct work;
+ struct ieee802154_dev *dev;
+ u8 lqi;
+};
+
+static void
+mac802154_subif_rx(struct ieee802154_dev *hw, struct sk_buff *skb, u8 lqi)
+{
+ struct mac802154_priv *priv = mac802154_to_priv(hw);
+
+ mac_cb(skb)->lqi = lqi;
+ skb->protocol = htons(ETH_P_IEEE802154);
+ skb_reset_mac_header(skb);
+
+ BUILD_BUG_ON(sizeof(struct ieee802154_mac_cb) > sizeof(skb->cb));
+
+ if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) {
+ u16 crc;
+
+ if (skb->len < 2) {
+ pr_debug("got invalid frame\n");
+ goto out;
+ }
+ crc = crc_ccitt(0, skb->data, skb->len);
+ if (crc) {
+ pr_debug("CRC mismatch\n");
+ goto out;
+ }
+ skb_trim(skb, skb->len - 2); /* CRC */
+ }
+
+ mac802154_monitors_rx(priv, skb);
+out:
+ dev_kfree_skb(skb);
+ return;
+}
+
+static void mac802154_rx_worker(struct work_struct *work)
+{
+ struct rx_work *rw = container_of(work, struct rx_work, work);
+ struct sk_buff *skb = rw->skb;
+
+ mac802154_subif_rx(rw->dev, skb, rw->lqi);
+ kfree(rw);
+}
+
+void
+ieee802154_rx_irqsafe(struct ieee802154_dev *dev, struct sk_buff *skb, u8 lqi)
+{
+ struct mac802154_priv *priv = mac802154_to_priv(dev);
+ struct rx_work *work;
+
+ if (!skb)
+ return;
+
+ work = kzalloc(sizeof(struct rx_work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ INIT_WORK(&work->work, mac802154_rx_worker);
+ work->skb = skb;
+ work->dev = dev;
+ work->lqi = lqi;
+
+ queue_work(priv->dev_workqueue, &work->work);
+}
+EXPORT_SYMBOL(ieee802154_rx_irqsafe);
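
For context, a driver hands received frames to this entry point from interrupt context; the kzalloc(GFP_ATOMIC) plus queue_work() above make that safe. A hedged sketch of a call site, assuming a hypothetical driver with fake_read_frame()/fake_read_lqi() helpers:

	static irqreturn_t fake_isr(int irq, void *data)
	{
		struct fake_priv *priv = data;
		struct sk_buff *skb = fake_read_frame(priv); /* frame + FCS */

		if (skb)
			ieee802154_rx_irqsafe(priv->dev, skb,
					      fake_read_lqi(priv));
		return IRQ_HANDLED;
	}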
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
new file mode 100644
index 000000000000..8781d8f904d9
--- /dev/null
+++ b/net/mac802154/tx.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2007-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Sergey Lapin <slapin@ossfans.org>
+ * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/crc-ccitt.h>
+
+#include <net/mac802154.h>
+#include <net/wpan-phy.h>
+
+#include "mac802154.h"
+
+/* IEEE 802.15.4 transceivers can sleep during the xmit session, so process
+ * packets through the workqueue.
+ */
+struct xmit_work {
+ struct sk_buff *skb;
+ struct work_struct work;
+ struct mac802154_priv *priv;
+ u8 chan;
+ u8 page;
+ u8 xmit_attempts;
+};
+
+static void mac802154_xmit_worker(struct work_struct *work)
+{
+ struct xmit_work *xw = container_of(work, struct xmit_work, work);
+ int res;
+
+ mutex_lock(&xw->priv->phy->pib_lock);
+ if (xw->priv->phy->current_channel != xw->chan ||
+ xw->priv->phy->current_page != xw->page) {
+ res = xw->priv->ops->set_channel(&xw->priv->hw,
+ xw->page,
+ xw->chan);
+ if (res) {
+ pr_debug("set_channel failed\n");
+ goto out;
+ }
+ }
+
+ res = xw->priv->ops->xmit(&xw->priv->hw, xw->skb);
+
+out:
+ mutex_unlock(&xw->priv->phy->pib_lock);
+
+ if (res) {
+ if (xw->xmit_attempts++ < MAC802154_MAX_XMIT_ATTEMPTS) {
+ queue_work(xw->priv->dev_workqueue, &xw->work);
+ return;
+ }
+ pr_debug("transmission failed after %d attempts\n",
+ MAC802154_MAX_XMIT_ATTEMPTS);
+ }
+
+ dev_kfree_skb(xw->skb);
+
+ kfree(xw);
+}
+
+netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
+ u8 page, u8 chan)
+{
+ struct xmit_work *work;
+
+ if (!(priv->phy->channels_supported[page] & (1 << chan))) {
+ WARN_ON(1);
+ return NETDEV_TX_OK;
+ }
+
+ if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) {
+ u16 crc = crc_ccitt(0, skb->data, skb->len);
+ u8 *data = skb_put(skb, 2);
+ data[0] = crc & 0xff;
+ data[1] = crc >> 8;
+ }
+
+ if (skb_cow_head(skb, priv->hw.extra_tx_headroom)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ work = kzalloc(sizeof(struct xmit_work), GFP_ATOMIC);
+ if (!work)
+ return NETDEV_TX_BUSY;
+
+ INIT_WORK(&work->work, mac802154_xmit_worker);
+ work->skb = skb;
+ work->priv = priv;
+ work->page = page;
+ work->chan = chan;
+ work->xmit_attempts = 0;
+
+ queue_work(priv->dev_workqueue, &work->work);
+
+ return NETDEV_TX_OK;
+}
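
The FCS handling in mac802154_tx() and mac802154_subif_rx() relies on a residue property of the reflected CRC-16/CCITT in lib/crc-ccitt.c: appending the CRC little-endian makes a second pass over data plus FCS come out to zero. A small sketch of the round trip (buf and len are illustrative):

	static void fcs_roundtrip(u8 *buf, int len)
	{
		u16 fcs = crc_ccitt(0, buf, len);

		buf[len] = fcs & 0xff;		/* LSB first, as in tx */
		buf[len + 1] = fcs >> 8;
		/* this is exactly the check mac802154_subif_rx() does */
		WARN_ON(crc_ccitt(0, buf, len + 2) != 0);
	}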
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 0c6f67e8f2e5..209c1ed43368 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -509,6 +509,21 @@ config NETFILTER_XT_TARGET_HL
since you can easily create immortal packets that loop
forever on the network.
+config NETFILTER_XT_TARGET_HMARK
+ tristate '"HMARK" target support'
+ depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
+ depends on NETFILTER_ADVANCED
+ ---help---
+ This option adds the "HMARK" target.
+
+ The target allows you to create rules in the "raw" and "mangle" tables
+ which set the skbuff mark by means of hash calculation within a given
+ range. The nfmark can influence the routing method (see "Use netfilter
+ MARK value as routing key") and can also be used by other subsystems to
+ change their behaviour.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NETFILTER_XT_TARGET_IDLETIMER
tristate "IDLETIMER target support"
depends on NETFILTER_ADVANCED
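
As a usage illustration for the HMARK target added above (the --hmark-* option names come from the companion userspace extension and are given here as an assumption, with made-up numbers), a rule like

	iptables -t mangle -A PREROUTING -j HMARK \
		--hmark-tuple src,dst,proto --hmark-mod 2 --hmark-rnd 0xfeedcafe

hashes each flow's addresses and protocol into one of two nfmark values, which fwmark-based policy routing can then map onto different routing tables.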
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index ca3676586f51..4e7960cc7b97 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -59,6 +59,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_HMARK) += xt_HMARK.o
obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
obj-$(CONFIG_NETFILTER_XT_TARGET_LOG) += xt_LOG.o
obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index e1b7e051332e..e19f3653db23 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -290,12 +290,3 @@ void __init netfilter_init(void)
if (netfilter_log_init() < 0)
panic("cannot initialize nf_log");
}
-
-#ifdef CONFIG_SYSCTL
-struct ctl_path nf_net_netfilter_sysctl_path[] = {
- { .procname = "net", },
- { .procname = "netfilter", },
- { }
-};
-EXPORT_SYMBOL_GPL(nf_net_netfilter_sysctl_path);
-#endif /* CONFIG_SYSCTL */
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index a72a4dff0031..7e1b061aeeba 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -109,8 +109,9 @@ bitmap_ip_list(const struct ip_set *set,
} else
goto nla_put_failure;
}
- NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
- htonl(map->first_ip + id * map->hosts));
+ if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
+ htonl(map->first_ip + id * map->hosts)))
+ goto nla_put_failure;
ipset_nest_end(skb, nested);
}
ipset_nest_end(skb, atd);
@@ -194,10 +195,11 @@ bitmap_ip_tlist(const struct ip_set *set,
} else
goto nla_put_failure;
}
- NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
- htonl(map->first_ip + id * map->hosts));
- NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
- htonl(ip_set_timeout_get(members[id])));
+ if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
+ htonl(map->first_ip + id * map->hosts)) ||
+ nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(members[id]))))
+ goto nla_put_failure;
ipset_nest_end(skb, nested);
}
ipset_nest_end(skb, adt);
@@ -334,15 +336,16 @@ bitmap_ip_head(struct ip_set *set, struct sk_buff *skb)
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested)
goto nla_put_failure;
- NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
- NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
- if (map->netmask != 32)
- NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, map->netmask);
- NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
- NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
- htonl(sizeof(*map) + map->memsize));
- if (with_timeout(map->timeout))
- NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+ if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) ||
+ nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)) ||
+ (map->netmask != 32 &&
+ nla_put_u8(skb, IPSET_ATTR_NETMASK, map->netmask)) ||
+ nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
+ nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
+ htonl(sizeof(*map) + map->memsize)) ||
+ (with_timeout(map->timeout) &&
+ nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))))
+ goto nla_put_failure;
ipset_nest_end(skb, nested);
return 0;
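
The hunks above, and the matching ones in the other set types below, are one mechanical conversion: the old NLA_PUT_* macros from include/net/netlink.h expanded to a hidden "goto nla_put_failure;" on error, whereas the nla_put_*() functions return non-zero instead, so the puts can be OR-chained into a single explicit jump. The pattern, with illustrative attribute values:

	if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(size)) ||
	    nla_put_u8(skb, IPSET_ATTR_PROTO, proto))
		goto nla_put_failure;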
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 81324c12c5be..d7eaf10edb6d 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -111,7 +111,7 @@ bitmap_ipmac_test(struct ip_set *set, void *value, u32 timeout, u32 flags)
return -EAGAIN;
case MAC_FILLED:
return data->ether == NULL ||
- compare_ether_addr(data->ether, elem->ether) == 0;
+ ether_addr_equal(data->ether, elem->ether);
}
return 0;
}
@@ -186,11 +186,12 @@ bitmap_ipmac_list(const struct ip_set *set,
} else
goto nla_put_failure;
}
- NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
- htonl(map->first_ip + id));
- if (elem->match == MAC_FILLED)
- NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN,
- elem->ether);
+ if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
+ htonl(map->first_ip + id)) ||
+ (elem->match == MAC_FILLED &&
+ nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN,
+ elem->ether)))
+ goto nla_put_failure;
ipset_nest_end(skb, nested);
}
ipset_nest_end(skb, atd);
@@ -224,7 +225,7 @@ bitmap_ipmac_ttest(struct ip_set *set, void *value, u32 timeout, u32 flags)
return -EAGAIN;
case MAC_FILLED:
return (data->ether == NULL ||
- compare_ether_addr(data->ether, elem->ether) == 0) &&
+ ether_addr_equal(data->ether, elem->ether)) &&
!bitmap_expired(map, data->id);
}
return 0;
@@ -314,14 +315,16 @@ bitmap_ipmac_tlist(const struct ip_set *set,
} else
goto nla_put_failure;
}
- NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
- htonl(map->first_ip + id));
- if (elem->match == MAC_FILLED)
- NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN,
- elem->ether);
+ if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
+ htonl(map->first_ip + id)) ||
+ (elem->match == MAC_FILLED &&
+ nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN,
+ elem->ether)))
+ goto nla_put_failure;
timeout = elem->match == MAC_UNSET ? elem->timeout
: ip_set_timeout_get(elem->timeout);
- NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(timeout));
+ if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(timeout)))
+ goto nla_put_failure;
ipset_nest_end(skb, nested);
}
ipset_nest_end(skb, atd);
@@ -438,14 +441,16 @@ bitmap_ipmac_head(struct ip_set *set, struct sk_buff *skb)
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested)
goto nla_put_failure;
- NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
- NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
- NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
- NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
- htonl(sizeof(*map)
- + (map->last_ip - map->first_ip + 1) * map->dsize));
- if (with_timeout(map->timeout))
- NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+ if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) ||
+ nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)) ||
+ nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
+ nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
+ htonl(sizeof(*map) +
+ ((map->last_ip - map->first_ip + 1) *
+ map->dsize))) ||
+ (with_timeout(map->timeout) &&
+ nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))))
+ goto nla_put_failure;
ipset_nest_end(skb, nested);
return 0;
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 382ec28ba72e..b9f1fce7053b 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -96,8 +96,9 @@ bitmap_port_list(const struct ip_set *set,
} else
goto nla_put_failure;
}
- NLA_PUT_NET16(skb, IPSET_ATTR_PORT,
- htons(map->first_port + id));
+ if (nla_put_net16(skb, IPSET_ATTR_PORT,
+ htons(map->first_port + id)))
+ goto nla_put_failure;
ipset_nest_end(skb, nested);
}
ipset_nest_end(skb, atd);
@@ -183,10 +184,11 @@ bitmap_port_tlist(const struct ip_set *set,
} else
goto nla_put_failure;
}
- NLA_PUT_NET16(skb, IPSET_ATTR_PORT,
- htons(map->first_port + id));
- NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
- htonl(ip_set_timeout_get(members[id])));
+ if (nla_put_net16(skb, IPSET_ATTR_PORT,
+ htons(map->first_port + id)) ||
+ nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(members[id]))))
+ goto nla_put_failure;
ipset_nest_end(skb, nested);
}
ipset_nest_end(skb, adt);
@@ -320,13 +322,14 @@ bitmap_port_head(struct ip_set *set, struct sk_buff *skb)
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested)
goto nla_put_failure;
- NLA_PUT_NET16(skb, IPSET_ATTR_PORT, htons(map->first_port));
- NLA_PUT_NET16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port));
- NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
- NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
- htonl(sizeof(*map) + map->memsize));
- if (with_timeout(map->timeout))
- NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+ if (nla_put_net16(skb, IPSET_ATTR_PORT, htons(map->first_port)) ||
+ nla_put_net16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port)) ||
+ nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
+ nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
+ htonl(sizeof(*map) + map->memsize)) ||
+ (with_timeout(map->timeout) &&
+ nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))))
+ goto nla_put_failure;
ipset_nest_end(skb, nested);
return 0;
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index e6c1c9605a58..819c342f5b30 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1092,19 +1092,21 @@ dump_last:
ret = -EMSGSIZE;
goto release_refcount;
}
- NLA_PUT_U8(skb, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
- NLA_PUT_STRING(skb, IPSET_ATTR_SETNAME, set->name);
+ if (nla_put_u8(skb, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL) ||
+ nla_put_string(skb, IPSET_ATTR_SETNAME, set->name))
+ goto nla_put_failure;
if (dump_flags & IPSET_FLAG_LIST_SETNAME)
goto next_set;
switch (cb->args[2]) {
case 0:
/* Core header data */
- NLA_PUT_STRING(skb, IPSET_ATTR_TYPENAME,
- set->type->name);
- NLA_PUT_U8(skb, IPSET_ATTR_FAMILY,
- set->family);
- NLA_PUT_U8(skb, IPSET_ATTR_REVISION,
- set->revision);
+ if (nla_put_string(skb, IPSET_ATTR_TYPENAME,
+ set->type->name) ||
+ nla_put_u8(skb, IPSET_ATTR_FAMILY,
+ set->family) ||
+ nla_put_u8(skb, IPSET_ATTR_REVISION,
+ set->revision))
+ goto nla_put_failure;
ret = set->variant->head(set, skb);
if (ret < 0)
goto release_refcount;
@@ -1410,11 +1412,12 @@ ip_set_header(struct sock *ctnl, struct sk_buff *skb,
IPSET_CMD_HEADER);
if (!nlh2)
goto nlmsg_failure;
- NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
- NLA_PUT_STRING(skb2, IPSET_ATTR_SETNAME, set->name);
- NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, set->type->name);
- NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, set->family);
- NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, set->revision);
+ if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL) ||
+ nla_put_string(skb2, IPSET_ATTR_SETNAME, set->name) ||
+ nla_put_string(skb2, IPSET_ATTR_TYPENAME, set->type->name) ||
+ nla_put_u8(skb2, IPSET_ATTR_FAMILY, set->family) ||
+ nla_put_u8(skb2, IPSET_ATTR_REVISION, set->revision))
+ goto nla_put_failure;
nlmsg_end(skb2, nlh2);
ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
@@ -1469,11 +1472,12 @@ ip_set_type(struct sock *ctnl, struct sk_buff *skb,
IPSET_CMD_TYPE);
if (!nlh2)
goto nlmsg_failure;
- NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
- NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, typename);
- NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, family);
- NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, max);
- NLA_PUT_U8(skb2, IPSET_ATTR_REVISION_MIN, min);
+ if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL) ||
+ nla_put_string(skb2, IPSET_ATTR_TYPENAME, typename) ||
+ nla_put_u8(skb2, IPSET_ATTR_FAMILY, family) ||
+ nla_put_u8(skb2, IPSET_ATTR_REVISION, max) ||
+ nla_put_u8(skb2, IPSET_ATTR_REVISION_MIN, min))
+ goto nla_put_failure;
nlmsg_end(skb2, nlh2);
pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2->nlmsg_len);
@@ -1517,7 +1521,8 @@ ip_set_protocol(struct sock *ctnl, struct sk_buff *skb,
IPSET_CMD_PROTOCOL);
if (!nlh2)
goto nlmsg_failure;
- NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
+ if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL))
+ goto nla_put_failure;
nlmsg_end(skb2, nlh2);
ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
@@ -1613,7 +1618,7 @@ static struct nfnetlink_subsystem ip_set_netlink_subsys __read_mostly = {
static int
ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
{
- unsigned *op;
+ unsigned int *op;
void *data;
int copylen = *len, ret = 0;
@@ -1621,7 +1626,7 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
return -EPERM;
if (optval != SO_IP_SET)
return -EBADF;
- if (*len < sizeof(unsigned))
+ if (*len < sizeof(unsigned int))
return -EINVAL;
data = vmalloc(*len);
@@ -1631,7 +1636,7 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
ret = -EFAULT;
goto done;
}
- op = (unsigned *) data;
+ op = (unsigned int *) data;
if (*op < IP_SET_OP_VERSION) {
/* Check the version at the beginning of operations */
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index 5139dea6019e..a68dbd4f1e4e 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -81,7 +81,8 @@ hash_ip4_data_zero_out(struct hash_ip4_elem *elem)
static inline bool
hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *data)
{
- NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+ if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -94,9 +95,10 @@ hash_ip4_data_tlist(struct sk_buff *skb, const struct hash_ip4_elem *data)
const struct hash_ip4_telem *tdata =
(const struct hash_ip4_telem *)data;
- NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
- NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
- htonl(ip_set_timeout_get(tdata->timeout)));
+ if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
+ nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(tdata->timeout))))
+ goto nla_put_failure;
return 0;
@@ -262,7 +264,8 @@ ip6_netmask(union nf_inet_addr *ip, u8 prefix)
static bool
hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *data)
{
- NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+ if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -275,9 +278,10 @@ hash_ip6_data_tlist(struct sk_buff *skb, const struct hash_ip6_elem *data)
const struct hash_ip6_telem *e =
(const struct hash_ip6_telem *)data;
- NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
- NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
- htonl(ip_set_timeout_get(e->timeout)));
+ if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
+ nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(e->timeout))))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -364,6 +368,7 @@ hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
{
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 netmask, hbits;
+ size_t hsize;
struct ip_set_hash *h;
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
@@ -405,9 +410,12 @@ hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
h->timeout = IPSET_NO_TIMEOUT;
hbits = htable_bits(hashsize);
- h->table = ip_set_alloc(
- sizeof(struct htable)
- + jhash_size(hbits) * sizeof(struct hbucket));
+ hsize = htable_size(hbits);
+ if (hsize == 0) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table = ip_set_alloc(hsize);
if (!h->table) {
kfree(h);
return -ENOMEM;
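
The new hsize == 0 check guards against integer overflow: htable_size() returns 0 when the table's byte count would not fit in size_t, so create() fails cleanly instead of allocating a wrapped-around, undersized table. Roughly what the helper does (a sketch of ip_set_ahash.h's htable_size(), reconstructed here for illustration):

	static size_t htable_size(u8 hbits)
	{
		size_t hsize;

		if (hbits > 31)	/* jhash_size(hbits) is 1U << hbits */
			return 0;
		hsize = jhash_size(hbits);
		if ((((size_t)-1) - sizeof(struct htable)) /
		    sizeof(struct hbucket) < hsize)
			return 0;	/* total would overflow size_t */
		return hsize * sizeof(struct hbucket) + sizeof(struct htable);
	}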
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index 9c27e249c171..92722bb82eea 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -93,9 +93,10 @@ static bool
hash_ipport4_data_list(struct sk_buff *skb,
const struct hash_ipport4_elem *data)
{
- NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
- NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
- NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+ nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+ nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -109,12 +110,12 @@ hash_ipport4_data_tlist(struct sk_buff *skb,
const struct hash_ipport4_telem *tdata =
(const struct hash_ipport4_telem *)data;
- NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
- NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
- NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
- NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
- htonl(ip_set_timeout_get(tdata->timeout)));
-
+ if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
+ nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
+ nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+ nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(tdata->timeout))))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -308,9 +309,10 @@ static bool
hash_ipport6_data_list(struct sk_buff *skb,
const struct hash_ipport6_elem *data)
{
- NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
- NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
- NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
+ nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+ nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -324,11 +326,12 @@ hash_ipport6_data_tlist(struct sk_buff *skb,
const struct hash_ipport6_telem *e =
(const struct hash_ipport6_telem *)data;
- NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
- NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
- NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
- NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
- htonl(ip_set_timeout_get(e->timeout)));
+ if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
+ nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+ nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+ nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(e->timeout))))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -449,6 +452,7 @@ hash_ipport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
struct ip_set_hash *h;
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
+ size_t hsize;
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
@@ -476,9 +480,12 @@ hash_ipport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
h->timeout = IPSET_NO_TIMEOUT;
hbits = htable_bits(hashsize);
- h->table = ip_set_alloc(
- sizeof(struct htable)
- + jhash_size(hbits) * sizeof(struct hbucket));
+ hsize = htable_size(hbits);
+ if (hsize == 0) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table = ip_set_alloc(hsize);
if (!h->table) {
kfree(h);
return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index 9134057c0728..0637ce096def 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -94,10 +94,11 @@ static bool
hash_ipportip4_data_list(struct sk_buff *skb,
const struct hash_ipportip4_elem *data)
{
- NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
- NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2);
- NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
- NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+ nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) ||
+ nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+ nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -111,13 +112,13 @@ hash_ipportip4_data_tlist(struct sk_buff *skb,
const struct hash_ipportip4_telem *tdata =
(const struct hash_ipportip4_telem *)data;
- NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
- NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2);
- NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
- NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
- NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
- htonl(ip_set_timeout_get(tdata->timeout)));
-
+ if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
+ nla_put_ipaddr4(skb, IPSET_ATTR_IP2, tdata->ip2) ||
+ nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
+ nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+ nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(tdata->timeout))))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -319,10 +320,11 @@ static bool
hash_ipportip6_data_list(struct sk_buff *skb,
const struct hash_ipportip6_elem *data)
{
- NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
- NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
- NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
- NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
+ nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
+ nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+ nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -336,12 +338,13 @@ hash_ipportip6_data_tlist(struct sk_buff *skb,
const struct hash_ipportip6_telem *e =
(const struct hash_ipportip6_telem *)data;
- NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
- NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
- NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
- NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
- NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
- htonl(ip_set_timeout_get(e->timeout)));
+ if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
+ nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
+ nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+ nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+ nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(e->timeout))))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -467,6 +470,7 @@ hash_ipportip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
struct ip_set_hash *h;
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
+ size_t hsize;
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
@@ -494,9 +498,12 @@ hash_ipportip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
h->timeout = IPSET_NO_TIMEOUT;
hbits = htable_bits(hashsize);
- h->table = ip_set_alloc(
- sizeof(struct htable)
- + jhash_size(hbits) * sizeof(struct hbucket));
+ hsize = htable_size(hbits);
+ if (hsize == 0) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table = ip_set_alloc(hsize);
if (!h->table) {
kfree(h);
return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 5d05e6969862..1ce21ca976e1 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -124,13 +124,14 @@ hash_ipportnet4_data_list(struct sk_buff *skb,
{
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
- NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
- NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2);
- NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
- NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
- if (flags)
- NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+ if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+ nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) ||
+ nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+ nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
+ nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+ (flags &&
+ nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -145,16 +146,16 @@ hash_ipportnet4_data_tlist(struct sk_buff *skb,
(const struct hash_ipportnet4_telem *)data;
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
- NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
- NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2);
- NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
- NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
- NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
- htonl(ip_set_timeout_get(tdata->timeout)));
- if (flags)
- NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
-
+ if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
+ nla_put_ipaddr4(skb, IPSET_ATTR_IP2, tdata->ip2) ||
+ nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
+ nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
+ nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+ nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(tdata->timeout))) ||
+ (flags &&
+ nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -436,13 +437,14 @@ hash_ipportnet6_data_list(struct sk_buff *skb,
{
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
- NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
- NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
- NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
- NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
- if (flags)
- NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+ if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
+ nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
+ nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+ nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
+ nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+ (flags &&
+ nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -457,15 +459,16 @@ hash_ipportnet6_data_tlist(struct sk_buff *skb,
(const struct hash_ipportnet6_telem *)data;
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
- NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
- NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
- NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
- NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
- NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
- htonl(ip_set_timeout_get(e->timeout)));
- if (flags)
- NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+ if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
+ nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
+ nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+ nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
+ nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+ nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(e->timeout))) ||
+ (flags &&
+ nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -616,6 +619,7 @@ hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
struct ip_set_hash *h;
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
+ size_t hsize;
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
@@ -645,9 +649,12 @@ hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
h->timeout = IPSET_NO_TIMEOUT;
hbits = htable_bits(hashsize);
- h->table = ip_set_alloc(
- sizeof(struct htable)
- + jhash_size(hbits) * sizeof(struct hbucket));
+ hsize = htable_size(hbits);
+ if (hsize == 0) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table = ip_set_alloc(hsize);
if (!h->table) {
kfree(h);
return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 7c3d945517cf..c57a6a09906d 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -111,10 +111,11 @@ hash_net4_data_list(struct sk_buff *skb, const struct hash_net4_elem *data)
{
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
- NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
- if (flags)
- NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+ if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+ nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
+ (flags &&
+ nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -128,13 +129,13 @@ hash_net4_data_tlist(struct sk_buff *skb, const struct hash_net4_elem *data)
(const struct hash_net4_telem *)data;
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
- NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR, tdata->cidr);
- NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
- htonl(ip_set_timeout_get(tdata->timeout)));
- if (flags)
- NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
-
+ if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
+ nla_put_u8(skb, IPSET_ATTR_CIDR, tdata->cidr) ||
+ nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(tdata->timeout))) ||
+ (flags &&
+ nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -339,10 +340,11 @@ hash_net6_data_list(struct sk_buff *skb, const struct hash_net6_elem *data)
{
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
- NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
- if (flags)
- NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+ if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
+ nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
+ (flags &&
+ nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -356,12 +358,13 @@ hash_net6_data_tlist(struct sk_buff *skb, const struct hash_net6_elem *data)
(const struct hash_net6_telem *)data;
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
- NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR, e->cidr);
- NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
- htonl(ip_set_timeout_get(e->timeout)));
- if (flags)
- NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+ if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
+ nla_put_u8(skb, IPSET_ATTR_CIDR, e->cidr) ||
+ nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(e->timeout))) ||
+ (flags &&
+ nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -460,6 +463,7 @@ hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
struct ip_set_hash *h;
u8 hbits;
+ size_t hsize;
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
@@ -489,9 +493,12 @@ hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
h->timeout = IPSET_NO_TIMEOUT;
hbits = htable_bits(hashsize);
- h->table = ip_set_alloc(
- sizeof(struct htable)
- + jhash_size(hbits) * sizeof(struct hbucket));
+ hsize = htable_size(hbits);
+ if (hsize == 0) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table = ip_set_alloc(hsize);
if (!h->table) {
kfree(h);
return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index f24037ff4322..ee863943c826 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -252,11 +252,12 @@ hash_netiface4_data_list(struct sk_buff *skb,
if (data->nomatch)
flags |= IPSET_FLAG_NOMATCH;
- NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
- NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
- if (flags)
- NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+ if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+ nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
+ nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
+ (flags &&
+ nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -273,13 +274,14 @@ hash_netiface4_data_tlist(struct sk_buff *skb,
if (data->nomatch)
flags |= IPSET_FLAG_NOMATCH;
- NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
- NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
- if (flags)
- NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
- NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
- htonl(ip_set_timeout_get(tdata->timeout)));
+ if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+ nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
+ nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
+ (flags &&
+ nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))) ||
+ nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(tdata->timeout))))
+ goto nla_put_failure;
return 0;
@@ -555,11 +557,12 @@ hash_netiface6_data_list(struct sk_buff *skb,
if (data->nomatch)
flags |= IPSET_FLAG_NOMATCH;
- NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
- NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
- if (flags)
- NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+ if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
+ nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
+ nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
+ (flags &&
+ nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -576,13 +579,14 @@ hash_netiface6_data_tlist(struct sk_buff *skb,
if (data->nomatch)
flags |= IPSET_FLAG_NOMATCH;
- NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
- NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
- if (flags)
- NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
- NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
- htonl(ip_set_timeout_get(e->timeout)));
+ if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
+ nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
+ nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
+ (flags &&
+ nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))) ||
+ nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(e->timeout))))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -722,6 +726,7 @@ hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
struct ip_set_hash *h;
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
+ size_t hsize;
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
@@ -752,9 +757,12 @@ hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
h->ahash_max = AHASH_MAX_SIZE;
hbits = htable_bits(hashsize);
- h->table = ip_set_alloc(
- sizeof(struct htable)
- + jhash_size(hbits) * sizeof(struct hbucket));
+ hsize = htable_size(hbits);
+ if (hsize == 0) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table = ip_set_alloc(hsize);
if (!h->table) {
kfree(h);
return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index ce2e77100b64..fc3143a2d41b 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -124,12 +124,13 @@ hash_netport4_data_list(struct sk_buff *skb,
{
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
- NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
- NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
- NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
- if (flags)
- NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+ if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+ nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+ nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
+ nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+ (flags &&
+ nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -144,15 +145,15 @@ hash_netport4_data_tlist(struct sk_buff *skb,
(const struct hash_netport4_telem *)data;
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
- NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
- NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
- NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
- NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
- htonl(ip_set_timeout_get(tdata->timeout)));
- if (flags)
- NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
-
+ if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
+ nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
+ nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
+ nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+ nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(tdata->timeout))) ||
+ (flags &&
+ nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -402,12 +403,13 @@ hash_netport6_data_list(struct sk_buff *skb,
{
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
- NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
- NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
- NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
- if (flags)
- NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+ if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
+ nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+ nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
+ nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+ (flags &&
+ nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -422,14 +424,15 @@ hash_netport6_data_tlist(struct sk_buff *skb,
(const struct hash_netport6_telem *)data;
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
- NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
- NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
- NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
- NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
- htonl(ip_set_timeout_get(e->timeout)));
- if (flags)
- NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+ if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
+ nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+ nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
+ nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+ nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(e->timeout))) ||
+ (flags &&
+ nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -572,6 +575,7 @@ hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
struct ip_set_hash *h;
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
+ size_t hsize;
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
@@ -601,9 +605,12 @@ hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
h->timeout = IPSET_NO_TIMEOUT;
hbits = htable_bits(hashsize);
- h->table = ip_set_alloc(
- sizeof(struct htable)
- + jhash_size(hbits) * sizeof(struct hbucket));
+ hsize = htable_size(hbits);
+ if (hsize == 0) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table = ip_set_alloc(hsize);
if (!h->table) {
kfree(h);
return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 7e095f9005f0..6cb1225765f9 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -402,12 +402,13 @@ list_set_head(struct ip_set *set, struct sk_buff *skb)
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested)
goto nla_put_failure;
- NLA_PUT_NET32(skb, IPSET_ATTR_SIZE, htonl(map->size));
- if (with_timeout(map->timeout))
- NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
- NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
- NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
- htonl(sizeof(*map) + map->size * map->dsize));
+ if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) ||
+ (with_timeout(map->timeout) &&
+ nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))) ||
+ nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
+ nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
+ htonl(sizeof(*map) + map->size * map->dsize)))
+ goto nla_put_failure;
ipset_nest_end(skb, nested);
return 0;
@@ -442,13 +443,15 @@ list_set_list(const struct ip_set *set,
} else
goto nla_put_failure;
}
- NLA_PUT_STRING(skb, IPSET_ATTR_NAME,
- ip_set_name_byindex(e->id));
+ if (nla_put_string(skb, IPSET_ATTR_NAME,
+ ip_set_name_byindex(e->id)))
+ goto nla_put_failure;
if (with_timeout(map->timeout)) {
const struct set_telem *te =
(const struct set_telem *) e;
- NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
- htonl(ip_set_timeout_get(te->timeout)));
+ __be32 to = htonl(ip_set_timeout_get(te->timeout));
+ if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, to))
+ goto nla_put_failure;
}
ipset_nest_end(skb, nested);
}
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index 52856178c9d7..64f9e8f13207 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -313,7 +313,7 @@ vs_fix_ack_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
* Assumes already checked proto==IPPROTO_TCP and diff!=0.
*/
static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq,
- unsigned flag, __u32 seq, int diff)
+ unsigned int flag, __u32 seq, int diff)
{
/* spinlock is to keep updating cp->flags atomic */
spin_lock(&cp->lock);
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 29fa5badde75..1548df9a7524 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -86,42 +86,42 @@ struct ip_vs_aligned_lock
static struct ip_vs_aligned_lock
__ip_vs_conntbl_lock_array[CT_LOCKARRAY_SIZE] __cacheline_aligned;
-static inline void ct_read_lock(unsigned key)
+static inline void ct_read_lock(unsigned int key)
{
read_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
-static inline void ct_read_unlock(unsigned key)
+static inline void ct_read_unlock(unsigned int key)
{
read_unlock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
-static inline void ct_write_lock(unsigned key)
+static inline void ct_write_lock(unsigned int key)
{
write_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
-static inline void ct_write_unlock(unsigned key)
+static inline void ct_write_unlock(unsigned int key)
{
write_unlock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
-static inline void ct_read_lock_bh(unsigned key)
+static inline void ct_read_lock_bh(unsigned int key)
{
read_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
-static inline void ct_read_unlock_bh(unsigned key)
+static inline void ct_read_unlock_bh(unsigned int key)
{
read_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
-static inline void ct_write_lock_bh(unsigned key)
+static inline void ct_write_lock_bh(unsigned int key)
{
write_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
-static inline void ct_write_unlock_bh(unsigned key)
+static inline void ct_write_unlock_bh(unsigned int key)
{
write_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
@@ -130,7 +130,7 @@ static inline void ct_write_unlock_bh(unsigned key)
/*
* Returns hash value for IPVS connection entry
*/
-static unsigned int ip_vs_conn_hashkey(struct net *net, int af, unsigned proto,
+static unsigned int ip_vs_conn_hashkey(struct net *net, int af, unsigned int proto,
const union nf_inet_addr *addr,
__be16 port)
{
@@ -188,7 +188,7 @@ static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp)
*/
static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
{
- unsigned hash;
+ unsigned int hash;
int ret;
if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
@@ -224,7 +224,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
*/
static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
{
- unsigned hash;
+ unsigned int hash;
int ret;
/* unhash it and decrease its reference counter */
@@ -257,7 +257,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
static inline struct ip_vs_conn *
__ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
{
- unsigned hash;
+ unsigned int hash;
struct ip_vs_conn *cp;
struct hlist_node *n;
@@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(ip_vs_conn_in_get_proto);
/* Get reference to connection template */
struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
{
- unsigned hash;
+ unsigned int hash;
struct ip_vs_conn *cp;
struct hlist_node *n;
@@ -394,7 +394,7 @@ struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
* p->vaddr, p->vport: pkt dest address (foreign host) */
struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
{
- unsigned hash;
+ unsigned int hash;
struct ip_vs_conn *cp, *ret=NULL;
struct hlist_node *n;
@@ -548,6 +548,7 @@ static inline void
ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
{
unsigned int conn_flags;
+ __u32 flags;
/* if dest is NULL, then return directly */
if (!dest)
@@ -559,17 +560,19 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
conn_flags = atomic_read(&dest->conn_flags);
if (cp->protocol != IPPROTO_UDP)
conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
+ flags = cp->flags;
/* Bind with the destination and its corresponding transmitter */
- if (cp->flags & IP_VS_CONN_F_SYNC) {
+ if (flags & IP_VS_CONN_F_SYNC) {
/* if the connection is not template and is created
* by sync, preserve the activity flag.
*/
- if (!(cp->flags & IP_VS_CONN_F_TEMPLATE))
+ if (!(flags & IP_VS_CONN_F_TEMPLATE))
conn_flags &= ~IP_VS_CONN_F_INACTIVE;
/* connections inherit forwarding method from dest */
- cp->flags &= ~IP_VS_CONN_F_FWD_MASK;
+ flags &= ~(IP_VS_CONN_F_FWD_MASK | IP_VS_CONN_F_NOOUTPUT);
}
- cp->flags |= conn_flags;
+ flags |= conn_flags;
+ cp->flags = flags;
cp->dest = dest;
IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
@@ -584,12 +587,12 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
atomic_read(&dest->refcnt));
/* Update the connection counters */
- if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
- /* It is a normal connection, so increase the inactive
- connection counter because it is in TCP SYNRECV
- state (inactive) or other protocol inacive state */
- if ((cp->flags & IP_VS_CONN_F_SYNC) &&
- (!(cp->flags & IP_VS_CONN_F_INACTIVE)))
+ if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
+ /* It is a normal connection, so modify the counters
+ * according to the flags; the protocol can update
+ * them later on a state change
+ */
+ if (!(flags & IP_VS_CONN_F_INACTIVE))
atomic_inc(&dest->activeconns);
else
atomic_inc(&dest->inactconns);
@@ -613,14 +616,40 @@ struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp)
{
struct ip_vs_dest *dest;
- if ((cp) && (!cp->dest)) {
- dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr,
- cp->dport, &cp->vaddr, cp->vport,
- cp->protocol, cp->fwmark, cp->flags);
+ dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr,
+ cp->dport, &cp->vaddr, cp->vport,
+ cp->protocol, cp->fwmark, cp->flags);
+ if (dest) {
+ struct ip_vs_proto_data *pd;
+
+ spin_lock(&cp->lock);
+ if (cp->dest) {
+ spin_unlock(&cp->lock);
+ return dest;
+ }
+
+ /* Applications depend on the forwarding method, so it is
+ * safer to always reassign them when binding the dest */
+ if (cp->app)
+ ip_vs_unbind_app(cp);
+
ip_vs_bind_dest(cp, dest);
- return dest;
- } else
- return NULL;
+ spin_unlock(&cp->lock);
+
+ /* Update its packet transmitter */
+ cp->packet_xmit = NULL;
+#ifdef CONFIG_IP_VS_IPV6
+ if (cp->af == AF_INET6)
+ ip_vs_bind_xmit_v6(cp);
+ else
+#endif
+ ip_vs_bind_xmit(cp);
+
+ pd = ip_vs_proto_data_get(ip_vs_conn_net(cp), cp->protocol);
+ if (pd && atomic_read(&pd->appcnt))
+ ip_vs_bind_app(cp, pd->pp);
+ }
+ return dest;
}
@@ -743,7 +772,8 @@ int ip_vs_check_template(struct ip_vs_conn *ct)
static void ip_vs_conn_expire(unsigned long data)
{
struct ip_vs_conn *cp = (struct ip_vs_conn *)data;
- struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
+ struct net *net = ip_vs_conn_net(cp);
+ struct netns_ipvs *ipvs = net_ipvs(net);
cp->timeout = 60*HZ;
@@ -808,6 +838,9 @@ static void ip_vs_conn_expire(unsigned long data)
atomic_read(&cp->refcnt)-1,
atomic_read(&cp->n_control));
+ if (ipvs->sync_state & IP_VS_STATE_MASTER)
+ ip_vs_sync_conn(net, cp, sysctl_sync_threshold(ipvs));
+
ip_vs_conn_put(cp);
}
@@ -824,7 +857,7 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp)
*/
struct ip_vs_conn *
ip_vs_conn_new(const struct ip_vs_conn_param *p,
- const union nf_inet_addr *daddr, __be16 dport, unsigned flags,
+ const union nf_inet_addr *daddr, __be16 dport, unsigned int flags,
struct ip_vs_dest *dest, __u32 fwmark)
{
struct ip_vs_conn *cp;
@@ -881,6 +914,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
/* Set its state and timeout */
cp->state = 0;
cp->timeout = 3*HZ;
+ cp->sync_endtime = jiffies & ~3UL;
/* Bind its packet transmitter */
#ifdef CONFIG_IP_VS_IPV6
@@ -1057,7 +1091,7 @@ static const struct file_operations ip_vs_conn_fops = {
.release = seq_release_net,
};
-static const char *ip_vs_origin_name(unsigned flags)
+static const char *ip_vs_origin_name(unsigned int flags)
{
if (flags & IP_VS_CONN_F_SYNC)
return "SYNC";
@@ -1169,7 +1203,7 @@ void ip_vs_random_dropentry(struct net *net)
* Randomly scan 1/32 of the whole table every second
*/
for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
- unsigned hash = net_random() & ip_vs_conn_tab_mask;
+ unsigned int hash = net_random() & ip_vs_conn_tab_mask;
struct hlist_node *n;
/*
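
The new cp->sync_endtime field, initialized above with "jiffies & ~3UL", keeps a jiffies deadline in the upper bits and reuses the two low bits as a retry counter (the sync code later reads them back with "orig & 3"). A standalone sketch of that packing scheme, with illustrative names:

#include <stdio.h>

#define RETRY_MASK 3UL	/* low two bits carry the retry count (0..3) */

static unsigned long pack(unsigned long deadline, unsigned int retries)
{
	return (deadline & ~RETRY_MASK) | (retries & RETRY_MASK);
}

int main(void)
{
	unsigned long v = pack(1000, 2);

	printf("deadline=%lu retries=%lu\n", v & ~RETRY_MASK, v & RETRY_MASK);
	return 0;
}
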
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 2555816e7788..a54b018c6eea 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -80,7 +80,7 @@ static atomic_t ipvs_netns_cnt = ATOMIC_INIT(0);
#define icmp_id(icmph) (((icmph)->un).echo.id)
#define icmpv6_id(icmph) (icmph->icmp6_dataun.u_echo.identifier)
-const char *ip_vs_proto_name(unsigned proto)
+const char *ip_vs_proto_name(unsigned int proto)
{
static char buf[20];
@@ -1613,34 +1613,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
else
pkts = atomic_add_return(1, &cp->in_pkts);
- if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
- cp->protocol == IPPROTO_SCTP) {
- if ((cp->state == IP_VS_SCTP_S_ESTABLISHED &&
- (pkts % sysctl_sync_period(ipvs)
- == sysctl_sync_threshold(ipvs))) ||
- (cp->old_state != cp->state &&
- ((cp->state == IP_VS_SCTP_S_CLOSED) ||
- (cp->state == IP_VS_SCTP_S_SHUT_ACK_CLI) ||
- (cp->state == IP_VS_SCTP_S_SHUT_ACK_SER)))) {
- ip_vs_sync_conn(net, cp);
- goto out;
- }
- }
-
- /* Keep this block last: TCP and others with pp->num_states <= 1 */
- else if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
- (((cp->protocol != IPPROTO_TCP ||
- cp->state == IP_VS_TCP_S_ESTABLISHED) &&
- (pkts % sysctl_sync_period(ipvs)
- == sysctl_sync_threshold(ipvs))) ||
- ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) &&
- ((cp->state == IP_VS_TCP_S_FIN_WAIT) ||
- (cp->state == IP_VS_TCP_S_CLOSE) ||
- (cp->state == IP_VS_TCP_S_CLOSE_WAIT) ||
- (cp->state == IP_VS_TCP_S_TIME_WAIT)))))
- ip_vs_sync_conn(net, cp);
-out:
- cp->old_state = cp->state;
+ if (ipvs->sync_state & IP_VS_STATE_MASTER)
+ ip_vs_sync_conn(net, cp, pkts);
ip_vs_conn_put(cp);
return ret;
@@ -1924,6 +1898,7 @@ protocol_fail:
control_fail:
ip_vs_estimator_net_cleanup(net);
estimator_fail:
+ net->ipvs = NULL;
return -ENOMEM;
}
@@ -1936,6 +1911,7 @@ static void __net_exit __ip_vs_cleanup(struct net *net)
ip_vs_control_net_cleanup(net);
ip_vs_estimator_net_cleanup(net);
IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen);
+ net->ipvs = NULL;
}
static void __net_exit __ip_vs_dev_cleanup(struct net *net)
@@ -1993,10 +1969,18 @@ static int __init ip_vs_init(void)
goto cleanup_dev;
}
+ ret = ip_vs_register_nl_ioctl();
+ if (ret < 0) {
+ pr_err("can't register netlink/ioctl.\n");
+ goto cleanup_hooks;
+ }
+
pr_info("ipvs loaded.\n");
return ret;
+cleanup_hooks:
+ nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
cleanup_dev:
unregister_pernet_device(&ipvs_core_dev_ops);
cleanup_sub:
@@ -2012,6 +1996,7 @@ exit:
static void __exit ip_vs_cleanup(void)
{
+ ip_vs_unregister_nl_ioctl();
nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
unregister_pernet_device(&ipvs_core_dev_ops);
unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
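
ip_vs_init() above gains a cleanup_hooks label so a failed netlink/ioctl registration unwinds every earlier step in reverse order. A compact standalone sketch of this goto-based unwind idiom (step_a/step_b/undo_a are placeholders, not kernel APIs):

#include <stdio.h>

static int step_a(void) { return 0; }	/* e.g. register hooks */
static int step_b(void) { return -1; }	/* e.g. register netlink/ioctl */
static void undo_a(void) { printf("undo step a\n"); }

static int init_example(void)
{
	int ret;

	ret = step_a();
	if (ret < 0)
		goto out;
	ret = step_b();
	if (ret < 0)
		goto cleanup_a;	/* unwind in reverse order */
	return 0;

cleanup_a:
	undo_a();
out:
	return ret;
}

int main(void)
{
	printf("init_example() = %d\n", init_example());
	return 0;
}
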
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index b3afe189af61..dd811b8dd97c 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -265,11 +265,11 @@ static struct list_head ip_vs_svc_fwm_table[IP_VS_SVC_TAB_SIZE];
/*
* Returns hash value for virtual service
*/
-static inline unsigned
-ip_vs_svc_hashkey(struct net *net, int af, unsigned proto,
+static inline unsigned int
+ip_vs_svc_hashkey(struct net *net, int af, unsigned int proto,
const union nf_inet_addr *addr, __be16 port)
{
- register unsigned porth = ntohs(port);
+ register unsigned int porth = ntohs(port);
__be32 addr_fold = addr->ip;
#ifdef CONFIG_IP_VS_IPV6
@@ -286,7 +286,7 @@ ip_vs_svc_hashkey(struct net *net, int af, unsigned proto,
/*
* Returns hash value of fwmark for virtual service lookup
*/
-static inline unsigned ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark)
+static inline unsigned int ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark)
{
return (((size_t)net>>8) ^ fwmark) & IP_VS_SVC_TAB_MASK;
}
@@ -298,7 +298,7 @@ static inline unsigned ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark)
*/
static int ip_vs_svc_hash(struct ip_vs_service *svc)
{
- unsigned hash;
+ unsigned int hash;
if (svc->flags & IP_VS_SVC_F_HASHED) {
pr_err("%s(): request for already hashed, called from %pF\n",
@@ -361,7 +361,7 @@ static inline struct ip_vs_service *
__ip_vs_service_find(struct net *net, int af, __u16 protocol,
const union nf_inet_addr *vaddr, __be16 vport)
{
- unsigned hash;
+ unsigned int hash;
struct ip_vs_service *svc;
/* Check for "full" addressed entries */
@@ -388,7 +388,7 @@ __ip_vs_service_find(struct net *net, int af, __u16 protocol,
static inline struct ip_vs_service *
__ip_vs_svc_fwm_find(struct net *net, int af, __u32 fwmark)
{
- unsigned hash;
+ unsigned int hash;
struct ip_vs_service *svc;
/* Check for fwmark addressed entries */
@@ -489,11 +489,11 @@ __ip_vs_unbind_svc(struct ip_vs_dest *dest)
/*
* Returns hash value for real service
*/
-static inline unsigned ip_vs_rs_hashkey(int af,
+static inline unsigned int ip_vs_rs_hashkey(int af,
const union nf_inet_addr *addr,
__be16 port)
{
- register unsigned porth = ntohs(port);
+ register unsigned int porth = ntohs(port);
__be32 addr_fold = addr->ip;
#ifdef CONFIG_IP_VS_IPV6
@@ -512,7 +512,7 @@ static inline unsigned ip_vs_rs_hashkey(int af,
*/
static int ip_vs_rs_hash(struct netns_ipvs *ipvs, struct ip_vs_dest *dest)
{
- unsigned hash;
+ unsigned int hash;
if (!list_empty(&dest->d_list)) {
return 0;
@@ -555,7 +555,7 @@ ip_vs_lookup_real_service(struct net *net, int af, __u16 protocol,
__be16 dport)
{
struct netns_ipvs *ipvs = net_ipvs(net);
- unsigned hash;
+ unsigned int hash;
struct ip_vs_dest *dest;
/*
@@ -842,7 +842,7 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
struct ip_vs_dest **dest_p)
{
struct ip_vs_dest *dest;
- unsigned atype;
+ unsigned int atype;
EnterFunction(2);
@@ -1599,6 +1599,10 @@ static int ip_vs_zero_all(struct net *net)
}
#ifdef CONFIG_SYSCTL
+
+static int zero;
+static int three = 3;
+
static int
proc_do_defense_mode(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -1632,7 +1636,8 @@ proc_do_sync_threshold(ctl_table *table, int write,
memcpy(val, valp, sizeof(val));
rc = proc_dointvec(table, write, buffer, lenp, ppos);
- if (write && (valp[0] < 0 || valp[1] < 0 || valp[0] >= valp[1])) {
+ if (write && (valp[0] < 0 || valp[1] < 0 ||
+ (valp[0] >= valp[1] && valp[1]))) {
/* Restore the correct value */
memcpy(valp, val, sizeof(val));
}
@@ -1652,9 +1657,24 @@ proc_do_sync_mode(ctl_table *table, int write,
if ((*valp < 0) || (*valp > 1)) {
/* Restore the correct value */
*valp = val;
- } else {
- struct net *net = current->nsproxy->net_ns;
- ip_vs_sync_switch_mode(net, val);
+ }
+ }
+ return rc;
+}
+
+static int
+proc_do_sync_ports(ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int *valp = table->data;
+ int val = *valp;
+ int rc;
+
+ rc = proc_dointvec(table, write, buffer, lenp, ppos);
+ if (write && (*valp != val)) {
+ if (*valp < 1 || !is_power_of_2(*valp)) {
+ /* Restore the correct value */
+ *valp = val;
}
}
return rc;
@@ -1718,6 +1738,24 @@ static struct ctl_table vs_vars[] = {
.proc_handler = &proc_do_sync_mode,
},
{
+ .procname = "sync_ports",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_do_sync_ports,
+ },
+ {
+ .procname = "sync_qlen_max",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "sync_sock_size",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "cache_bypass",
.maxlen = sizeof(int),
.mode = 0644,
@@ -1743,6 +1781,20 @@ static struct ctl_table vs_vars[] = {
.proc_handler = proc_do_sync_threshold,
},
{
+ .procname = "sync_refresh_period",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ {
+ .procname = "sync_retries",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &three,
+ },
+ {
.procname = "nat_icmp_send",
.maxlen = sizeof(int),
.mode = 0644,
@@ -1846,13 +1898,6 @@ static struct ctl_table vs_vars[] = {
{ }
};
-const struct ctl_path net_vs_ctl_path[] = {
- { .procname = "net", },
- { .procname = "ipv4", },
- { .procname = "vs", },
- { }
-};
-EXPORT_SYMBOL_GPL(net_vs_ctl_path);
#endif
#ifdef CONFIG_PROC_FS
@@ -1867,7 +1912,7 @@ struct ip_vs_iter {
* Write the contents of the VS rule table to a PROCfs file.
* (It is kept just for backward compatibility)
*/
-static inline const char *ip_vs_fwd_name(unsigned flags)
+static inline const char *ip_vs_fwd_name(unsigned int flags)
{
switch (flags & IP_VS_CONN_F_FWD_MASK) {
case IP_VS_CONN_F_LOCALNODE:
@@ -2816,17 +2861,17 @@ static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type,
ip_vs_copy_stats(&ustats, stats);
- NLA_PUT_U32(skb, IPVS_STATS_ATTR_CONNS, ustats.conns);
- NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPKTS, ustats.inpkts);
- NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPKTS, ustats.outpkts);
- NLA_PUT_U64(skb, IPVS_STATS_ATTR_INBYTES, ustats.inbytes);
- NLA_PUT_U64(skb, IPVS_STATS_ATTR_OUTBYTES, ustats.outbytes);
- NLA_PUT_U32(skb, IPVS_STATS_ATTR_CPS, ustats.cps);
- NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPPS, ustats.inpps);
- NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPPS, ustats.outpps);
- NLA_PUT_U32(skb, IPVS_STATS_ATTR_INBPS, ustats.inbps);
- NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTBPS, ustats.outbps);
-
+ if (nla_put_u32(skb, IPVS_STATS_ATTR_CONNS, ustats.conns) ||
+ nla_put_u32(skb, IPVS_STATS_ATTR_INPKTS, ustats.inpkts) ||
+ nla_put_u32(skb, IPVS_STATS_ATTR_OUTPKTS, ustats.outpkts) ||
+ nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, ustats.inbytes) ||
+ nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, ustats.outbytes) ||
+ nla_put_u32(skb, IPVS_STATS_ATTR_CPS, ustats.cps) ||
+ nla_put_u32(skb, IPVS_STATS_ATTR_INPPS, ustats.inpps) ||
+ nla_put_u32(skb, IPVS_STATS_ATTR_OUTPPS, ustats.outpps) ||
+ nla_put_u32(skb, IPVS_STATS_ATTR_INBPS, ustats.inbps) ||
+ nla_put_u32(skb, IPVS_STATS_ATTR_OUTBPS, ustats.outbps))
+ goto nla_put_failure;
nla_nest_end(skb, nl_stats);
return 0;
@@ -2847,23 +2892,25 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
if (!nl_service)
return -EMSGSIZE;
- NLA_PUT_U16(skb, IPVS_SVC_ATTR_AF, svc->af);
-
+ if (nla_put_u16(skb, IPVS_SVC_ATTR_AF, svc->af))
+ goto nla_put_failure;
if (svc->fwmark) {
- NLA_PUT_U32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark);
+ if (nla_put_u32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark))
+ goto nla_put_failure;
} else {
- NLA_PUT_U16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol);
- NLA_PUT(skb, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr);
- NLA_PUT_U16(skb, IPVS_SVC_ATTR_PORT, svc->port);
+ if (nla_put_u16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol) ||
+ nla_put(skb, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr) ||
+ nla_put_u16(skb, IPVS_SVC_ATTR_PORT, svc->port))
+ goto nla_put_failure;
}
- NLA_PUT_STRING(skb, IPVS_SVC_ATTR_SCHED_NAME, svc->scheduler->name);
- if (svc->pe)
- NLA_PUT_STRING(skb, IPVS_SVC_ATTR_PE_NAME, svc->pe->name);
- NLA_PUT(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags);
- NLA_PUT_U32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ);
- NLA_PUT_U32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask);
-
+ if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, svc->scheduler->name) ||
+ (svc->pe &&
+ nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, svc->pe->name)) ||
+ nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) ||
+ nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) ||
+ nla_put_u32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask))
+ goto nla_put_failure;
if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &svc->stats))
goto nla_put_failure;
@@ -3038,21 +3085,22 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
if (!nl_dest)
return -EMSGSIZE;
- NLA_PUT(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr);
- NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
-
- NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
- NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
- NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
- NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
- NLA_PUT_U32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS,
- atomic_read(&dest->activeconns));
- NLA_PUT_U32(skb, IPVS_DEST_ATTR_INACT_CONNS,
- atomic_read(&dest->inactconns));
- NLA_PUT_U32(skb, IPVS_DEST_ATTR_PERSIST_CONNS,
- atomic_read(&dest->persistconns));
-
+ if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
+ nla_put_u16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
+ nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
+ (atomic_read(&dest->conn_flags) &
+ IP_VS_CONN_F_FWD_MASK)) ||
+ nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
+ atomic_read(&dest->weight)) ||
+ nla_put_u32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold) ||
+ nla_put_u32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold) ||
+ nla_put_u32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS,
+ atomic_read(&dest->activeconns)) ||
+ nla_put_u32(skb, IPVS_DEST_ATTR_INACT_CONNS,
+ atomic_read(&dest->inactconns)) ||
+ nla_put_u32(skb, IPVS_DEST_ATTR_PERSIST_CONNS,
+ atomic_read(&dest->persistconns)))
+ goto nla_put_failure;
if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &dest->stats))
goto nla_put_failure;
@@ -3181,10 +3229,10 @@ static int ip_vs_genl_fill_daemon(struct sk_buff *skb, __be32 state,
if (!nl_daemon)
return -EMSGSIZE;
- NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_STATE, state);
- NLA_PUT_STRING(skb, IPVS_DAEMON_ATTR_MCAST_IFN, mcast_ifn);
- NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_SYNC_ID, syncid);
-
+ if (nla_put_u32(skb, IPVS_DAEMON_ATTR_STATE, state) ||
+ nla_put_string(skb, IPVS_DAEMON_ATTR_MCAST_IFN, mcast_ifn) ||
+ nla_put_u32(skb, IPVS_DAEMON_ATTR_SYNC_ID, syncid))
+ goto nla_put_failure;
nla_nest_end(skb, nl_daemon);
return 0;
@@ -3473,21 +3521,26 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
__ip_vs_get_timeouts(net, &t);
#ifdef CONFIG_IP_VS_PROTO_TCP
- NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP, t.tcp_timeout);
- NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN,
- t.tcp_fin_timeout);
+ if (nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP,
+ t.tcp_timeout) ||
+ nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN,
+ t.tcp_fin_timeout))
+ goto nla_put_failure;
#endif
#ifdef CONFIG_IP_VS_PROTO_UDP
- NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_UDP, t.udp_timeout);
+ if (nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_UDP, t.udp_timeout))
+ goto nla_put_failure;
#endif
break;
}
case IPVS_CMD_GET_INFO:
- NLA_PUT_U32(msg, IPVS_INFO_ATTR_VERSION, IP_VS_VERSION_CODE);
- NLA_PUT_U32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE,
- ip_vs_conn_tab_size);
+ if (nla_put_u32(msg, IPVS_INFO_ATTR_VERSION,
+ IP_VS_VERSION_CODE) ||
+ nla_put_u32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE,
+ ip_vs_conn_tab_size))
+ goto nla_put_failure;
break;
}
@@ -3654,6 +3707,12 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net)
tbl[idx++].data = &ipvs->sysctl_snat_reroute;
ipvs->sysctl_sync_ver = 1;
tbl[idx++].data = &ipvs->sysctl_sync_ver;
+ ipvs->sysctl_sync_ports = 1;
+ tbl[idx++].data = &ipvs->sysctl_sync_ports;
+ ipvs->sysctl_sync_qlen_max = nr_free_buffer_pages() / 32;
+ tbl[idx++].data = &ipvs->sysctl_sync_qlen_max;
+ ipvs->sysctl_sync_sock_size = 0;
+ tbl[idx++].data = &ipvs->sysctl_sync_sock_size;
tbl[idx++].data = &ipvs->sysctl_cache_bypass;
tbl[idx++].data = &ipvs->sysctl_expire_nodest_conn;
tbl[idx++].data = &ipvs->sysctl_expire_quiescent_template;
@@ -3661,11 +3720,14 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net)
ipvs->sysctl_sync_threshold[1] = DEFAULT_SYNC_PERIOD;
tbl[idx].data = &ipvs->sysctl_sync_threshold;
tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold);
+ ipvs->sysctl_sync_refresh_period = DEFAULT_SYNC_REFRESH_PERIOD;
+ tbl[idx++].data = &ipvs->sysctl_sync_refresh_period;
+ ipvs->sysctl_sync_retries = clamp_t(int, DEFAULT_SYNC_RETRIES, 0, 3);
+ tbl[idx++].data = &ipvs->sysctl_sync_retries;
tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;
- ipvs->sysctl_hdr = register_net_sysctl_table(net, net_vs_ctl_path,
- tbl);
+ ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
if (ipvs->sysctl_hdr == NULL) {
if (!net_eq(net, &init_net))
kfree(tbl);
@@ -3680,7 +3742,7 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net)
return 0;
}
-void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net)
+void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
@@ -3692,7 +3754,7 @@ void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net)
#else
int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; }
-void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net) { }
+void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) { }
#endif
@@ -3750,21 +3812,10 @@ void __net_exit ip_vs_control_net_cleanup(struct net *net)
free_percpu(ipvs->tot_stats.cpustats);
}
-int __init ip_vs_control_init(void)
+int __init ip_vs_register_nl_ioctl(void)
{
- int idx;
int ret;
- EnterFunction(2);
-
- /* Initialize svc_table, ip_vs_svc_fwm_table, rs_table */
- for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
- INIT_LIST_HEAD(&ip_vs_svc_table[idx]);
- INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
- }
-
- smp_wmb(); /* Do we really need it now ? */
-
ret = nf_register_sockopt(&ip_vs_sockopts);
if (ret) {
pr_err("cannot register sockopt.\n");
@@ -3776,28 +3827,47 @@ int __init ip_vs_control_init(void)
pr_err("cannot register Generic Netlink interface.\n");
goto err_genl;
}
-
- ret = register_netdevice_notifier(&ip_vs_dst_notifier);
- if (ret < 0)
- goto err_notf;
-
- LeaveFunction(2);
return 0;
-err_notf:
- ip_vs_genl_unregister();
err_genl:
nf_unregister_sockopt(&ip_vs_sockopts);
err_sock:
return ret;
}
+void ip_vs_unregister_nl_ioctl(void)
+{
+ ip_vs_genl_unregister();
+ nf_unregister_sockopt(&ip_vs_sockopts);
+}
+
+int __init ip_vs_control_init(void)
+{
+ int idx;
+ int ret;
+
+ EnterFunction(2);
+
+ /* Initialize svc_table, ip_vs_svc_fwm_table, rs_table */
+ for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
+ INIT_LIST_HEAD(&ip_vs_svc_table[idx]);
+ INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
+ }
+
+ smp_wmb(); /* Do we really need it now ? */
+
+ ret = register_netdevice_notifier(&ip_vs_dst_notifier);
+ if (ret < 0)
+ return ret;
+
+ LeaveFunction(2);
+ return 0;
+}
+
void ip_vs_control_cleanup(void)
{
EnterFunction(2);
unregister_netdevice_notifier(&ip_vs_dst_notifier);
- ip_vs_genl_unregister();
- nf_unregister_sockopt(&ip_vs_sockopts);
LeaveFunction(2);
}
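
The new proc_do_sync_ports handler only accepts powers of two, which lets the sync code map a connection to one of sync_ports threads with a cheap bitwise AND instead of a modulo. A standalone sketch, where the hash key is only a stand-in for the connection pointer used later in select_master_thread_id():

#include <stdbool.h>
#include <stdio.h>

/* Same test as the kernel's is_power_of_2() */
static bool is_pow2(unsigned int n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	unsigned int ports = 8;			/* candidate sync_ports value */
	unsigned long key = 0xdeadbeefUL;	/* stand-in for a conn pointer */

	if (!is_pow2(ports))
		return 1;			/* handler would reject it */
	printf("thread id = %lu\n", key & (ports - 1));
	return 0;
}
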
diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c
index 1c269e56200a..8b7dca9ea422 100644
--- a/net/netfilter/ipvs/ip_vs_dh.c
+++ b/net/netfilter/ipvs/ip_vs_dh.c
@@ -68,7 +68,7 @@ struct ip_vs_dh_bucket {
/*
* Returns hash value for IPVS DH entry
*/
-static inline unsigned ip_vs_dh_hashkey(int af, const union nf_inet_addr *addr)
+static inline unsigned int ip_vs_dh_hashkey(int af, const union nf_inet_addr *addr)
{
__be32 addr_fold = addr->ip;
@@ -149,7 +149,7 @@ static int ip_vs_dh_init_svc(struct ip_vs_service *svc)
/* allocate the DH table for this service */
tbl = kmalloc(sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (tbl == NULL)
return -ENOMEM;
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 538d74ee4f68..b20b29c903ef 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -177,7 +177,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
__be16 port;
struct ip_vs_conn *n_cp;
char buf[24]; /* xxx.xxx.xxx.xxx,ppp,ppp\000 */
- unsigned buf_len;
+ unsigned int buf_len;
int ret = 0;
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
@@ -439,6 +439,8 @@ static int __net_init __ip_vs_ftp_init(struct net *net)
struct ip_vs_app *app;
struct netns_ipvs *ipvs = net_ipvs(net);
+ if (!ipvs)
+ return -ENOENT;
app = kmemdup(&ip_vs_ftp, sizeof(struct ip_vs_app), GFP_KERNEL);
if (!app)
return -ENOMEM;
@@ -483,7 +485,7 @@ static struct pernet_operations ip_vs_ftp_ops = {
.exit = __ip_vs_ftp_exit,
};
-int __init ip_vs_ftp_init(void)
+static int __init ip_vs_ftp_init(void)
{
int rv;
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 0f16283fd058..df646ccf08a7 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -142,7 +142,7 @@ static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
/*
* Returns hash value for IPVS LBLC entry
*/
-static inline unsigned
+static inline unsigned int
ip_vs_lblc_hashkey(int af, const union nf_inet_addr *addr)
{
__be32 addr_fold = addr->ip;
@@ -163,7 +163,7 @@ ip_vs_lblc_hashkey(int af, const union nf_inet_addr *addr)
static void
ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en)
{
- unsigned hash = ip_vs_lblc_hashkey(en->af, &en->addr);
+ unsigned int hash = ip_vs_lblc_hashkey(en->af, &en->addr);
list_add(&en->list, &tbl->bucket[hash]);
atomic_inc(&tbl->entries);
@@ -178,7 +178,7 @@ static inline struct ip_vs_lblc_entry *
ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl,
const union nf_inet_addr *addr)
{
- unsigned hash = ip_vs_lblc_hashkey(af, addr);
+ unsigned int hash = ip_vs_lblc_hashkey(af, addr);
struct ip_vs_lblc_entry *en;
list_for_each_entry(en, &tbl->bucket[hash], list)
@@ -342,7 +342,7 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
/*
* Allocate the ip_vs_lblc_table for this service
*/
- tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
+ tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
if (tbl == NULL)
return -ENOMEM;
@@ -551,6 +551,9 @@ static int __net_init __ip_vs_lblc_init(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
+ if (!ipvs)
+ return -ENOENT;
+
if (!net_eq(net, &init_net)) {
ipvs->lblc_ctl_table = kmemdup(vs_vars_table,
sizeof(vs_vars_table),
@@ -563,8 +566,7 @@ static int __net_init __ip_vs_lblc_init(struct net *net)
ipvs->lblc_ctl_table[0].data = &ipvs->sysctl_lblc_expiration;
ipvs->lblc_ctl_header =
- register_net_sysctl_table(net, net_vs_ctl_path,
- ipvs->lblc_ctl_table);
+ register_net_sysctl(net, "net/ipv4/vs", ipvs->lblc_ctl_table);
if (!ipvs->lblc_ctl_header) {
if (!net_eq(net, &init_net))
kfree(ipvs->lblc_ctl_table);
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index eec797f8cce7..570e31ea427a 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -311,7 +311,7 @@ static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
/*
* Returns hash value for IPVS LBLCR entry
*/
-static inline unsigned
+static inline unsigned int
ip_vs_lblcr_hashkey(int af, const union nf_inet_addr *addr)
{
__be32 addr_fold = addr->ip;
@@ -332,7 +332,7 @@ ip_vs_lblcr_hashkey(int af, const union nf_inet_addr *addr)
static void
ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
{
- unsigned hash = ip_vs_lblcr_hashkey(en->af, &en->addr);
+ unsigned int hash = ip_vs_lblcr_hashkey(en->af, &en->addr);
list_add(&en->list, &tbl->bucket[hash]);
atomic_inc(&tbl->entries);
@@ -347,7 +347,7 @@ static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl,
const union nf_inet_addr *addr)
{
- unsigned hash = ip_vs_lblcr_hashkey(af, addr);
+ unsigned int hash = ip_vs_lblcr_hashkey(af, addr);
struct ip_vs_lblcr_entry *en;
list_for_each_entry(en, &tbl->bucket[hash], list)
@@ -511,7 +511,7 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
/*
* Allocate the ip_vs_lblcr_table for this service
*/
- tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
+ tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
if (tbl == NULL)
return -ENOMEM;
@@ -745,6 +745,9 @@ static int __net_init __ip_vs_lblcr_init(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
+ if (!ipvs)
+ return -ENOENT;
+
if (!net_eq(net, &init_net)) {
ipvs->lblcr_ctl_table = kmemdup(vs_vars_table,
sizeof(vs_vars_table),
@@ -757,8 +760,7 @@ static int __net_init __ip_vs_lblcr_init(struct net *net)
ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration;
ipvs->lblcr_ctl_header =
- register_net_sysctl_table(net, net_vs_ctl_path,
- ipvs->lblcr_ctl_table);
+ register_net_sysctl(net, "net/ipv4/vs", ipvs->lblcr_ctl_table);
if (!ipvs->lblcr_ctl_header) {
if (!net_eq(net, &init_net))
kfree(ipvs->lblcr_ctl_table);
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index f843a8833250..50d82186da87 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -48,7 +48,7 @@ static struct ip_vs_protocol *ip_vs_proto_table[IP_VS_PROTO_TAB_SIZE];
*/
static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
{
- unsigned hash = IP_VS_PROTO_HASH(pp->protocol);
+ unsigned int hash = IP_VS_PROTO_HASH(pp->protocol);
pp->next = ip_vs_proto_table[hash];
ip_vs_proto_table[hash] = pp;
@@ -59,9 +59,6 @@ static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
return 0;
}
-#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP) || \
- defined(CONFIG_IP_VS_PROTO_SCTP) || defined(CONFIG_IP_VS_PROTO_AH) || \
- defined(CONFIG_IP_VS_PROTO_ESP)
/*
* register an ipvs protocols netns related data
*/
@@ -69,9 +66,9 @@ static int
register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp)
{
struct netns_ipvs *ipvs = net_ipvs(net);
- unsigned hash = IP_VS_PROTO_HASH(pp->protocol);
+ unsigned int hash = IP_VS_PROTO_HASH(pp->protocol);
struct ip_vs_proto_data *pd =
- kzalloc(sizeof(struct ip_vs_proto_data), GFP_ATOMIC);
+ kzalloc(sizeof(struct ip_vs_proto_data), GFP_KERNEL);
if (!pd)
return -ENOMEM;
@@ -81,12 +78,18 @@ register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp)
ipvs->proto_data_table[hash] = pd;
atomic_set(&pd->appcnt, 0); /* Init app counter */
- if (pp->init_netns != NULL)
- pp->init_netns(net, pd);
+ if (pp->init_netns != NULL) {
+ int ret = pp->init_netns(net, pd);
+ if (ret) {
+ /* unlink and free proto data */
+ ipvs->proto_data_table[hash] = pd->next;
+ kfree(pd);
+ return ret;
+ }
+ }
return 0;
}
-#endif
/*
* unregister an ipvs protocol
@@ -94,7 +97,7 @@ register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp)
static int unregister_ip_vs_protocol(struct ip_vs_protocol *pp)
{
struct ip_vs_protocol **pp_p;
- unsigned hash = IP_VS_PROTO_HASH(pp->protocol);
+ unsigned int hash = IP_VS_PROTO_HASH(pp->protocol);
pp_p = &ip_vs_proto_table[hash];
for (; *pp_p; pp_p = &(*pp_p)->next) {
@@ -117,7 +120,7 @@ unregister_ip_vs_proto_netns(struct net *net, struct ip_vs_proto_data *pd)
{
struct netns_ipvs *ipvs = net_ipvs(net);
struct ip_vs_proto_data **pd_p;
- unsigned hash = IP_VS_PROTO_HASH(pd->pp->protocol);
+ unsigned int hash = IP_VS_PROTO_HASH(pd->pp->protocol);
pd_p = &ipvs->proto_data_table[hash];
for (; *pd_p; pd_p = &(*pd_p)->next) {
@@ -139,7 +142,7 @@ unregister_ip_vs_proto_netns(struct net *net, struct ip_vs_proto_data *pd)
struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto)
{
struct ip_vs_protocol *pp;
- unsigned hash = IP_VS_PROTO_HASH(proto);
+ unsigned int hash = IP_VS_PROTO_HASH(proto);
for (pp = ip_vs_proto_table[hash]; pp; pp = pp->next) {
if (pp->protocol == proto)
@@ -153,11 +156,11 @@ EXPORT_SYMBOL(ip_vs_proto_get);
/*
* get ip_vs_protocol object data by netns and proto
*/
-struct ip_vs_proto_data *
+static struct ip_vs_proto_data *
__ipvs_proto_data_get(struct netns_ipvs *ipvs, unsigned short proto)
{
struct ip_vs_proto_data *pd;
- unsigned hash = IP_VS_PROTO_HASH(proto);
+ unsigned int hash = IP_VS_PROTO_HASH(proto);
for (pd = ipvs->proto_data_table[hash]; pd; pd = pd->next) {
if (pd->pp->protocol == proto)
@@ -196,7 +199,7 @@ void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags)
int *
ip_vs_create_timeout_table(int *table, int size)
{
- return kmemdup(table, size, GFP_ATOMIC);
+ return kmemdup(table, size, GFP_KERNEL);
}
@@ -316,22 +319,35 @@ ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
*/
int __net_init ip_vs_protocol_net_init(struct net *net)
{
+ int i, ret;
+ static struct ip_vs_protocol *protos[] = {
#ifdef CONFIG_IP_VS_PROTO_TCP
- register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp);
+ &ip_vs_protocol_tcp,
#endif
#ifdef CONFIG_IP_VS_PROTO_UDP
- register_ip_vs_proto_netns(net, &ip_vs_protocol_udp);
+ &ip_vs_protocol_udp,
#endif
#ifdef CONFIG_IP_VS_PROTO_SCTP
- register_ip_vs_proto_netns(net, &ip_vs_protocol_sctp);
+ &ip_vs_protocol_sctp,
#endif
#ifdef CONFIG_IP_VS_PROTO_AH
- register_ip_vs_proto_netns(net, &ip_vs_protocol_ah);
+ &ip_vs_protocol_ah,
#endif
#ifdef CONFIG_IP_VS_PROTO_ESP
- register_ip_vs_proto_netns(net, &ip_vs_protocol_esp);
+ &ip_vs_protocol_esp,
#endif
+ };
+
+ for (i = 0; i < ARRAY_SIZE(protos); i++) {
+ ret = register_ip_vs_proto_netns(net, protos[i]);
+ if (ret < 0)
+ goto cleanup;
+ }
return 0;
+
+cleanup:
+ ip_vs_protocol_net_cleanup(net);
+ return ret;
}
void __net_exit ip_vs_protocol_net_cleanup(struct net *net)
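
ip_vs_protocol_net_init() above replaces one #ifdef block per protocol with a static array and a loop, so a mid-loop failure can unwind through a single cleanup path. A standalone sketch of the same register-all-or-unwind pattern (register_one/unregister_all are illustrative stand-ins):

#include <stdio.h>

#define NPROTOS 3

static int register_one(int i) { return (i == 2) ? -1 : 0; }
static void unregister_all(void) { printf("cleanup: unregister all\n"); }

static int register_protocols(void)
{
	int i, ret;

	for (i = 0; i < NPROTOS; i++) {
		ret = register_one(i);
		if (ret < 0)
			goto cleanup;
	}
	return 0;

cleanup:
	unregister_all();	/* mirrors ip_vs_protocol_net_cleanup() */
	return ret;
}

int main(void)
{
	printf("register_protocols() = %d\n", register_protocols());
	return 0;
}
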
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 1fbf7a2816f5..9f3fb751c491 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -1090,7 +1090,7 @@ out:
* timeouts is netns related now.
* ---------------------------------------------
*/
-static void __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
+static int __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
{
struct netns_ipvs *ipvs = net_ipvs(net);
@@ -1098,6 +1098,9 @@ static void __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
spin_lock_init(&ipvs->sctp_app_lock);
pd->timeout_table = ip_vs_create_timeout_table((int *)sctp_timeouts,
sizeof(sctp_timeouts));
+ if (!pd->timeout_table)
+ return -ENOMEM;
+ return 0;
}
static void __ip_vs_sctp_exit(struct net *net, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index ef8641f7af83..cd609cc62721 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -677,7 +677,7 @@ void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp)
* timeouts is netns related now.
* ---------------------------------------------
*/
-static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
+static int __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
{
struct netns_ipvs *ipvs = net_ipvs(net);
@@ -685,7 +685,10 @@ static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
spin_lock_init(&ipvs->tcp_app_lock);
pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts,
sizeof(tcp_timeouts));
+ if (!pd->timeout_table)
+ return -ENOMEM;
pd->tcp_state_table = tcp_states;
+ return 0;
}
static void __ip_vs_tcp_exit(struct net *net, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index f4b7262896bb..2fedb2dcb3d1 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -467,7 +467,7 @@ udp_state_transition(struct ip_vs_conn *cp, int direction,
cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL];
}
-static void __udp_init(struct net *net, struct ip_vs_proto_data *pd)
+static int __udp_init(struct net *net, struct ip_vs_proto_data *pd)
{
struct netns_ipvs *ipvs = net_ipvs(net);
@@ -475,6 +475,9 @@ static void __udp_init(struct net *net, struct ip_vs_proto_data *pd)
spin_lock_init(&ipvs->udp_app_lock);
pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts,
sizeof(udp_timeouts));
+ if (!pd->timeout_table)
+ return -ENOMEM;
+ return 0;
}
static void __udp_exit(struct net *net, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index 069e8d4d5c01..05126521743e 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -70,7 +70,7 @@ struct ip_vs_sh_bucket {
/*
* Returns hash value for IPVS SH entry
*/
-static inline unsigned ip_vs_sh_hashkey(int af, const union nf_inet_addr *addr)
+static inline unsigned int ip_vs_sh_hashkey(int af, const union nf_inet_addr *addr)
{
__be32 addr_fold = addr->ip;
@@ -162,7 +162,7 @@ static int ip_vs_sh_init_svc(struct ip_vs_service *svc)
/* allocate the SH table for this service */
tbl = kmalloc(sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (tbl == NULL)
return -ENOMEM;
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 8a0d6d6889f0..effa10c9e4e3 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -196,6 +196,7 @@ struct ip_vs_sync_thread_data {
struct net *net;
struct socket *sock;
char *buf;
+ int id;
};
/* Version 0 definition of packet sizes */
@@ -271,13 +272,6 @@ struct ip_vs_sync_buff {
unsigned char *end;
};
-/* multicast addr */
-static struct sockaddr_in mcast_addr = {
- .sin_family = AF_INET,
- .sin_port = cpu_to_be16(IP_VS_SYNC_PORT),
- .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP),
-};
-
/*
* Copy of struct ip_vs_seq
* From unaligned network order to aligned host order
@@ -300,18 +294,22 @@ static void hton_seq(struct ip_vs_seq *ho, struct ip_vs_seq *no)
put_unaligned_be32(ho->previous_delta, &no->previous_delta);
}
-static inline struct ip_vs_sync_buff *sb_dequeue(struct netns_ipvs *ipvs)
+static inline struct ip_vs_sync_buff *
+sb_dequeue(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms)
{
struct ip_vs_sync_buff *sb;
spin_lock_bh(&ipvs->sync_lock);
- if (list_empty(&ipvs->sync_queue)) {
+ if (list_empty(&ms->sync_queue)) {
sb = NULL;
+ __set_current_state(TASK_INTERRUPTIBLE);
} else {
- sb = list_entry(ipvs->sync_queue.next,
- struct ip_vs_sync_buff,
+ sb = list_entry(ms->sync_queue.next, struct ip_vs_sync_buff,
list);
list_del(&sb->list);
+ ms->sync_queue_len--;
+ if (!ms->sync_queue_len)
+ ms->sync_queue_delay = 0;
}
spin_unlock_bh(&ipvs->sync_lock);
@@ -334,7 +332,7 @@ ip_vs_sync_buff_create(struct netns_ipvs *ipvs)
kfree(sb);
return NULL;
}
- sb->mesg->reserved = 0; /* old nr_conns i.e. must be zeo now */
+ sb->mesg->reserved = 0; /* old nr_conns i.e. must be zero now */
sb->mesg->version = SYNC_PROTO_VER;
sb->mesg->syncid = ipvs->master_syncid;
sb->mesg->size = sizeof(struct ip_vs_sync_mesg);
@@ -353,14 +351,22 @@ static inline void ip_vs_sync_buff_release(struct ip_vs_sync_buff *sb)
kfree(sb);
}
-static inline void sb_queue_tail(struct netns_ipvs *ipvs)
+static inline void sb_queue_tail(struct netns_ipvs *ipvs,
+ struct ipvs_master_sync_state *ms)
{
- struct ip_vs_sync_buff *sb = ipvs->sync_buff;
+ struct ip_vs_sync_buff *sb = ms->sync_buff;
spin_lock(&ipvs->sync_lock);
- if (ipvs->sync_state & IP_VS_STATE_MASTER)
- list_add_tail(&sb->list, &ipvs->sync_queue);
- else
+ if (ipvs->sync_state & IP_VS_STATE_MASTER &&
+ ms->sync_queue_len < sysctl_sync_qlen_max(ipvs)) {
+ if (!ms->sync_queue_len)
+ schedule_delayed_work(&ms->master_wakeup_work,
+ max(IPVS_SYNC_SEND_DELAY, 1));
+ ms->sync_queue_len++;
+ list_add_tail(&sb->list, &ms->sync_queue);
+ if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE)
+ wake_up_process(ms->master_thread);
+ } else
ip_vs_sync_buff_release(sb);
spin_unlock(&ipvs->sync_lock);
}
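
The reworked sb_queue_tail() above wakes the per-id master thread either when IPVS_SYNC_WAKEUP_RATE buffers have accumulated or, failing that, via a delayed work item armed on the first enqueue. A standalone sketch of this two-trigger batching (the constant and the flag are illustrative, not the kernel's values):

#include <stdio.h>

#define WAKEUP_RATE 8

static int queue_len, timer_armed;

static void enqueue(void)
{
	if (queue_len++ == 0)
		timer_armed = 1;	/* schedule_delayed_work() analogue */
	if (queue_len == WAKEUP_RATE)
		printf("wake sender: batch full\n");
}

int main(void)
{
	int i;

	for (i = 0; i < WAKEUP_RATE; i++)
		enqueue();
	if (timer_armed)
		printf("fallback timer was armed on first enqueue\n");
	return 0;
}
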
@@ -370,49 +376,26 @@ static inline void sb_queue_tail(struct netns_ipvs *ipvs)
* than the specified time or the specified time is zero.
*/
static inline struct ip_vs_sync_buff *
-get_curr_sync_buff(struct netns_ipvs *ipvs, unsigned long time)
+get_curr_sync_buff(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms,
+ unsigned long time)
{
struct ip_vs_sync_buff *sb;
spin_lock_bh(&ipvs->sync_buff_lock);
- if (ipvs->sync_buff &&
- time_after_eq(jiffies - ipvs->sync_buff->firstuse, time)) {
- sb = ipvs->sync_buff;
- ipvs->sync_buff = NULL;
+ sb = ms->sync_buff;
+ if (sb && time_after_eq(jiffies - sb->firstuse, time)) {
+ ms->sync_buff = NULL;
+ __set_current_state(TASK_RUNNING);
} else
sb = NULL;
spin_unlock_bh(&ipvs->sync_buff_lock);
return sb;
}
-/*
- * Switch mode from sending version 0 or 1
- * - must handle sync_buf
- */
-void ip_vs_sync_switch_mode(struct net *net, int mode)
+static inline int
+select_master_thread_id(struct netns_ipvs *ipvs, struct ip_vs_conn *cp)
{
- struct netns_ipvs *ipvs = net_ipvs(net);
-
- if (!(ipvs->sync_state & IP_VS_STATE_MASTER))
- return;
- if (mode == sysctl_sync_ver(ipvs) || !ipvs->sync_buff)
- return;
-
- spin_lock_bh(&ipvs->sync_buff_lock);
- /* Buffer empty ? then let buf_create do the job */
- if (ipvs->sync_buff->mesg->size <= sizeof(struct ip_vs_sync_mesg)) {
- kfree(ipvs->sync_buff);
- ipvs->sync_buff = NULL;
- } else {
- spin_lock_bh(&ipvs->sync_lock);
- if (ipvs->sync_state & IP_VS_STATE_MASTER)
- list_add_tail(&ipvs->sync_buff->list,
- &ipvs->sync_queue);
- else
- ip_vs_sync_buff_release(ipvs->sync_buff);
- spin_unlock_bh(&ipvs->sync_lock);
- }
- spin_unlock_bh(&ipvs->sync_buff_lock);
+ return ((long) cp >> (1 + ilog2(sizeof(*cp)))) & ipvs->threads_mask;
}
/*
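
select_master_thread_id() above turns a connection pointer into a thread id by discarding the low bits that are nearly constant for objects of that size, then masking with threads_mask. A standalone sketch with an open-coded ilog2 (the kernel's ilog2() macro has the same meaning):

#include <stdio.h>

struct conn { char pad[256]; };	/* illustrative object size */

static unsigned int ilog2u(unsigned long v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	struct conn c;
	unsigned long mask = 7;	/* threads_mask for 8 threads */
	unsigned long id;

	id = ((unsigned long)&c >> (1 + ilog2u(sizeof(c)))) & mask;
	printf("thread id = %lu\n", id);
	return 0;
}
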
@@ -442,15 +425,101 @@ ip_vs_sync_buff_create_v0(struct netns_ipvs *ipvs)
return sb;
}
+/* Check if conn should be synced.
+ * pkts: conn packets, pass sysctl_sync_threshold as pkts to skip the
+ * packet-count check
+ * - (1) sync_refresh_period: reduce sync rate. Additionally, retry
+ * sync_retries times with period of sync_refresh_period/8
+ * - (2) if both sync_refresh_period and sync_period are 0, send sync only
+ * for state changes or only once when pkts matches sync_threshold
+ * - (3) templates: rate can be reduced only with sync_refresh_period or
+ * with (2)
+ */
+static int ip_vs_sync_conn_needed(struct netns_ipvs *ipvs,
+ struct ip_vs_conn *cp, int pkts)
+{
+ unsigned long orig = ACCESS_ONCE(cp->sync_endtime);
+ unsigned long now = jiffies;
+ unsigned long n = (now + cp->timeout) & ~3UL;
+ unsigned int sync_refresh_period;
+ int sync_period;
+ int force;
+
+ /* Check if we sync in current state */
+ if (unlikely(cp->flags & IP_VS_CONN_F_TEMPLATE))
+ force = 0;
+ else if (likely(cp->protocol == IPPROTO_TCP)) {
+ if (!((1 << cp->state) &
+ ((1 << IP_VS_TCP_S_ESTABLISHED) |
+ (1 << IP_VS_TCP_S_FIN_WAIT) |
+ (1 << IP_VS_TCP_S_CLOSE) |
+ (1 << IP_VS_TCP_S_CLOSE_WAIT) |
+ (1 << IP_VS_TCP_S_TIME_WAIT))))
+ return 0;
+ force = cp->state != cp->old_state;
+ if (force && cp->state != IP_VS_TCP_S_ESTABLISHED)
+ goto set;
+ } else if (unlikely(cp->protocol == IPPROTO_SCTP)) {
+ if (!((1 << cp->state) &
+ ((1 << IP_VS_SCTP_S_ESTABLISHED) |
+ (1 << IP_VS_SCTP_S_CLOSED) |
+ (1 << IP_VS_SCTP_S_SHUT_ACK_CLI) |
+ (1 << IP_VS_SCTP_S_SHUT_ACK_SER))))
+ return 0;
+ force = cp->state != cp->old_state;
+ if (force && cp->state != IP_VS_SCTP_S_ESTABLISHED)
+ goto set;
+ } else {
+ /* UDP or another protocol with single state */
+ force = 0;
+ }
+
+ sync_refresh_period = sysctl_sync_refresh_period(ipvs);
+ if (sync_refresh_period > 0) {
+ long diff = n - orig;
+ long min_diff = max(cp->timeout >> 1, 10UL * HZ);
+
+ /* Avoid sync if difference is below sync_refresh_period
+ * and below the half timeout.
+ */
+ if (abs(diff) < min_t(long, sync_refresh_period, min_diff)) {
+ int retries = orig & 3;
+
+ if (retries >= sysctl_sync_retries(ipvs))
+ return 0;
+ if (time_before(now, orig - cp->timeout +
+ (sync_refresh_period >> 3)))
+ return 0;
+ n |= retries + 1;
+ }
+ }
+ sync_period = sysctl_sync_period(ipvs);
+ if (sync_period > 0) {
+ if (!(cp->flags & IP_VS_CONN_F_TEMPLATE) &&
+ pkts % sync_period != sysctl_sync_threshold(ipvs))
+ return 0;
+ } else if (sync_refresh_period <= 0 &&
+ pkts != sysctl_sync_threshold(ipvs))
+ return 0;
+
+set:
+ cp->old_state = cp->state;
+ n = cmpxchg(&cp->sync_endtime, orig, n);
+ return n == orig || force;
+}
+
/*
* Version 0 , could be switched in by sys_ctl.
* Add an ip_vs_conn information into the current sync_buff.
*/
-void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp)
+static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
+ int pkts)
{
struct netns_ipvs *ipvs = net_ipvs(net);
struct ip_vs_sync_mesg_v0 *m;
struct ip_vs_sync_conn_v0 *s;
+ struct ip_vs_sync_buff *buff;
+ struct ipvs_master_sync_state *ms;
+ int id;
int len;
if (unlikely(cp->af != AF_INET))
@@ -459,21 +528,41 @@ void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp)
if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
return;
+ if (!ip_vs_sync_conn_needed(ipvs, cp, pkts))
+ return;
+
spin_lock(&ipvs->sync_buff_lock);
- if (!ipvs->sync_buff) {
- ipvs->sync_buff =
- ip_vs_sync_buff_create_v0(ipvs);
- if (!ipvs->sync_buff) {
+ if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
+ spin_unlock(&ipvs->sync_buff_lock);
+ return;
+ }
+
+ id = select_master_thread_id(ipvs, cp);
+ ms = &ipvs->ms[id];
+ buff = ms->sync_buff;
+ if (buff) {
+ m = (struct ip_vs_sync_mesg_v0 *) buff->mesg;
+ /* Send buffer if it is for v1 */
+ if (!m->nr_conns) {
+ sb_queue_tail(ipvs, ms);
+ ms->sync_buff = NULL;
+ buff = NULL;
+ }
+ }
+ if (!buff) {
+ buff = ip_vs_sync_buff_create_v0(ipvs);
+ if (!buff) {
spin_unlock(&ipvs->sync_buff_lock);
pr_err("ip_vs_sync_buff_create failed.\n");
return;
}
+ ms->sync_buff = buff;
}
len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? FULL_CONN_SIZE :
SIMPLE_CONN_SIZE;
- m = (struct ip_vs_sync_mesg_v0 *)ipvs->sync_buff->mesg;
- s = (struct ip_vs_sync_conn_v0 *)ipvs->sync_buff->head;
+ m = (struct ip_vs_sync_mesg_v0 *) buff->mesg;
+ s = (struct ip_vs_sync_conn_v0 *) buff->head;
/* copy members */
s->reserved = 0;
@@ -494,18 +583,24 @@ void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp)
m->nr_conns++;
m->size += len;
- ipvs->sync_buff->head += len;
+ buff->head += len;
/* check if there is a space for next one */
- if (ipvs->sync_buff->head + FULL_CONN_SIZE > ipvs->sync_buff->end) {
- sb_queue_tail(ipvs);
- ipvs->sync_buff = NULL;
+ if (buff->head + FULL_CONN_SIZE > buff->end) {
+ sb_queue_tail(ipvs, ms);
+ ms->sync_buff = NULL;
}
spin_unlock(&ipvs->sync_buff_lock);
/* synchronize its controller if it has */
- if (cp->control)
- ip_vs_sync_conn(net, cp->control);
+ cp = cp->control;
+ if (cp) {
+ if (cp->flags & IP_VS_CONN_F_TEMPLATE)
+ pkts = atomic_add_return(1, &cp->in_pkts);
+ else
+ pkts = sysctl_sync_threshold(ipvs);
+ ip_vs_sync_conn(net, cp, pkts);
+ }
}
/*
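
ip_vs_sync_conn_needed() above ends with a cmpxchg() on cp->sync_endtime so that, when several CPUs race to refresh the same connection, exactly one of them advances the deadline and reports that a sync message is due. A standalone sketch using the GCC __sync builtin in place of the kernel's cmpxchg():

#include <stdio.h>

static unsigned long sync_endtime;

/* Returns 1 only for the caller whose compare-and-swap wins the race. */
static int try_advance(unsigned long old, unsigned long new_val)
{
	return __sync_val_compare_and_swap(&sync_endtime, old, new_val) == old;
}

int main(void)
{
	unsigned long orig = sync_endtime;

	if (try_advance(orig, orig + 100))
		printf("this caller sends the sync message\n");
	if (!try_advance(orig, orig + 200))
		printf("a second attempt with a stale value loses\n");
	return 0;
}
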
@@ -513,23 +608,29 @@ void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp)
* Called by ip_vs_in.
* Sending Version 1 messages
*/
-void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp)
+void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts)
{
struct netns_ipvs *ipvs = net_ipvs(net);
struct ip_vs_sync_mesg *m;
union ip_vs_sync_conn *s;
+ struct ip_vs_sync_buff *buff;
+ struct ipvs_master_sync_state *ms;
+ int id;
__u8 *p;
unsigned int len, pe_name_len, pad;
/* Handle old version of the protocol */
if (sysctl_sync_ver(ipvs) == 0) {
- ip_vs_sync_conn_v0(net, cp);
+ ip_vs_sync_conn_v0(net, cp, pkts);
return;
}
/* Do not sync ONE PACKET */
if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
goto control;
sloop:
+ if (!ip_vs_sync_conn_needed(ipvs, cp, pkts))
+ goto control;
+
/* Sanity checks */
pe_name_len = 0;
if (cp->pe_data_len) {
@@ -541,6 +642,13 @@ sloop:
}
spin_lock(&ipvs->sync_buff_lock);
+ if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
+ spin_unlock(&ipvs->sync_buff_lock);
+ return;
+ }
+
+ id = select_master_thread_id(ipvs, cp);
+ ms = &ipvs->ms[id];
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
@@ -559,27 +667,32 @@ sloop:
/* check if there is a space for this one */
pad = 0;
- if (ipvs->sync_buff) {
- pad = (4 - (size_t)ipvs->sync_buff->head) & 3;
- if (ipvs->sync_buff->head + len + pad > ipvs->sync_buff->end) {
- sb_queue_tail(ipvs);
- ipvs->sync_buff = NULL;
+ buff = ms->sync_buff;
+ if (buff) {
+ m = buff->mesg;
+ pad = (4 - (size_t) buff->head) & 3;
+ /* Send buffer if it is for v0 */
+ if (buff->head + len + pad > buff->end || m->reserved) {
+ sb_queue_tail(ipvs, ms);
+ ms->sync_buff = NULL;
+ buff = NULL;
pad = 0;
}
}
- if (!ipvs->sync_buff) {
- ipvs->sync_buff = ip_vs_sync_buff_create(ipvs);
- if (!ipvs->sync_buff) {
+ if (!buff) {
+ buff = ip_vs_sync_buff_create(ipvs);
+ if (!buff) {
spin_unlock(&ipvs->sync_buff_lock);
pr_err("ip_vs_sync_buff_create failed.\n");
return;
}
+ ms->sync_buff = buff;
+ m = buff->mesg;
}
- m = ipvs->sync_buff->mesg;
- p = ipvs->sync_buff->head;
- ipvs->sync_buff->head += pad + len;
+ p = buff->head;
+ buff->head += pad + len;
m->size += pad + len;
/* Add ev. padding from prev. sync_conn */
while (pad--)
@@ -644,16 +757,10 @@ control:
cp = cp->control;
if (!cp)
return;
- /*
- * Reduce sync rate for templates
- * i.e only increment in_pkts for Templates.
- */
- if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
- int pkts = atomic_add_return(1, &cp->in_pkts);
-
- if (pkts % sysctl_sync_period(ipvs) != 1)
- return;
- }
+ if (cp->flags & IP_VS_CONN_F_TEMPLATE)
+ pkts = atomic_add_return(1, &cp->in_pkts);
+ else
+ pkts = sysctl_sync_threshold(ipvs);
goto sloop;
}
@@ -731,9 +838,32 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
else
cp = ip_vs_ct_in_get(param);
- if (cp && param->pe_data) /* Free pe_data */
+ if (cp) {
+ /* Free pe_data */
kfree(param->pe_data);
- if (!cp) {
+
+ dest = cp->dest;
+ spin_lock(&cp->lock);
+ if ((cp->flags ^ flags) & IP_VS_CONN_F_INACTIVE &&
+ !(flags & IP_VS_CONN_F_TEMPLATE) && dest) {
+ if (flags & IP_VS_CONN_F_INACTIVE) {
+ atomic_dec(&dest->activeconns);
+ atomic_inc(&dest->inactconns);
+ } else {
+ atomic_inc(&dest->activeconns);
+ atomic_dec(&dest->inactconns);
+ }
+ }
+ flags &= IP_VS_CONN_F_BACKUP_UPD_MASK;
+ flags |= cp->flags & ~IP_VS_CONN_F_BACKUP_UPD_MASK;
+ cp->flags = flags;
+ spin_unlock(&cp->lock);
+ if (!dest) {
+ dest = ip_vs_try_bind_dest(cp);
+ if (dest)
+ atomic_dec(&dest->refcnt);
+ }
+ } else {
/*
* Find the appropriate destination for the connection.
* If it is not found the connection will remain unbound
@@ -742,18 +872,6 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr,
param->vport, protocol, fwmark, flags);
- /* Set the approprite ativity flag */
- if (protocol == IPPROTO_TCP) {
- if (state != IP_VS_TCP_S_ESTABLISHED)
- flags |= IP_VS_CONN_F_INACTIVE;
- else
- flags &= ~IP_VS_CONN_F_INACTIVE;
- } else if (protocol == IPPROTO_SCTP) {
- if (state != IP_VS_SCTP_S_ESTABLISHED)
- flags |= IP_VS_CONN_F_INACTIVE;
- else
- flags &= ~IP_VS_CONN_F_INACTIVE;
- }
cp = ip_vs_conn_new(param, daddr, dport, flags, dest, fwmark);
if (dest)
atomic_dec(&dest->refcnt);
@@ -763,34 +881,6 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
IP_VS_DBG(2, "BACKUP, add new conn. failed\n");
return;
}
- } else if (!cp->dest) {
- dest = ip_vs_try_bind_dest(cp);
- if (dest)
- atomic_dec(&dest->refcnt);
- } else if ((cp->dest) && (cp->protocol == IPPROTO_TCP) &&
- (cp->state != state)) {
- /* update active/inactive flag for the connection */
- dest = cp->dest;
- if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
- (state != IP_VS_TCP_S_ESTABLISHED)) {
- atomic_dec(&dest->activeconns);
- atomic_inc(&dest->inactconns);
- cp->flags |= IP_VS_CONN_F_INACTIVE;
- } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
- (state == IP_VS_TCP_S_ESTABLISHED)) {
- atomic_inc(&dest->activeconns);
- atomic_dec(&dest->inactconns);
- cp->flags &= ~IP_VS_CONN_F_INACTIVE;
- }
- } else if ((cp->dest) && (cp->protocol == IPPROTO_SCTP) &&
- (cp->state != state)) {
- dest = cp->dest;
- if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
- (state != IP_VS_SCTP_S_ESTABLISHED)) {
- atomic_dec(&dest->activeconns);
- atomic_inc(&dest->inactconns);
- cp->flags &= ~IP_VS_CONN_F_INACTIVE;
- }
}
if (opt)
@@ -839,7 +929,7 @@ static void ip_vs_process_message_v0(struct net *net, const char *buffer,
p = (char *)buffer + sizeof(struct ip_vs_sync_mesg_v0);
for (i=0; i<m->nr_conns; i++) {
- unsigned flags, state;
+ unsigned int flags, state;
if (p + SIMPLE_CONN_SIZE > buffer+buflen) {
IP_VS_ERR_RL("BACKUP v0, bogus conn\n");
@@ -1109,7 +1199,7 @@ static void ip_vs_process_message(struct net *net, __u8 *buffer,
for (i=0; i<nr_conns; i++) {
union ip_vs_sync_conn *s;
- unsigned size;
+ unsigned int size;
int retc;
p = msg_end;
@@ -1149,6 +1239,28 @@ static void ip_vs_process_message(struct net *net, __u8 *buffer,
/*
+ * Setup sndbuf (mode=1) or rcvbuf (mode=0)
+ */
+static void set_sock_size(struct sock *sk, int mode, int val)
+{
+ /* setsockopt(sock, SOL_SOCKET, SO_SNDBUF, &val, sizeof(val)); */
+ /* setsockopt(sock, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val)); */
+ lock_sock(sk);
+ if (mode) {
+ val = clamp_t(int, val, (SOCK_MIN_SNDBUF + 1) / 2,
+ sysctl_wmem_max);
+ sk->sk_sndbuf = val * 2;
+ sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
+ } else {
+ val = clamp_t(int, val, (SOCK_MIN_RCVBUF + 1) / 2,
+ sysctl_rmem_max);
+ sk->sk_rcvbuf = val * 2;
+ sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
+ }
+ release_sock(sk);
+}
+
+/*
* Setup loopback of outgoing multicasts on a sending socket
*/
static void set_mcast_loop(struct sock *sk, u_char loop)
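
set_sock_size() above writes sk_sndbuf/sk_rcvbuf directly and doubles the requested value, matching what the kernel does for a userspace setsockopt(SO_SNDBUF/SO_RCVBUF), as the commented-out calls inside it hint. A standalone userspace analogue:

#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	int fd, want = 212992, got;
	socklen_t len = sizeof(got);

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;
	setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &want, sizeof(want));
	getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &got, &len);
	printf("asked %d, effective %d\n", want, got);	/* typically 2*want */
	return 0;
}
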
@@ -1298,9 +1410,15 @@ static int bind_mcastif_addr(struct socket *sock, char *ifname)
/*
* Set up sending multicast socket over UDP
*/
-static struct socket *make_send_sock(struct net *net)
+static struct socket *make_send_sock(struct net *net, int id)
{
struct netns_ipvs *ipvs = net_ipvs(net);
+ /* multicast addr */
+ struct sockaddr_in mcast_addr = {
+ .sin_family = AF_INET,
+ .sin_port = cpu_to_be16(IP_VS_SYNC_PORT + id),
+ .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP),
+ };
struct socket *sock;
int result;
@@ -1324,6 +1442,9 @@ static struct socket *make_send_sock(struct net *net)
set_mcast_loop(sock->sk, 0);
set_mcast_ttl(sock->sk, 1);
+ result = sysctl_sync_sock_size(ipvs);
+ if (result > 0)
+ set_sock_size(sock->sk, 1, result);
result = bind_mcastif_addr(sock, ipvs->master_mcast_ifn);
if (result < 0) {
@@ -1349,9 +1470,15 @@ error:
/*
* Set up receiving multicast socket over UDP
*/
-static struct socket *make_receive_sock(struct net *net)
+static struct socket *make_receive_sock(struct net *net, int id)
{
struct netns_ipvs *ipvs = net_ipvs(net);
+ /* multicast addr */
+ struct sockaddr_in mcast_addr = {
+ .sin_family = AF_INET,
+ .sin_port = cpu_to_be16(IP_VS_SYNC_PORT + id),
+ .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP),
+ };
struct socket *sock;
int result;
@@ -1368,7 +1495,10 @@ static struct socket *make_receive_sock(struct net *net)
*/
sk_change_net(sock->sk, net);
/* it is equivalent to the REUSEADDR option in user-space */
- sock->sk->sk_reuse = 1;
+ sock->sk->sk_reuse = SK_CAN_REUSE;
+ result = sysctl_sync_sock_size(ipvs);
+ if (result > 0)
+ set_sock_size(sock->sk, 0, result);
result = sock->ops->bind(sock, (struct sockaddr *) &mcast_addr,
sizeof(struct sockaddr));
@@ -1411,18 +1541,22 @@ ip_vs_send_async(struct socket *sock, const char *buffer, const size_t length)
return len;
}
-static void
+static int
ip_vs_send_sync_msg(struct socket *sock, struct ip_vs_sync_mesg *msg)
{
int msize;
+ int ret;
msize = msg->size;
/* Put size in network byte order */
msg->size = htons(msg->size);
- if (ip_vs_send_async(sock, (char *)msg, msize) != msize)
- pr_err("ip_vs_send_async error\n");
+ ret = ip_vs_send_async(sock, (char *)msg, msize);
+ if (ret >= 0 || ret == -EAGAIN)
+ return ret;
+ pr_err("ip_vs_send_async error %d\n", ret);
+ return 0;
}
static int
@@ -1438,48 +1572,90 @@ ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen)
iov.iov_base = buffer;
iov.iov_len = (size_t)buflen;
- len = kernel_recvmsg(sock, &msg, &iov, 1, buflen, 0);
+ len = kernel_recvmsg(sock, &msg, &iov, 1, buflen, MSG_DONTWAIT);
if (len < 0)
- return -1;
+ return len;
LeaveFunction(7);
return len;
}
+/* Wakeup the master thread for sending */
+static void master_wakeup_work_handler(struct work_struct *work)
+{
+ struct ipvs_master_sync_state *ms =
+ container_of(work, struct ipvs_master_sync_state,
+ master_wakeup_work.work);
+ struct netns_ipvs *ipvs = ms->ipvs;
+
+ spin_lock_bh(&ipvs->sync_lock);
+ if (ms->sync_queue_len &&
+ ms->sync_queue_delay < IPVS_SYNC_WAKEUP_RATE) {
+ ms->sync_queue_delay = IPVS_SYNC_WAKEUP_RATE;
+ wake_up_process(ms->master_thread);
+ }
+ spin_unlock_bh(&ipvs->sync_lock);
+}
+
+/* Get next buffer to send */
+static inline struct ip_vs_sync_buff *
+next_sync_buff(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms)
+{
+ struct ip_vs_sync_buff *sb;
+
+ sb = sb_dequeue(ipvs, ms);
+ if (sb)
+ return sb;
+ /* Do not delay entries in buffer for more than 2 seconds */
+ return get_curr_sync_buff(ipvs, ms, IPVS_SYNC_FLUSH_TIME);
+}
static int sync_thread_master(void *data)
{
struct ip_vs_sync_thread_data *tinfo = data;
struct netns_ipvs *ipvs = net_ipvs(tinfo->net);
+ struct ipvs_master_sync_state *ms = &ipvs->ms[tinfo->id];
+ struct sock *sk = tinfo->sock->sk;
struct ip_vs_sync_buff *sb;
pr_info("sync thread started: state = MASTER, mcast_ifn = %s, "
- "syncid = %d\n",
- ipvs->master_mcast_ifn, ipvs->master_syncid);
+ "syncid = %d, id = %d\n",
+ ipvs->master_mcast_ifn, ipvs->master_syncid, tinfo->id);
- while (!kthread_should_stop()) {
- while ((sb = sb_dequeue(ipvs))) {
- ip_vs_send_sync_msg(tinfo->sock, sb->mesg);
- ip_vs_sync_buff_release(sb);
+ for (;;) {
+ sb = next_sync_buff(ipvs, ms);
+ if (unlikely(kthread_should_stop()))
+ break;
+ if (!sb) {
+ schedule_timeout(IPVS_SYNC_CHECK_PERIOD);
+ continue;
}
-
- /* check if entries stay in ipvs->sync_buff for 2 seconds */
- sb = get_curr_sync_buff(ipvs, 2 * HZ);
- if (sb) {
- ip_vs_send_sync_msg(tinfo->sock, sb->mesg);
- ip_vs_sync_buff_release(sb);
+ while (ip_vs_send_sync_msg(tinfo->sock, sb->mesg) < 0) {
+ int ret = 0;
+
+ __wait_event_interruptible(*sk_sleep(sk),
+ sock_writeable(sk) ||
+ kthread_should_stop(),
+ ret);
+ if (unlikely(kthread_should_stop()))
+ goto done;
}
-
- schedule_timeout_interruptible(HZ);
+ ip_vs_sync_buff_release(sb);
}
+done:
+ __set_current_state(TASK_RUNNING);
+ if (sb)
+ ip_vs_sync_buff_release(sb);
+
/* clean up the sync_buff queue */
- while ((sb = sb_dequeue(ipvs)))
+ while ((sb = sb_dequeue(ipvs, ms)))
ip_vs_sync_buff_release(sb);
+ __set_current_state(TASK_RUNNING);
/* clean up the current sync_buff */
- sb = get_curr_sync_buff(ipvs, 0);
+ sb = get_curr_sync_buff(ipvs, ms, 0);
if (sb)
ip_vs_sync_buff_release(sb);
@@ -1498,8 +1674,8 @@ static int sync_thread_backup(void *data)
int len;
pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, "
- "syncid = %d\n",
- ipvs->backup_mcast_ifn, ipvs->backup_syncid);
+ "syncid = %d, id = %d\n",
+ ipvs->backup_mcast_ifn, ipvs->backup_syncid, tinfo->id);
while (!kthread_should_stop()) {
wait_event_interruptible(*sk_sleep(tinfo->sock->sk),
@@ -1511,7 +1687,8 @@ static int sync_thread_backup(void *data)
len = ip_vs_receive(tinfo->sock, tinfo->buf,
ipvs->recv_mesg_maxlen);
if (len <= 0) {
- pr_err("receiving message error\n");
+ if (len != -EAGAIN)
+ pr_err("receiving message error\n");
break;
}
@@ -1535,86 +1712,140 @@ static int sync_thread_backup(void *data)
int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
{
struct ip_vs_sync_thread_data *tinfo;
- struct task_struct **realtask, *task;
+ struct task_struct **array = NULL, *task;
struct socket *sock;
struct netns_ipvs *ipvs = net_ipvs(net);
- char *name, *buf = NULL;
+ char *name;
int (*threadfn)(void *data);
+ int id, count;
int result = -ENOMEM;
IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n",
sizeof(struct ip_vs_sync_conn_v0));
+ if (!ipvs->sync_state) {
+ count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX);
+ ipvs->threads_mask = count - 1;
+ } else
+ count = ipvs->threads_mask + 1;
if (state == IP_VS_STATE_MASTER) {
- if (ipvs->master_thread)
+ if (ipvs->ms)
return -EEXIST;
strlcpy(ipvs->master_mcast_ifn, mcast_ifn,
sizeof(ipvs->master_mcast_ifn));
ipvs->master_syncid = syncid;
- realtask = &ipvs->master_thread;
- name = "ipvs_master:%d";
+ name = "ipvs-m:%d:%d";
threadfn = sync_thread_master;
- sock = make_send_sock(net);
} else if (state == IP_VS_STATE_BACKUP) {
- if (ipvs->backup_thread)
+ if (ipvs->backup_threads)
return -EEXIST;
strlcpy(ipvs->backup_mcast_ifn, mcast_ifn,
sizeof(ipvs->backup_mcast_ifn));
ipvs->backup_syncid = syncid;
- realtask = &ipvs->backup_thread;
- name = "ipvs_backup:%d";
+ name = "ipvs-b:%d:%d";
threadfn = sync_thread_backup;
- sock = make_receive_sock(net);
} else {
return -EINVAL;
}
- if (IS_ERR(sock)) {
- result = PTR_ERR(sock);
- goto out;
- }
+ if (state == IP_VS_STATE_MASTER) {
+ struct ipvs_master_sync_state *ms;
- set_sync_mesg_maxlen(net, state);
- if (state == IP_VS_STATE_BACKUP) {
- buf = kmalloc(ipvs->recv_mesg_maxlen, GFP_KERNEL);
- if (!buf)
- goto outsocket;
+ ipvs->ms = kzalloc(count * sizeof(ipvs->ms[0]), GFP_KERNEL);
+ if (!ipvs->ms)
+ goto out;
+ ms = ipvs->ms;
+ for (id = 0; id < count; id++, ms++) {
+ INIT_LIST_HEAD(&ms->sync_queue);
+ ms->sync_queue_len = 0;
+ ms->sync_queue_delay = 0;
+ INIT_DELAYED_WORK(&ms->master_wakeup_work,
+ master_wakeup_work_handler);
+ ms->ipvs = ipvs;
+ }
+ } else {
+ array = kzalloc(count * sizeof(struct task_struct *),
+ GFP_KERNEL);
+ if (!array)
+ goto out;
}
+ set_sync_mesg_maxlen(net, state);
- tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
- if (!tinfo)
- goto outbuf;
-
- tinfo->net = net;
- tinfo->sock = sock;
- tinfo->buf = buf;
+ tinfo = NULL;
+ for (id = 0; id < count; id++) {
+ if (state == IP_VS_STATE_MASTER)
+ sock = make_send_sock(net, id);
+ else
+ sock = make_receive_sock(net, id);
+ if (IS_ERR(sock)) {
+ result = PTR_ERR(sock);
+ goto outtinfo;
+ }
+ tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
+ if (!tinfo)
+ goto outsocket;
+ tinfo->net = net;
+ tinfo->sock = sock;
+ if (state == IP_VS_STATE_BACKUP) {
+ tinfo->buf = kmalloc(ipvs->recv_mesg_maxlen,
+ GFP_KERNEL);
+ if (!tinfo->buf)
+ goto outtinfo;
+ }
+ tinfo->id = id;
- task = kthread_run(threadfn, tinfo, name, ipvs->gen);
- if (IS_ERR(task)) {
- result = PTR_ERR(task);
- goto outtinfo;
+ task = kthread_run(threadfn, tinfo, name, ipvs->gen, id);
+ if (IS_ERR(task)) {
+ result = PTR_ERR(task);
+ goto outtinfo;
+ }
+ tinfo = NULL;
+ if (state == IP_VS_STATE_MASTER)
+ ipvs->ms[id].master_thread = task;
+ else
+ array[id] = task;
}
/* mark as active */
- *realtask = task;
+
+ if (state == IP_VS_STATE_BACKUP)
+ ipvs->backup_threads = array;
+ spin_lock_bh(&ipvs->sync_buff_lock);
ipvs->sync_state |= state;
+ spin_unlock_bh(&ipvs->sync_buff_lock);
/* increase the module use count */
ip_vs_use_count_inc();
return 0;
-outtinfo:
- kfree(tinfo);
-outbuf:
- kfree(buf);
outsocket:
sk_release_kernel(sock->sk);
+
+outtinfo:
+ if (tinfo) {
+ sk_release_kernel(tinfo->sock->sk);
+ kfree(tinfo->buf);
+ kfree(tinfo);
+ }
+ count = id;
+ while (count-- > 0) {
+ if (state == IP_VS_STATE_MASTER)
+ kthread_stop(ipvs->ms[count].master_thread);
+ else
+ kthread_stop(array[count]);
+ }
+ kfree(array);
+
out:
+ if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
+ kfree(ipvs->ms);
+ ipvs->ms = NULL;
+ }
return result;
}
@@ -1622,38 +1853,60 @@ out:
int stop_sync_thread(struct net *net, int state)
{
struct netns_ipvs *ipvs = net_ipvs(net);
+ struct task_struct **array;
+ int id;
int retc = -EINVAL;
IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
if (state == IP_VS_STATE_MASTER) {
- if (!ipvs->master_thread)
+ if (!ipvs->ms)
return -ESRCH;
- pr_info("stopping master sync thread %d ...\n",
- task_pid_nr(ipvs->master_thread));
-
/*
* The lock synchronizes with sb_queue_tail(), so that we don't
* add sync buffers to the queue, when we are already in
* progress of stopping the master sync daemon.
*/
- spin_lock_bh(&ipvs->sync_lock);
+ spin_lock_bh(&ipvs->sync_buff_lock);
+ spin_lock(&ipvs->sync_lock);
ipvs->sync_state &= ~IP_VS_STATE_MASTER;
- spin_unlock_bh(&ipvs->sync_lock);
- retc = kthread_stop(ipvs->master_thread);
- ipvs->master_thread = NULL;
+ spin_unlock(&ipvs->sync_lock);
+ spin_unlock_bh(&ipvs->sync_buff_lock);
+
+ retc = 0;
+ for (id = ipvs->threads_mask; id >= 0; id--) {
+ struct ipvs_master_sync_state *ms = &ipvs->ms[id];
+ int ret;
+
+ pr_info("stopping master sync thread %d ...\n",
+ task_pid_nr(ms->master_thread));
+ cancel_delayed_work_sync(&ms->master_wakeup_work);
+ ret = kthread_stop(ms->master_thread);
+ if (retc >= 0)
+ retc = ret;
+ }
+ kfree(ipvs->ms);
+ ipvs->ms = NULL;
} else if (state == IP_VS_STATE_BACKUP) {
- if (!ipvs->backup_thread)
+ if (!ipvs->backup_threads)
return -ESRCH;
- pr_info("stopping backup sync thread %d ...\n",
- task_pid_nr(ipvs->backup_thread));
-
ipvs->sync_state &= ~IP_VS_STATE_BACKUP;
- retc = kthread_stop(ipvs->backup_thread);
- ipvs->backup_thread = NULL;
+ array = ipvs->backup_threads;
+ retc = 0;
+ for (id = ipvs->threads_mask; id >= 0; id--) {
+ int ret;
+
+ pr_info("stopping backup sync thread %d ...\n",
+ task_pid_nr(array[id]));
+ ret = kthread_stop(array[id]);
+ if (retc >= 0)
+ retc = ret;
+ }
+ kfree(array);
+ ipvs->backup_threads = NULL;
}
/* decrease the module use count */
@@ -1670,13 +1923,8 @@ int __net_init ip_vs_sync_net_init(struct net *net)
struct netns_ipvs *ipvs = net_ipvs(net);
__mutex_init(&ipvs->sync_mutex, "ipvs->sync_mutex", &__ipvs_sync_key);
- INIT_LIST_HEAD(&ipvs->sync_queue);
spin_lock_init(&ipvs->sync_lock);
spin_lock_init(&ipvs->sync_buff_lock);
-
- ipvs->sync_mcast_addr.sin_family = AF_INET;
- ipvs->sync_mcast_addr.sin_port = cpu_to_be16(IP_VS_SYNC_PORT);
- ipvs->sync_mcast_addr.sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP);
return 0;
}
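
Note on the start_sync_thread() rework above: it now starts one kthread per sync channel and, if any spawn fails, stops every thread already started before returning the error. A minimal sketch of that spawn-then-rollback pattern follows; spawn_workers() and worker_fn are hypothetical names, and only the kthread calls visible in the patch are assumed.

/* Sketch only: start "count" kthreads, unwinding on failure. */
static int spawn_workers(struct task_struct **array, int count,
			 int (*worker_fn)(void *data), void *data)
{
	struct task_struct *task;
	int id;

	for (id = 0; id < count; id++) {
		task = kthread_run(worker_fn, data, "worker:%d", id);
		if (IS_ERR(task))
			goto rollback;
		array[id] = task;
	}
	return 0;

rollback:
	while (id-- > 0)	/* stop the threads already running */
		kthread_stop(array[id]);
	return PTR_ERR(task);
}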
diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c
index fd0d4e09876a..231be7dd547a 100644
--- a/net/netfilter/ipvs/ip_vs_wrr.c
+++ b/net/netfilter/ipvs/ip_vs_wrr.c
@@ -84,7 +84,7 @@ static int ip_vs_wrr_init_svc(struct ip_vs_service *svc)
/*
* Allocate the mark variable for WRR scheduling
*/
- mark = kmalloc(sizeof(struct ip_vs_wrr_mark), GFP_ATOMIC);
+ mark = kmalloc(sizeof(struct ip_vs_wrr_mark), GFP_KERNEL);
if (mark == NULL)
return -ENOMEM;
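
For context on the one-line change above: ip_vs_wrr_init_svc() runs in process context, where sleeping is allowed, so the scarcer GFP_ATOMIC reserve is not needed. A rule-of-thumb sketch (hypothetical helper name):

/* Sketch: choose the GFP flag from the calling context.
 * GFP_KERNEL may sleep to reclaim memory; GFP_ATOMIC never sleeps
 * and dips into emergency reserves, so keep it for softirq or
 * spinlock-held paths such as per-packet processing. */
static void *ctx_alloc(size_t len, bool atomic_ctx)
{
	return kmalloc(len, atomic_ctx ? GFP_ATOMIC : GFP_KERNEL);
}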
diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
index f4f8cda05986..d61e0782a797 100644
--- a/net/netfilter/nf_conntrack_acct.c
+++ b/net/netfilter/nf_conntrack_acct.c
@@ -69,8 +69,8 @@ static int nf_conntrack_acct_init_sysctl(struct net *net)
table[0].data = &net->ct.sysctl_acct;
- net->ct.acct_sysctl_header = register_net_sysctl_table(net,
- nf_net_netfilter_sysctl_path, table);
+ net->ct.acct_sysctl_header = register_net_sysctl(net, "net/netfilter",
+ table);
if (!net->ct.acct_sysctl_header) {
printk(KERN_ERR "nf_conntrack_acct: can't register to sysctl.\n");
goto out_register;
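
The hunk above is one instance of a conversion repeated throughout this patch: register_net_sysctl() takes the sysctl location as a plain string ("net/netfilter") instead of a struct ctl_path array. The per-net registration pattern it feeds into, condensed into a sketch (error labels shortened):

static struct ctl_table acct_sysctl_table[] = {
	{
		.procname	= "nf_conntrack_acct",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static int acct_init_sysctl(struct net *net)	/* condensed sketch */
{
	struct ctl_table *table;

	/* duplicate the template so each netns binds its own storage */
	table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
			GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	table[0].data = &net->ct.sysctl_acct;

	net->ct.acct_sysctl_header =
		register_net_sysctl(net, "net/netfilter", table);
	if (!net->ct.acct_sysctl_header) {
		kfree(table);
		return -ENOMEM;
	}
	return 0;
}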
diff --git a/net/netfilter/nf_conntrack_amanda.c b/net/netfilter/nf_conntrack_amanda.c
index 13fd2c55e329..f2de8c55ac50 100644
--- a/net/netfilter/nf_conntrack_amanda.c
+++ b/net/netfilter/nf_conntrack_amanda.c
@@ -107,8 +107,7 @@ static int amanda_help(struct sk_buff *skb,
/* No data? */
dataoff = protoff + sizeof(struct udphdr);
if (dataoff >= skb->len) {
- if (net_ratelimit())
- printk(KERN_ERR "amanda_help: skblen = %u\n", skb->len);
+ net_err_ratelimited("amanda_help: skblen = %u\n", skb->len);
return NF_ACCEPT;
}
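
net_err_ratelimited() and its siblings, used in many hunks below, fold the recurring "if (net_ratelimit()) printk(...)" pairing into a single call. They behave roughly like the sketch here; see linux/net.h for the real definitions, which cover pr_err, pr_warn, pr_info and friends.

/* Sketch of the idiom being replaced (hypothetical macro name). */
#define sketch_err_ratelimited(fmt, ...)		\
do {							\
	if (net_ratelimit())				\
		pr_err(fmt, ##__VA_ARGS__);		\
} while (0)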
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 3cc4487ac349..ac3af97cc468 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -683,10 +683,7 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
if (!early_drop(net, hash_bucket(hash, net))) {
atomic_dec(&net->ct.count);
- if (net_ratelimit())
- printk(KERN_WARNING
- "nf_conntrack: table full, dropping"
- " packet.\n");
+ net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
return ERR_PTR(-ENOMEM);
}
}
@@ -1152,8 +1149,9 @@ static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple)
{
- NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port);
- NLA_PUT_BE16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port);
+ if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
+ nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -1335,7 +1333,6 @@ static void nf_conntrack_cleanup_init_net(void)
while (untrack_refs() > 0)
schedule();
- nf_conntrack_helper_fini();
nf_conntrack_proto_fini();
#ifdef CONFIG_NF_CONNTRACK_ZONES
nf_ct_extend_unregister(&nf_ct_zone_extend);
@@ -1353,6 +1350,7 @@ static void nf_conntrack_cleanup_net(struct net *net)
}
nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
+ nf_conntrack_helper_fini(net);
nf_conntrack_timeout_fini(net);
nf_conntrack_ecache_fini(net);
nf_conntrack_tstamp_fini(net);
@@ -1503,10 +1501,6 @@ static int nf_conntrack_init_init_net(void)
if (ret < 0)
goto err_proto;
- ret = nf_conntrack_helper_init();
- if (ret < 0)
- goto err_helper;
-
#ifdef CONFIG_NF_CONNTRACK_ZONES
ret = nf_ct_extend_register(&nf_ct_zone_extend);
if (ret < 0)
@@ -1524,10 +1518,8 @@ static int nf_conntrack_init_init_net(void)
#ifdef CONFIG_NF_CONNTRACK_ZONES
err_extend:
- nf_conntrack_helper_fini();
-#endif
-err_helper:
nf_conntrack_proto_fini();
+#endif
err_proto:
return ret;
}
@@ -1588,11 +1580,16 @@ static int nf_conntrack_init_net(struct net *net)
ret = nf_conntrack_timeout_init(net);
if (ret < 0)
goto err_timeout;
+ ret = nf_conntrack_helper_init(net);
+ if (ret < 0)
+ goto err_helper;
return 0;
-err_timeout:
+err_helper:
nf_conntrack_timeout_fini(net);
+err_timeout:
+ nf_conntrack_ecache_fini(net);
err_ecache:
nf_conntrack_tstamp_fini(net);
err_tstamp:
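
Most of the remaining hunks in this patch repeat the mechanical conversion seen in nf_ct_port_tuple_to_nlattr() above: the old NLA_PUT_*() macros hid a "goto nla_put_failure" inside the macro body, which made the control flow invisible at the call site. Schematically (a sketch, not the exact linux/netlink.h definitions):

/* Old style: the jump out of the function is hidden in the macro. */
#define NLA_PUT_BE16_SKETCH(skb, type, value)		\
do {							\
	if (nla_put_be16(skb, type, value))		\
		goto nla_put_failure;			\
} while (0)

/* New style: the failure branch is explicit, and several puts can
 * share it by chaining the calls with ||, as done above. */
if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
    nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
	goto nla_put_failure;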
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 5bd3047ddeec..e7be79e640de 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -84,7 +84,7 @@ EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
int nf_conntrack_register_notifier(struct net *net,
struct nf_ct_event_notifier *new)
{
- int ret = 0;
+ int ret;
struct nf_ct_event_notifier *notify;
mutex_lock(&nf_ct_ecache_mutex);
@@ -95,8 +95,7 @@ int nf_conntrack_register_notifier(struct net *net,
goto out_unlock;
}
rcu_assign_pointer(net->ct.nf_conntrack_event_cb, new);
- mutex_unlock(&nf_ct_ecache_mutex);
- return ret;
+ ret = 0;
out_unlock:
mutex_unlock(&nf_ct_ecache_mutex);
@@ -121,7 +120,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
int nf_ct_expect_register_notifier(struct net *net,
struct nf_exp_event_notifier *new)
{
- int ret = 0;
+ int ret;
struct nf_exp_event_notifier *notify;
mutex_lock(&nf_ct_ecache_mutex);
@@ -132,8 +131,7 @@ int nf_ct_expect_register_notifier(struct net *net,
goto out_unlock;
}
rcu_assign_pointer(net->ct.nf_expect_event_cb, new);
- mutex_unlock(&nf_ct_ecache_mutex);
- return ret;
+ ret = 0;
out_unlock:
mutex_unlock(&nf_ct_ecache_mutex);
@@ -199,8 +197,7 @@ static int nf_conntrack_event_init_sysctl(struct net *net)
table[1].data = &net->ct.sysctl_events_retry_timeout;
net->ct.event_sysctl_header =
- register_net_sysctl_table(net,
- nf_net_netfilter_sysctl_path, table);
+ register_net_sysctl(net, "net/netfilter", table);
if (!net->ct.event_sysctl_header) {
printk(KERN_ERR "nf_ct_event: can't register to sysctl.\n");
goto out_register;
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 4147ba3f653c..45cf602a76bc 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -424,9 +424,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
}
if (net->ct.expect_count >= nf_ct_expect_max) {
- if (net_ratelimit())
- printk(KERN_WARNING
- "nf_conntrack: expectation table full\n");
+ net_warn_ratelimited("nf_conntrack: expectation table full\n");
ret = -EMFILE;
}
out:
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 722291f8af72..46d69d7f1bb4 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -605,8 +605,7 @@ static int h245_help(struct sk_buff *skb, unsigned int protoff,
drop:
spin_unlock_bh(&nf_h323_lock);
- if (net_ratelimit())
- pr_info("nf_ct_h245: packet dropped\n");
+ net_info_ratelimited("nf_ct_h245: packet dropped\n");
return NF_DROP;
}
@@ -1156,8 +1155,7 @@ static int q931_help(struct sk_buff *skb, unsigned int protoff,
drop:
spin_unlock_bh(&nf_h323_lock);
- if (net_ratelimit())
- pr_info("nf_ct_q931: packet dropped\n");
+ net_info_ratelimited("nf_ct_q931: packet dropped\n");
return NF_DROP;
}
@@ -1230,7 +1228,7 @@ static struct nf_conntrack_expect *find_expect(struct nf_conn *ct,
/****************************************************************************/
static int set_expect_timeout(struct nf_conntrack_expect *exp,
- unsigned timeout)
+ unsigned int timeout)
{
if (!exp || !del_timer(&exp->timeout))
return 0;
@@ -1731,8 +1729,7 @@ static int ras_help(struct sk_buff *skb, unsigned int protoff,
drop:
spin_unlock_bh(&nf_h323_lock);
- if (net_ratelimit())
- pr_info("nf_ct_ras: packet dropped\n");
+ net_info_ratelimited("nf_ct_ras: packet dropped\n");
return NF_DROP;
}
@@ -1833,4 +1830,6 @@ MODULE_AUTHOR("Jing Min Zhao <zhaojingmin@users.sourceforge.net>");
MODULE_DESCRIPTION("H.323 connection tracking helper");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ip_conntrack_h323");
-MODULE_ALIAS_NFCT_HELPER("h323");
+MODULE_ALIAS_NFCT_HELPER("RAS");
+MODULE_ALIAS_NFCT_HELPER("Q.931");
+MODULE_ALIAS_NFCT_HELPER("H.245");
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 436b7cb79ba4..4fa2ff961f5a 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -34,6 +34,67 @@ static struct hlist_head *nf_ct_helper_hash __read_mostly;
static unsigned int nf_ct_helper_hsize __read_mostly;
static unsigned int nf_ct_helper_count __read_mostly;
+static bool nf_ct_auto_assign_helper __read_mostly = true;
+module_param_named(nf_conntrack_helper, nf_ct_auto_assign_helper, bool, 0644);
+MODULE_PARM_DESC(nf_conntrack_helper,
+ "Enable automatic conntrack helper assignment (default 1)");
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table helper_sysctl_table[] = {
+ {
+ .procname = "nf_conntrack_helper",
+ .data = &init_net.ct.sysctl_auto_assign_helper,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {}
+};
+
+static int nf_conntrack_helper_init_sysctl(struct net *net)
+{
+ struct ctl_table *table;
+
+ table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
+ GFP_KERNEL);
+ if (!table)
+ goto out;
+
+ table[0].data = &net->ct.sysctl_auto_assign_helper;
+
+ net->ct.helper_sysctl_header =
+ register_net_sysctl(net, "net/netfilter", table);
+
+ if (!net->ct.helper_sysctl_header) {
+ pr_err("nf_conntrack_helper: can't register to sysctl.\n");
+ goto out_register;
+ }
+ return 0;
+
+out_register:
+ kfree(table);
+out:
+ return -ENOMEM;
+}
+
+static void nf_conntrack_helper_fini_sysctl(struct net *net)
+{
+ struct ctl_table *table;
+
+ table = net->ct.helper_sysctl_header->ctl_table_arg;
+ unregister_net_sysctl_table(net->ct.helper_sysctl_header);
+ kfree(table);
+}
+#else
+static int nf_conntrack_helper_init_sysctl(struct net *net)
+{
+ return 0;
+}
+
+static void nf_conntrack_helper_fini_sysctl(struct net *net)
+{
+}
+#endif /* CONFIG_SYSCTL */
/* Stupid hash, but collision free for the default registrations of the
* helpers currently in the kernel. */
@@ -118,17 +179,38 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
{
struct nf_conntrack_helper *helper = NULL;
struct nf_conn_help *help;
+ struct net *net = nf_ct_net(ct);
int ret = 0;
+	/* A helper has already been explicitly attached. The function
+	 * nf_conntrack_alter_reply - in case NAT is in use - asks us to
+	 * look the helper up again. Since the user is now in full control
+	 * of making consistent helper configurations, skip this automatic
+	 * re-lookup, otherwise we'll lose the helper.
+	 */
+ if (test_bit(IPS_HELPER_BIT, &ct->status))
+ return 0;
+
if (tmpl != NULL) {
help = nfct_help(tmpl);
- if (help != NULL)
+ if (help != NULL) {
helper = help->helper;
+ set_bit(IPS_HELPER_BIT, &ct->status);
+ }
}
help = nfct_help(ct);
- if (helper == NULL)
+ if (net->ct.sysctl_auto_assign_helper && helper == NULL) {
helper = __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+ if (unlikely(!net->ct.auto_assign_helper_warned && helper)) {
+ pr_info("nf_conntrack: automatic helper "
+ "assignment is deprecated and it will "
+ "be removed soon. Use the iptables CT target "
+ "to attach helpers instead.\n");
+ net->ct.auto_assign_helper_warned = true;
+ }
+ }
+
if (helper == NULL) {
if (help)
RCU_INIT_POINTER(help->helper, NULL);
@@ -315,28 +397,44 @@ static struct nf_ct_ext_type helper_extend __read_mostly = {
.id = NF_CT_EXT_HELPER,
};
-int nf_conntrack_helper_init(void)
+int nf_conntrack_helper_init(struct net *net)
{
int err;
- nf_ct_helper_hsize = 1; /* gets rounded up to use one page */
- nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 0);
- if (!nf_ct_helper_hash)
- return -ENOMEM;
+ net->ct.auto_assign_helper_warned = false;
+ net->ct.sysctl_auto_assign_helper = nf_ct_auto_assign_helper;
+
+ if (net_eq(net, &init_net)) {
+ nf_ct_helper_hsize = 1; /* gets rounded up to use one page */
+ nf_ct_helper_hash =
+ nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 0);
+ if (!nf_ct_helper_hash)
+ return -ENOMEM;
- err = nf_ct_extend_register(&helper_extend);
+ err = nf_ct_extend_register(&helper_extend);
+ if (err < 0)
+ goto err1;
+ }
+
+ err = nf_conntrack_helper_init_sysctl(net);
if (err < 0)
- goto err1;
+ goto out_sysctl;
return 0;
+out_sysctl:
+ if (net_eq(net, &init_net))
+ nf_ct_extend_unregister(&helper_extend);
err1:
nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
return err;
}
-void nf_conntrack_helper_fini(void)
+void nf_conntrack_helper_fini(struct net *net)
{
- nf_ct_extend_unregister(&helper_extend);
- nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
+ nf_conntrack_helper_fini_sysctl(net);
+ if (net_eq(net, &init_net)) {
+ nf_ct_extend_unregister(&helper_extend);
+ nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
+ }
}
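
Two patterns from the helper rework above are worth calling out: global state (the helper hash table and the conntrack extension) is set up only when the initial namespace comes up, while the sysctl knob is registered once per namespace. Condensed sketch, using only functions visible in the patch:

/* Condensed sketch of the init_net vs. per-net split used above. */
static int helper_net_init_sketch(struct net *net)
{
	net->ct.sysctl_auto_assign_helper = nf_ct_auto_assign_helper;

	if (net_eq(net, &init_net)) {
		/* shared by every namespace: allocate exactly once */
		nf_ct_helper_hash =
			nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 0);
		if (!nf_ct_helper_hash)
			return -ENOMEM;
	}
	/* per-namespace: each netns gets its own sysctl entry */
	return nf_conntrack_helper_init_sysctl(net);
}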
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 4f9390b98697..81366c118271 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -185,11 +185,9 @@ static int help(struct sk_buff *skb, unsigned int protoff,
tuple = &ct->tuplehash[dir].tuple;
if (tuple->src.u3.ip != dcc_ip &&
tuple->dst.u3.ip != dcc_ip) {
- if (net_ratelimit())
- printk(KERN_WARNING
- "Forged DCC command from %pI4: %pI4:%u\n",
- &tuple->src.u3.ip,
- &dcc_ip, dcc_port);
+ net_warn_ratelimited("Forged DCC command from %pI4: %pI4:%u\n",
+ &tuple->src.u3.ip,
+ &dcc_ip, dcc_port);
continue;
}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index ca7e8354e4f8..6f4b00a8fc73 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -66,7 +66,8 @@ ctnetlink_dump_tuples_proto(struct sk_buff *skb,
nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO | NLA_F_NESTED);
if (!nest_parms)
goto nla_put_failure;
- NLA_PUT_U8(skb, CTA_PROTO_NUM, tuple->dst.protonum);
+ if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum))
+ goto nla_put_failure;
if (likely(l4proto->tuple_to_nlattr))
ret = l4proto->tuple_to_nlattr(skb, tuple);
@@ -126,7 +127,8 @@ ctnetlink_dump_tuples(struct sk_buff *skb,
static inline int
ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
{
- NLA_PUT_BE32(skb, CTA_STATUS, htonl(ct->status));
+ if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -141,7 +143,8 @@ ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
if (timeout < 0)
timeout = 0;
- NLA_PUT_BE32(skb, CTA_TIMEOUT, htonl(timeout));
+ if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout)))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -190,7 +193,8 @@ ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct nf_conn *ct)
nest_helper = nla_nest_start(skb, CTA_HELP | NLA_F_NESTED);
if (!nest_helper)
goto nla_put_failure;
- NLA_PUT_STRING(skb, CTA_HELP_NAME, helper->name);
+ if (nla_put_string(skb, CTA_HELP_NAME, helper->name))
+ goto nla_put_failure;
if (helper->to_nlattr)
helper->to_nlattr(skb, ct);
@@ -214,8 +218,9 @@ dump_counters(struct sk_buff *skb, u64 pkts, u64 bytes,
if (!nest_count)
goto nla_put_failure;
- NLA_PUT_BE64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts));
- NLA_PUT_BE64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes));
+ if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts)) ||
+ nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes)))
+ goto nla_put_failure;
nla_nest_end(skb, nest_count);
@@ -260,11 +265,10 @@ ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
if (!nest_count)
goto nla_put_failure;
- NLA_PUT_BE64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start));
- if (tstamp->stop != 0) {
- NLA_PUT_BE64(skb, CTA_TIMESTAMP_STOP,
- cpu_to_be64(tstamp->stop));
- }
+ if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start)) ||
+ (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP,
+ cpu_to_be64(tstamp->stop))))
+ goto nla_put_failure;
nla_nest_end(skb, nest_count);
return 0;
@@ -277,7 +281,8 @@ nla_put_failure:
static inline int
ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
{
- NLA_PUT_BE32(skb, CTA_MARK, htonl(ct->mark));
+ if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark)))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -304,7 +309,8 @@ ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
if (!nest_secctx)
goto nla_put_failure;
- NLA_PUT_STRING(skb, CTA_SECCTX_NAME, secctx);
+ if (nla_put_string(skb, CTA_SECCTX_NAME, secctx))
+ goto nla_put_failure;
nla_nest_end(skb, nest_secctx);
ret = 0;
@@ -349,12 +355,13 @@ dump_nat_seq_adj(struct sk_buff *skb, const struct nf_nat_seq *natseq, int type)
if (!nest_parms)
goto nla_put_failure;
- NLA_PUT_BE32(skb, CTA_NAT_SEQ_CORRECTION_POS,
- htonl(natseq->correction_pos));
- NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_BEFORE,
- htonl(natseq->offset_before));
- NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_AFTER,
- htonl(natseq->offset_after));
+ if (nla_put_be32(skb, CTA_NAT_SEQ_CORRECTION_POS,
+ htonl(natseq->correction_pos)) ||
+ nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_BEFORE,
+ htonl(natseq->offset_before)) ||
+ nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_AFTER,
+ htonl(natseq->offset_after)))
+ goto nla_put_failure;
nla_nest_end(skb, nest_parms);
@@ -390,7 +397,8 @@ ctnetlink_dump_nat_seq_adj(struct sk_buff *skb, const struct nf_conn *ct)
static inline int
ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
{
- NLA_PUT_BE32(skb, CTA_ID, htonl((unsigned long)ct));
+ if (nla_put_be32(skb, CTA_ID, htonl((unsigned long)ct)))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -400,7 +408,8 @@ nla_put_failure:
static inline int
ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
{
- NLA_PUT_BE32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use)));
+ if (nla_put_be32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -440,8 +449,9 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
goto nla_put_failure;
nla_nest_end(skb, nest_parms);
- if (nf_ct_zone(ct))
- NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct)));
+ if (nf_ct_zone(ct) &&
+ nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
+ goto nla_put_failure;
if (ctnetlink_dump_status(skb, ct) < 0 ||
ctnetlink_dump_timeout(skb, ct) < 0 ||
@@ -617,8 +627,9 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
goto nla_put_failure;
nla_nest_end(skb, nest_parms);
- if (nf_ct_zone(ct))
- NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct)));
+ if (nf_ct_zone(ct) &&
+ nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
+ goto nla_put_failure;
if (ctnetlink_dump_id(skb, ct) < 0)
goto nla_put_failure;
@@ -1705,7 +1716,8 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
if (!nest_parms)
goto nla_put_failure;
- NLA_PUT_BE32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir));
+ if (nla_put_be32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir)))
+ goto nla_put_failure;
nat_tuple.src.l3num = nf_ct_l3num(master);
nat_tuple.src.u3.ip = exp->saved_ip;
@@ -1718,21 +1730,24 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
nla_nest_end(skb, nest_parms);
}
#endif
- NLA_PUT_BE32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout));
- NLA_PUT_BE32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp));
- NLA_PUT_BE32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags));
- NLA_PUT_BE32(skb, CTA_EXPECT_CLASS, htonl(exp->class));
+ if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
+ nla_put_be32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)) ||
+ nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
+ nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
+ goto nla_put_failure;
help = nfct_help(master);
if (help) {
struct nf_conntrack_helper *helper;
helper = rcu_dereference(help->helper);
- if (helper)
- NLA_PUT_STRING(skb, CTA_EXPECT_HELP_NAME, helper->name);
+ if (helper &&
+ nla_put_string(skb, CTA_EXPECT_HELP_NAME, helper->name))
+ goto nla_put_failure;
}
expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn);
- if (expfn != NULL)
- NLA_PUT_STRING(skb, CTA_EXPECT_FN, expfn->name);
+ if (expfn != NULL &&
+ nla_put_string(skb, CTA_EXPECT_FN, expfn->name))
+ goto nla_put_failure;
return 0;
@@ -2065,7 +2080,15 @@ static int
ctnetlink_change_expect(struct nf_conntrack_expect *x,
const struct nlattr * const cda[])
{
- return -EOPNOTSUPP;
+ if (cda[CTA_EXPECT_TIMEOUT]) {
+ if (!del_timer(&x->timeout))
+ return -ETIME;
+
+ x->timeout.expires = jiffies +
+ ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;
+ add_timer(&x->timeout);
+ }
+ return 0;
}
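
The new ctnetlink_change_expect() body relies on del_timer()'s return value to close a race: it returns 0 when the timer is no longer pending (it may already be firing), in which case the update is refused with -ETIME rather than re-arming a dying expectation. The pattern in isolation:

/* Sketch: safely extend a live timer. del_timer() returning 0 means
 * the timer was not pending, so re-arming could race with the expiry
 * handler; report that to the caller instead. */
static int extend_timer(struct timer_list *t, u32 secs)
{
	if (!del_timer(t))
		return -ETIME;
	t->expires = jiffies + (unsigned long)secs * HZ;
	add_timer(t);
	return 0;
}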
static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index be3da2c8cdc5..8b631b07a645 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -36,11 +36,11 @@ static DEFINE_MUTEX(nf_ct_proto_mutex);
#ifdef CONFIG_SYSCTL
static int
-nf_ct_register_sysctl(struct ctl_table_header **header, struct ctl_path *path,
+nf_ct_register_sysctl(struct ctl_table_header **header, const char *path,
struct ctl_table *table, unsigned int *users)
{
if (*header == NULL) {
- *header = register_sysctl_paths(path, table);
+ *header = register_net_sysctl(&init_net, path, table);
if (*header == NULL)
return -ENOMEM;
}
@@ -56,7 +56,7 @@ nf_ct_unregister_sysctl(struct ctl_table_header **header,
if (users != NULL && --*users > 0)
return;
- unregister_sysctl_table(*header);
+ unregister_net_sysctl_table(*header);
*header = NULL;
}
#endif
@@ -250,7 +250,7 @@ static int nf_ct_l4proto_register_sysctl(struct nf_conntrack_l4proto *l4proto)
#ifdef CONFIG_SYSCTL
if (l4proto->ctl_table != NULL) {
err = nf_ct_register_sysctl(l4proto->ctl_table_header,
- nf_net_netfilter_sysctl_path,
+ "net/netfilter",
l4proto->ctl_table,
l4proto->ctl_table_users);
if (err < 0)
@@ -259,7 +259,7 @@ static int nf_ct_l4proto_register_sysctl(struct nf_conntrack_l4proto *l4proto)
#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
if (l4proto->ctl_compat_table != NULL) {
err = nf_ct_register_sysctl(&l4proto->ctl_compat_table_header,
- nf_net_ipv4_netfilter_sysctl_path,
+ "net/ipv4/netfilter",
l4proto->ctl_compat_table, NULL);
if (err == 0)
goto out;
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 24fdce256cb0..ef706a485be1 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -643,11 +643,12 @@ static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
nest_parms = nla_nest_start(skb, CTA_PROTOINFO_DCCP | NLA_F_NESTED);
if (!nest_parms)
goto nla_put_failure;
- NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state);
- NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_ROLE,
- ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]);
- NLA_PUT_BE64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ,
- cpu_to_be64(ct->proto.dccp.handshake_seq));
+ if (nla_put_u8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state) ||
+ nla_put_u8(skb, CTA_PROTOINFO_DCCP_ROLE,
+ ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]) ||
+ nla_put_be64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ,
+ cpu_to_be64(ct->proto.dccp.handshake_seq)))
+ goto nla_put_failure;
nla_nest_end(skb, nest_parms);
spin_unlock_bh(&ct->lock);
return 0;
@@ -739,9 +740,10 @@ dccp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
const unsigned int *timeouts = data;
int i;
- for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i<CTA_TIMEOUT_DCCP_MAX+1; i++)
- NLA_PUT_BE32(skb, i, htonl(timeouts[i] / HZ));
-
+ for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i<CTA_TIMEOUT_DCCP_MAX+1; i++) {
+ if (nla_put_be32(skb, i, htonl(timeouts[i] / HZ)))
+ goto nla_put_failure;
+ }
return 0;
nla_put_failure:
@@ -908,8 +910,8 @@ static __net_init int dccp_net_init(struct net *net)
dn->sysctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT];
dn->sysctl_table[7].data = &dn->dccp_loose;
- dn->sysctl_header = register_net_sysctl_table(net,
- nf_net_netfilter_sysctl_path, dn->sysctl_table);
+ dn->sysctl_header = register_net_sysctl(net, "net/netfilter",
+ dn->sysctl_table);
if (!dn->sysctl_header) {
kfree(dn->sysctl_table);
return -ENOMEM;
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index 835e24c58f0d..d8923d54b358 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -90,7 +90,8 @@ generic_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
const unsigned int *timeout = data;
- NLA_PUT_BE32(skb, CTA_TIMEOUT_GENERIC_TIMEOUT, htonl(*timeout / HZ));
+ if (nla_put_be32(skb, CTA_TIMEOUT_GENERIC_TIMEOUT, htonl(*timeout / HZ)))
+ goto nla_put_failure;
return 0;
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index 659648c4b14a..4bf6b4e4b776 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -321,10 +321,11 @@ gre_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
const unsigned int *timeouts = data;
- NLA_PUT_BE32(skb, CTA_TIMEOUT_GRE_UNREPLIED,
- htonl(timeouts[GRE_CT_UNREPLIED] / HZ));
- NLA_PUT_BE32(skb, CTA_TIMEOUT_GRE_REPLIED,
- htonl(timeouts[GRE_CT_REPLIED] / HZ));
+ if (nla_put_be32(skb, CTA_TIMEOUT_GRE_UNREPLIED,
+ htonl(timeouts[GRE_CT_UNREPLIED] / HZ)) ||
+ nla_put_be32(skb, CTA_TIMEOUT_GRE_REPLIED,
+ htonl(timeouts[GRE_CT_REPLIED] / HZ)))
+ goto nla_put_failure;
return 0;
nla_put_failure:
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 72b5088592dc..996db2fa21f7 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -482,15 +482,12 @@ static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
if (!nest_parms)
goto nla_put_failure;
- NLA_PUT_U8(skb, CTA_PROTOINFO_SCTP_STATE, ct->proto.sctp.state);
-
- NLA_PUT_BE32(skb,
- CTA_PROTOINFO_SCTP_VTAG_ORIGINAL,
- ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL]);
-
- NLA_PUT_BE32(skb,
- CTA_PROTOINFO_SCTP_VTAG_REPLY,
- ct->proto.sctp.vtag[IP_CT_DIR_REPLY]);
+ if (nla_put_u8(skb, CTA_PROTOINFO_SCTP_STATE, ct->proto.sctp.state) ||
+ nla_put_be32(skb, CTA_PROTOINFO_SCTP_VTAG_ORIGINAL,
+ ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL]) ||
+ nla_put_be32(skb, CTA_PROTOINFO_SCTP_VTAG_REPLY,
+ ct->proto.sctp.vtag[IP_CT_DIR_REPLY]))
+ goto nla_put_failure;
spin_unlock_bh(&ct->lock);
@@ -578,9 +575,10 @@ sctp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
const unsigned int *timeouts = data;
int i;
- for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++)
- NLA_PUT_BE32(skb, i, htonl(timeouts[i] / HZ));
-
+ for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++) {
+ if (nla_put_be32(skb, i, htonl(timeouts[i] / HZ)))
+ goto nla_put_failure;
+ }
return 0;
nla_put_failure:
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 361eade62a09..21ff1a99f534 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -584,8 +584,8 @@ static bool tcp_in_window(const struct nf_conn *ct,
* Let's try to use the data from the packet.
*/
sender->td_end = end;
- win <<= sender->td_scale;
- sender->td_maxwin = (win == 0 ? 1 : win);
+ swin = win << sender->td_scale;
+ sender->td_maxwin = (swin == 0 ? 1 : swin);
sender->td_maxend = end + sender->td_maxwin;
/*
* We haven't seen traffic in the other direction yet
@@ -952,7 +952,8 @@ static int tcp_packet(struct nf_conn *ct,
spin_unlock_bh(&ct->lock);
if (LOG_INVALID(net, IPPROTO_TCP))
nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
- "nf_ct_tcp: invalid packet ignored ");
+ "nf_ct_tcp: invalid packet ignored in "
+ "state %s ", tcp_conntrack_names[old_state]);
return NF_ACCEPT;
case TCP_CONNTRACK_MAX:
/* Invalid packet */
@@ -1147,21 +1148,22 @@ static int tcp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
if (!nest_parms)
goto nla_put_failure;
- NLA_PUT_U8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state);
-
- NLA_PUT_U8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
- ct->proto.tcp.seen[0].td_scale);
-
- NLA_PUT_U8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY,
- ct->proto.tcp.seen[1].td_scale);
+ if (nla_put_u8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state) ||
+ nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
+ ct->proto.tcp.seen[0].td_scale) ||
+ nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY,
+ ct->proto.tcp.seen[1].td_scale))
+ goto nla_put_failure;
tmp.flags = ct->proto.tcp.seen[0].flags;
- NLA_PUT(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL,
- sizeof(struct nf_ct_tcp_flags), &tmp);
+ if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL,
+ sizeof(struct nf_ct_tcp_flags), &tmp))
+ goto nla_put_failure;
tmp.flags = ct->proto.tcp.seen[1].flags;
- NLA_PUT(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY,
- sizeof(struct nf_ct_tcp_flags), &tmp);
+ if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY,
+ sizeof(struct nf_ct_tcp_flags), &tmp))
+ goto nla_put_failure;
spin_unlock_bh(&ct->lock);
nla_nest_end(skb, nest_parms);
@@ -1310,28 +1312,29 @@ tcp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
const unsigned int *timeouts = data;
- NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_SENT,
- htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ));
- NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_RECV,
- htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ));
- NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_ESTABLISHED,
- htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ));
- NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_FIN_WAIT,
- htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ));
- NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT,
- htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ));
- NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_LAST_ACK,
- htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ));
- NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_TIME_WAIT,
- htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ));
- NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_CLOSE,
- htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ));
- NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_SENT2,
- htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ));
- NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_RETRANS,
- htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ));
- NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_UNACK,
- htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ));
+ if (nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT,
+ htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ)) ||
+ nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_RECV,
+ htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ)) ||
+ nla_put_be32(skb, CTA_TIMEOUT_TCP_ESTABLISHED,
+ htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ)) ||
+ nla_put_be32(skb, CTA_TIMEOUT_TCP_FIN_WAIT,
+ htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ)) ||
+ nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT,
+ htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ)) ||
+ nla_put_be32(skb, CTA_TIMEOUT_TCP_LAST_ACK,
+ htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ)) ||
+ nla_put_be32(skb, CTA_TIMEOUT_TCP_TIME_WAIT,
+ htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ)) ||
+ nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE,
+ htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ)) ||
+ nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT2,
+ htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ)) ||
+ nla_put_be32(skb, CTA_TIMEOUT_TCP_RETRANS,
+ htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ)) ||
+ nla_put_be32(skb, CTA_TIMEOUT_TCP_UNACK,
+ htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ)))
+ goto nla_put_failure;
return 0;
nla_put_failure:
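
On the tcp_in_window() hunk near the top of this file: the scaled window is now computed into a separate swin variable instead of shifting win in place, leaving the unscaled on-wire value in win available to the window checks later in the function. Reduced to a sketch with hypothetical parameter names:

/* Sketch of the fix: scale a copy, leave "win" untouched. */
static u32 scaled_maxwin(u32 win, u8 td_scale)
{
	u32 swin = win << td_scale;	/* "win" keeps the advertised value */

	return swin == 0 ? 1 : swin;
}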
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index a9073dc1548d..7259a6bdeb49 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -181,10 +181,11 @@ udp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
const unsigned int *timeouts = data;
- NLA_PUT_BE32(skb, CTA_TIMEOUT_UDP_UNREPLIED,
- htonl(timeouts[UDP_CT_UNREPLIED] / HZ));
- NLA_PUT_BE32(skb, CTA_TIMEOUT_UDP_REPLIED,
- htonl(timeouts[UDP_CT_REPLIED] / HZ));
+ if (nla_put_be32(skb, CTA_TIMEOUT_UDP_UNREPLIED,
+ htonl(timeouts[UDP_CT_UNREPLIED] / HZ)) ||
+ nla_put_be32(skb, CTA_TIMEOUT_UDP_REPLIED,
+ htonl(timeouts[UDP_CT_REPLIED] / HZ)))
+ goto nla_put_failure;
return 0;
nla_put_failure:
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index e0606392cda0..4d60a5376aa6 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -185,10 +185,11 @@ udplite_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
const unsigned int *timeouts = data;
- NLA_PUT_BE32(skb, CTA_TIMEOUT_UDPLITE_UNREPLIED,
- htonl(timeouts[UDPLITE_CT_UNREPLIED] / HZ));
- NLA_PUT_BE32(skb, CTA_TIMEOUT_UDPLITE_REPLIED,
- htonl(timeouts[UDPLITE_CT_REPLIED] / HZ));
+ if (nla_put_be32(skb, CTA_TIMEOUT_UDPLITE_UNREPLIED,
+ htonl(timeouts[UDPLITE_CT_UNREPLIED] / HZ)) ||
+ nla_put_be32(skb, CTA_TIMEOUT_UDPLITE_REPLIED,
+ htonl(timeouts[UDPLITE_CT_REPLIED] / HZ)))
+ goto nla_put_failure;
return 0;
nla_put_failure:
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 885f5ab9bc28..9b3943252a5e 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -468,18 +468,13 @@ static ctl_table nf_ct_netfilter_table[] = {
{ }
};
-static struct ctl_path nf_ct_path[] = {
- { .procname = "net", },
- { }
-};
-
static int nf_conntrack_standalone_init_sysctl(struct net *net)
{
struct ctl_table *table;
if (net_eq(net, &init_net)) {
nf_ct_netfilter_header =
- register_sysctl_paths(nf_ct_path, nf_ct_netfilter_table);
+ register_net_sysctl(&init_net, "net", nf_ct_netfilter_table);
if (!nf_ct_netfilter_header)
goto out;
}
@@ -494,8 +489,7 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
table[3].data = &net->ct.sysctl_checksum;
table[4].data = &net->ct.sysctl_log_invalid;
- net->ct.sysctl_header = register_net_sysctl_table(net,
- nf_net_netfilter_sysctl_path, table);
+ net->ct.sysctl_header = register_net_sysctl(net, "net/netfilter", table);
if (!net->ct.sysctl_header)
goto out_unregister_netfilter;
@@ -505,7 +499,7 @@ out_unregister_netfilter:
kfree(table);
out_kmemdup:
if (net_eq(net, &init_net))
- unregister_sysctl_table(nf_ct_netfilter_header);
+ unregister_net_sysctl_table(nf_ct_netfilter_header);
out:
printk(KERN_ERR "nf_conntrack: can't register to sysctl.\n");
return -ENOMEM;
@@ -516,7 +510,7 @@ static void nf_conntrack_standalone_fini_sysctl(struct net *net)
struct ctl_table *table;
if (net_eq(net, &init_net))
- unregister_sysctl_table(nf_ct_netfilter_header);
+ unregister_net_sysctl_table(nf_ct_netfilter_header);
table = net->ct.sysctl_header->ctl_table_arg;
unregister_net_sysctl_table(net->ct.sysctl_header);
kfree(table);
diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
index e8d27afbbdb9..dbb364f62d6f 100644
--- a/net/netfilter/nf_conntrack_timestamp.c
+++ b/net/netfilter/nf_conntrack_timestamp.c
@@ -51,8 +51,8 @@ static int nf_conntrack_tstamp_init_sysctl(struct net *net)
table[0].data = &net->ct.sysctl_tstamp;
- net->ct.tstamp_sysctl_header = register_net_sysctl_table(net,
- nf_net_netfilter_sysctl_path, table);
+ net->ct.tstamp_sysctl_header = register_net_sysctl(net, "net/netfilter",
+ table);
if (!net->ct.tstamp_sysctl_header) {
printk(KERN_ERR "nf_ct_tstamp: can't register to sysctl.\n");
goto out_register;
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 957374a234d4..703fb26aa48d 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -214,13 +214,6 @@ static const struct file_operations nflog_file_ops = {
#endif /* PROC_FS */
#ifdef CONFIG_SYSCTL
-static struct ctl_path nf_log_sysctl_path[] = {
- { .procname = "net", },
- { .procname = "netfilter", },
- { .procname = "nf_log", },
- { }
-};
-
static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
static struct ctl_table_header *nf_log_dir_header;
@@ -283,7 +276,7 @@ static __init int netfilter_log_sysctl_init(void)
nf_log_sysctl_table[i].extra1 = (void *)(unsigned long) i;
}
- nf_log_dir_header = register_sysctl_paths(nf_log_sysctl_path,
+ nf_log_dir_header = register_net_sysctl(&init_net, "net/netfilter/nf_log",
nf_log_sysctl_table);
if (!nf_log_dir_header)
return -ENOMEM;
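
The nf_log hunk shows the shape of the whole sysctl conversion at a glance: a multi-component ctl_path array collapses into one slash-separated string. Side by side, as a sketch built from the lines removed above:

static struct ctl_table_header *old_way(struct ctl_table *table)
{
	/* Before: one ctl_path entry per path component. */
	static struct ctl_path path[] = {
		{ .procname = "net", },
		{ .procname = "netfilter", },
		{ .procname = "nf_log", },
		{ }
	};
	return register_sysctl_paths(path, table);
}

static struct ctl_table_header *new_way(struct ctl_table *table)
{
	/* After: the same location as a single string. */
	return register_net_sysctl(&init_net, "net/netfilter/nf_log", table);
}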
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index e6ddde165612..3e797d1fcb94 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -103,7 +103,7 @@ int nfnetlink_has_listeners(struct net *net, unsigned int group)
EXPORT_SYMBOL_GPL(nfnetlink_has_listeners);
int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid,
- unsigned group, int echo, gfp_t flags)
+ unsigned int group, int echo, gfp_t flags)
{
return nlmsg_notify(net->nfnl, skb, pid, group, echo, flags);
}
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index d98c868c148b..b2e7310ca0b8 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -109,7 +109,8 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = 0;
- NLA_PUT_STRING(skb, NFACCT_NAME, acct->name);
+ if (nla_put_string(skb, NFACCT_NAME, acct->name))
+ goto nla_put_failure;
if (type == NFNL_MSG_ACCT_GET_CTRZERO) {
pkts = atomic64_xchg(&acct->pkts, 0);
@@ -118,9 +119,10 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
pkts = atomic64_read(&acct->pkts);
bytes = atomic64_read(&acct->bytes);
}
- NLA_PUT_BE64(skb, NFACCT_PKTS, cpu_to_be64(pkts));
- NLA_PUT_BE64(skb, NFACCT_BYTES, cpu_to_be64(bytes));
- NLA_PUT_BE32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt)));
+ if (nla_put_be64(skb, NFACCT_PKTS, cpu_to_be64(pkts)) ||
+ nla_put_be64(skb, NFACCT_BYTES, cpu_to_be64(bytes)) ||
+ nla_put_be32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt))))
+ goto nla_put_failure;
nlmsg_end(skb, nlh);
return skb->len;
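
In nfnl_acct_fill_info() above, the GET_CTRZERO case must read and reset each counter in one step; atomic64_xchg() returns the previous value while storing 0, so packets counted between a separate read and reset cannot be lost. The two modes side by side, as a sketch:

/* Sketch: dump a counter, optionally zeroing it atomically. */
static u64 read_counter(atomic64_t *ctr, bool zero)
{
	/* xchg returns the old value while writing the new one */
	return zero ? atomic64_xchg(ctr, 0) : atomic64_read(ctr);
}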
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 2b9e79f5ef05..3e655288d1d6 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -170,11 +170,12 @@ ctnl_timeout_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = 0;
- NLA_PUT_STRING(skb, CTA_TIMEOUT_NAME, timeout->name);
- NLA_PUT_BE16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->l3num));
- NLA_PUT_U8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4proto->l4proto);
- NLA_PUT_BE32(skb, CTA_TIMEOUT_USE,
- htonl(atomic_read(&timeout->refcnt)));
+ if (nla_put_string(skb, CTA_TIMEOUT_NAME, timeout->name) ||
+ nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->l3num)) ||
+ nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4proto->l4proto) ||
+ nla_put_be32(skb, CTA_TIMEOUT_USE,
+ htonl(atomic_read(&timeout->refcnt))))
+ goto nla_put_failure;
if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) {
struct nlattr *nest_parms;
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 66b2c54c544f..3c3cfc0cc9b5 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -391,67 +391,78 @@ __build_packet_message(struct nfulnl_instance *inst,
pmsg.hw_protocol = skb->protocol;
pmsg.hook = hooknum;
- NLA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg);
+ if (nla_put(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg))
+ goto nla_put_failure;
- if (prefix)
- NLA_PUT(inst->skb, NFULA_PREFIX, plen, prefix);
+ if (prefix &&
+ nla_put(inst->skb, NFULA_PREFIX, plen, prefix))
+ goto nla_put_failure;
if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
- NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
- htonl(indev->ifindex));
+ if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
+ htonl(indev->ifindex)))
+ goto nla_put_failure;
#else
if (pf == PF_BRIDGE) {
/* Case 1: outdev is physical input device, we need to
* look for bridge group (when called from
* netfilter_bridge) */
- NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
- htonl(indev->ifindex));
+ if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
+ htonl(indev->ifindex)) ||
/* this is the bridge group "brX" */
/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
- NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
- htonl(br_port_get_rcu(indev)->br->dev->ifindex));
+ nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
+ htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
+ goto nla_put_failure;
} else {
/* Case 2: indev is bridge group, we need to look for
* physical device (when called from ipv4) */
- NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
- htonl(indev->ifindex));
- if (skb->nf_bridge && skb->nf_bridge->physindev)
- NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
- htonl(skb->nf_bridge->physindev->ifindex));
+ if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
+ htonl(indev->ifindex)))
+ goto nla_put_failure;
+ if (skb->nf_bridge && skb->nf_bridge->physindev &&
+ nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
+ htonl(skb->nf_bridge->physindev->ifindex)))
+ goto nla_put_failure;
}
#endif
}
if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
- NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
- htonl(outdev->ifindex));
+ if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
+ htonl(outdev->ifindex)))
+ goto nla_put_failure;
#else
if (pf == PF_BRIDGE) {
/* Case 1: outdev is physical output device, we need to
* look for bridge group (when called from
* netfilter_bridge) */
- NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
- htonl(outdev->ifindex));
+ if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
+ htonl(outdev->ifindex)) ||
/* this is the bridge group "brX" */
/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
- NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
- htonl(br_port_get_rcu(outdev)->br->dev->ifindex));
+ nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
+ htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
+ goto nla_put_failure;
} else {
/* Case 2: indev is a bridge group, we need to look
* for physical device (when called from ipv4) */
- NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
- htonl(outdev->ifindex));
- if (skb->nf_bridge && skb->nf_bridge->physoutdev)
- NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
- htonl(skb->nf_bridge->physoutdev->ifindex));
+ if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
+ htonl(outdev->ifindex)))
+ goto nla_put_failure;
+ if (skb->nf_bridge && skb->nf_bridge->physoutdev &&
+ nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
+ htonl(skb->nf_bridge->physoutdev->ifindex)))
+ goto nla_put_failure;
}
#endif
}
- if (skb->mark)
- NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark));
+ if (skb->mark &&
+ nla_put_be32(inst->skb, NFULA_MARK, htonl(skb->mark)))
+ goto nla_put_failure;
if (indev && skb->dev &&
skb->mac_header != skb->network_header) {
@@ -459,16 +470,18 @@ __build_packet_message(struct nfulnl_instance *inst,
int len = dev_parse_header(skb, phw.hw_addr);
if (len > 0) {
phw.hw_addrlen = htons(len);
- NLA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw);
+ if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
+ goto nla_put_failure;
}
}
if (indev && skb_mac_header_was_set(skb)) {
- NLA_PUT_BE16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type));
- NLA_PUT_BE16(inst->skb, NFULA_HWLEN,
- htons(skb->dev->hard_header_len));
- NLA_PUT(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
- skb_mac_header(skb));
+ if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
+ nla_put_be16(inst->skb, NFULA_HWLEN,
+ htons(skb->dev->hard_header_len)) ||
+ nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
+ skb_mac_header(skb)))
+ goto nla_put_failure;
}
if (skb->tstamp.tv64) {
@@ -477,7 +490,8 @@ __build_packet_message(struct nfulnl_instance *inst,
ts.sec = cpu_to_be64(tv.tv_sec);
ts.usec = cpu_to_be64(tv.tv_usec);
- NLA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts);
+ if (nla_put(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts))
+ goto nla_put_failure;
}
/* UID */
@@ -487,22 +501,24 @@ __build_packet_message(struct nfulnl_instance *inst,
struct file *file = skb->sk->sk_socket->file;
__be32 uid = htonl(file->f_cred->fsuid);
__be32 gid = htonl(file->f_cred->fsgid);
- /* need to unlock here since NLA_PUT may goto */
read_unlock_bh(&skb->sk->sk_callback_lock);
- NLA_PUT_BE32(inst->skb, NFULA_UID, uid);
- NLA_PUT_BE32(inst->skb, NFULA_GID, gid);
+ if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
+ nla_put_be32(inst->skb, NFULA_GID, gid))
+ goto nla_put_failure;
} else
read_unlock_bh(&skb->sk->sk_callback_lock);
}
/* local sequence number */
- if (inst->flags & NFULNL_CFG_F_SEQ)
- NLA_PUT_BE32(inst->skb, NFULA_SEQ, htonl(inst->seq++));
+ if ((inst->flags & NFULNL_CFG_F_SEQ) &&
+ nla_put_be32(inst->skb, NFULA_SEQ, htonl(inst->seq++)))
+ goto nla_put_failure;
/* global sequence number */
- if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
- NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
- htonl(atomic_inc_return(&global_seq)));
+ if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
+ nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
+ htonl(atomic_inc_return(&global_seq))))
+ goto nla_put_failure;
if (data_len) {
struct nlattr *nla;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index a80b0cb03f17..4162437b8361 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -288,58 +288,67 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
indev = entry->indev;
if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
- NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex));
+ if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
+ goto nla_put_failure;
#else
if (entry->pf == PF_BRIDGE) {
/* Case 1: indev is physical input device, we need to
* look for bridge group (when called from
* netfilter_bridge) */
- NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
- htonl(indev->ifindex));
+ if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
+ htonl(indev->ifindex)) ||
/* this is the bridge group "brX" */
/* rcu_read_lock()ed by __nf_queue */
- NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
- htonl(br_port_get_rcu(indev)->br->dev->ifindex));
+ nla_put_be32(skb, NFQA_IFINDEX_INDEV,
+ htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
+ goto nla_put_failure;
} else {
/* Case 2: indev is bridge group, we need to look for
* physical device (when called from ipv4) */
- NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
- htonl(indev->ifindex));
- if (entskb->nf_bridge && entskb->nf_bridge->physindev)
- NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
- htonl(entskb->nf_bridge->physindev->ifindex));
+ if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
+ htonl(indev->ifindex)))
+ goto nla_put_failure;
+ if (entskb->nf_bridge && entskb->nf_bridge->physindev &&
+ nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
+ htonl(entskb->nf_bridge->physindev->ifindex)))
+ goto nla_put_failure;
}
#endif
}
if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
- NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex));
+ if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
+ goto nla_put_failure;
#else
if (entry->pf == PF_BRIDGE) {
/* Case 1: outdev is physical output device, we need to
* look for bridge group (when called from
* netfilter_bridge) */
- NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
- htonl(outdev->ifindex));
+ if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
+ htonl(outdev->ifindex)) ||
/* this is the bridge group "brX" */
/* rcu_read_lock()ed by __nf_queue */
- NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
- htonl(br_port_get_rcu(outdev)->br->dev->ifindex));
+ nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
+ htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
+ goto nla_put_failure;
} else {
/* Case 2: outdev is bridge group, we need to look for
* physical output device (when called from ipv4) */
- NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
- htonl(outdev->ifindex));
- if (entskb->nf_bridge && entskb->nf_bridge->physoutdev)
- NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
- htonl(entskb->nf_bridge->physoutdev->ifindex));
+ if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
+ htonl(outdev->ifindex)))
+ goto nla_put_failure;
+ if (entskb->nf_bridge && entskb->nf_bridge->physoutdev &&
+ nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
+ htonl(entskb->nf_bridge->physoutdev->ifindex)))
+ goto nla_put_failure;
}
#endif
}
- if (entskb->mark)
- NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark));
+ if (entskb->mark &&
+ nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark)))
+ goto nla_put_failure;
if (indev && entskb->dev &&
entskb->mac_header != entskb->network_header) {
@@ -347,7 +356,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
int len = dev_parse_header(entskb, phw.hw_addr);
if (len) {
phw.hw_addrlen = htons(len);
- NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
+ if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
+ goto nla_put_failure;
}
}
@@ -357,7 +367,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
ts.sec = cpu_to_be64(tv.tv_sec);
ts.usec = cpu_to_be64(tv.tv_usec);
- NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts);
+ if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
+ goto nla_put_failure;
}
if (data_len) {
@@ -384,8 +395,7 @@ nlmsg_failure:
nla_put_failure:
if (skb)
kfree_skb(skb);
- if (net_ratelimit())
- printk(KERN_ERR "nf_queue: error creating packet message\n");
+ net_err_ratelimited("nf_queue: error creating packet message\n");
return NULL;
}
@@ -422,10 +432,8 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
}
if (queue->queue_total >= queue->queue_maxlen) {
queue->queue_dropped++;
- if (net_ratelimit())
- printk(KERN_WARNING "nf_queue: full at %d entries, "
- "dropping packets(s).\n",
- queue->queue_total);
+ net_warn_ratelimited("nf_queue: full at %d entries, dropping packets(s)\n",
+ queue->queue_total);
goto err_out_free_nskb;
}
entry->id = ++queue->id_sequence;
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 59530e93fa58..a51de9b052be 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -17,7 +17,6 @@
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_ecache.h>
-#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_zones.h>
@@ -227,7 +226,7 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
}
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
- if (info->timeout) {
+ if (info->timeout[0]) {
typeof(nf_ct_timeout_find_get_hook) timeout_find_get;
struct nf_conn_timeout *timeout_ext;
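
The xt_CT fix above is subtle: info->timeout is a char array embedded in the target info, so as an expression it decays to a pointer that is never NULL and the old test was always true. Testing the first byte checks for a non-empty name. Reduced to its essence (struct name hypothetical):

struct tg_info_sketch {		/* stands in for the real target info */
	char timeout[32];	/* timeout policy name */
};

static bool has_timeout_name(const struct tg_info_sketch *info)
{
	/* info->timeout decays to &info->timeout[0]: always non-NULL.
	 * The meaningful test is whether the string is non-empty. */
	return info->timeout[0] != '\0';
}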
diff --git a/net/netfilter/xt_HMARK.c b/net/netfilter/xt_HMARK.c
new file mode 100644
index 000000000000..0a96a43108ed
--- /dev/null
+++ b/net/netfilter/xt_HMARK.c
@@ -0,0 +1,362 @@
+/*
+ * xt_HMARK - Netfilter module to set mark by means of hashing
+ *
+ * (C) 2012 by Hans Schillstrom <hans.schillstrom@ericsson.com>
+ * (C) 2012 by Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/icmp.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_HMARK.h>
+
+#include <net/ip.h>
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <net/netfilter/nf_conntrack.h>
+#endif
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+#include <net/ipv6.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Hans Schillstrom <hans.schillstrom@ericsson.com>");
+MODULE_DESCRIPTION("Xtables: packet marking using hash calculation");
+MODULE_ALIAS("ipt_HMARK");
+MODULE_ALIAS("ip6t_HMARK");
+
+struct hmark_tuple {
+ u32 src;
+ u32 dst;
+ union hmark_ports uports;
+ uint8_t proto;
+};
+
+static inline u32 hmark_addr6_mask(const __u32 *addr32, const __u32 *mask)
+{
+ return (addr32[0] & mask[0]) ^
+ (addr32[1] & mask[1]) ^
+ (addr32[2] & mask[2]) ^
+ (addr32[3] & mask[3]);
+}
+
+static inline u32
+hmark_addr_mask(int l3num, const __u32 *addr32, const __u32 *mask)
+{
+ switch (l3num) {
+ case AF_INET:
+ return *addr32 & *mask;
+ case AF_INET6:
+ return hmark_addr6_mask(addr32, mask);
+ }
+ return 0;
+}
+
+static int
+hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t,
+ const struct xt_hmark_info *info)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+ struct nf_conntrack_tuple *otuple;
+ struct nf_conntrack_tuple *rtuple;
+
+ if (ct == NULL || nf_ct_is_untracked(ct))
+ return -1;
+
+ otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+ rtuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+
+ t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.all,
+ info->src_mask.all);
+ t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.all,
+ info->dst_mask.all);
+
+ if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
+ return 0;
+
+ t->proto = nf_ct_protonum(ct);
+ if (t->proto != IPPROTO_ICMP) {
+ t->uports.p16.src = otuple->src.u.all;
+ t->uports.p16.dst = rtuple->src.u.all;
+ t->uports.v32 = (t->uports.v32 & info->port_mask.v32) |
+ info->port_set.v32;
+ if (t->uports.p16.dst < t->uports.p16.src)
+ swap(t->uports.p16.dst, t->uports.p16.src);
+ }
+
+ return 0;
+#else
+ return -1;
+#endif
+}
+
+static inline u32
+hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info)
+{
+ u32 hash;
+
+ if (t->dst < t->src)
+ swap(t->src, t->dst);
+
+ hash = jhash_3words(t->src, t->dst, t->uports.v32, info->hashrnd);
+ hash = hash ^ (t->proto & info->proto_mask);
+
+ return (((u64)hash * info->hmodulus) >> 32) + info->hoffset;
+}
+
+static void
+hmark_set_tuple_ports(const struct sk_buff *skb, unsigned int nhoff,
+ struct hmark_tuple *t, const struct xt_hmark_info *info)
+{
+ int protoff;
+
+ protoff = proto_ports_offset(t->proto);
+ if (protoff < 0)
+ return;
+
+ nhoff += protoff;
+ if (skb_copy_bits(skb, nhoff, &t->uports, sizeof(t->uports)) < 0)
+ return;
+
+ t->uports.v32 = (t->uports.v32 & info->port_mask.v32) |
+ info->port_set.v32;
+
+ if (t->uports.p16.dst < t->uports.p16.src)
+ swap(t->uports.p16.dst, t->uports.p16.src);
+}
+
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+static int get_inner6_hdr(const struct sk_buff *skb, int *offset)
+{
+ struct icmp6hdr *icmp6h, _ih6;
+
+ icmp6h = skb_header_pointer(skb, *offset, sizeof(_ih6), &_ih6);
+ if (icmp6h == NULL)
+ return 0;
+
+ if (icmp6h->icmp6_type && icmp6h->icmp6_type < 128) {
+ *offset += sizeof(struct icmp6hdr);
+ return 1;
+ }
+ return 0;
+}
+
+static int
+hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t,
+ const struct xt_hmark_info *info)
+{
+ struct ipv6hdr *ip6, _ip6;
+ int flag = IP6T_FH_F_AUTH;
+ unsigned int nhoff = 0;
+ u16 fragoff = 0;
+ int nexthdr;
+
+ ip6 = (struct ipv6hdr *) (skb->data + skb_network_offset(skb));
+ nexthdr = ipv6_find_hdr(skb, &nhoff, -1, &fragoff, &flag);
+ if (nexthdr < 0)
+ return 0;
+ /* No need to check for icmp errors on fragments */
+ if ((flag & IP6T_FH_F_FRAG) || (nexthdr != IPPROTO_ICMPV6))
+ goto noicmp;
+ /* Use inner header in case of ICMP errors */
+ if (get_inner6_hdr(skb, &nhoff)) {
+ ip6 = skb_header_pointer(skb, nhoff, sizeof(_ip6), &_ip6);
+ if (ip6 == NULL)
+ return -1;
+ /* If AH present, use SPI like in ESP. */
+ flag = IP6T_FH_F_AUTH;
+ nexthdr = ipv6_find_hdr(skb, &nhoff, -1, &fragoff, &flag);
+ if (nexthdr < 0)
+ return -1;
+ }
+noicmp:
+ t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.all);
+ t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.all);
+
+ if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
+ return 0;
+
+ t->proto = nexthdr;
+ if (t->proto == IPPROTO_ICMPV6)
+ return 0;
+
+ if (flag & IP6T_FH_F_FRAG)
+ return 0;
+
+ hmark_set_tuple_ports(skb, nhoff, t, info);
+ return 0;
+}
+
+static unsigned int
+hmark_tg_v6(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ const struct xt_hmark_info *info = par->targinfo;
+ struct hmark_tuple t;
+
+ memset(&t, 0, sizeof(struct hmark_tuple));
+
+ if (info->flags & XT_HMARK_FLAG(XT_HMARK_CT)) {
+ if (hmark_ct_set_htuple(skb, &t, info) < 0)
+ return XT_CONTINUE;
+ } else {
+ if (hmark_pkt_set_htuple_ipv6(skb, &t, info) < 0)
+ return XT_CONTINUE;
+ }
+
+ skb->mark = hmark_hash(&t, info);
+ return XT_CONTINUE;
+}
+#endif
+
+static int get_inner_hdr(const struct sk_buff *skb, int iphsz, int *nhoff)
+{
+ const struct icmphdr *icmph;
+ struct icmphdr _ih;
+
+ /* Not enough header? */
+ icmph = skb_header_pointer(skb, *nhoff + iphsz, sizeof(_ih), &_ih);
+ if (icmph == NULL || icmph->type > NR_ICMP_TYPES)
+ return 0;
+
+ /* Error message? */
+ if (icmph->type != ICMP_DEST_UNREACH &&
+ icmph->type != ICMP_SOURCE_QUENCH &&
+ icmph->type != ICMP_TIME_EXCEEDED &&
+ icmph->type != ICMP_PARAMETERPROB &&
+ icmph->type != ICMP_REDIRECT)
+ return 0;
+
+ *nhoff += iphsz + sizeof(_ih);
+ return 1;
+}
+
+static int
+hmark_pkt_set_htuple_ipv4(const struct sk_buff *skb, struct hmark_tuple *t,
+ const struct xt_hmark_info *info)
+{
+ struct iphdr *ip, _ip;
+ int nhoff = skb_network_offset(skb);
+
+ ip = (struct iphdr *) (skb->data + nhoff);
+ if (ip->protocol == IPPROTO_ICMP) {
+ /* Use inner header in case of ICMP errors */
+ if (get_inner_hdr(skb, ip->ihl * 4, &nhoff)) {
+ ip = skb_header_pointer(skb, nhoff, sizeof(_ip), &_ip);
+ if (ip == NULL)
+ return -1;
+ }
+ }
+
+ t->src = (__force u32) ip->saddr;
+ t->dst = (__force u32) ip->daddr;
+
+ t->src &= info->src_mask.ip;
+ t->dst &= info->dst_mask.ip;
+
+ if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
+ return 0;
+
+ t->proto = ip->protocol;
+
+ /* ICMP has no ports, skip */
+ if (t->proto == IPPROTO_ICMP)
+ return 0;
+
+ /* follow-up fragments don't contain ports, skip all fragments */
+ if (ip->frag_off & htons(IP_MF | IP_OFFSET))
+ return 0;
+
+ hmark_set_tuple_ports(skb, (ip->ihl * 4) + nhoff, t, info);
+
+ return 0;
+}
+
+static unsigned int
+hmark_tg_v4(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ const struct xt_hmark_info *info = par->targinfo;
+ struct hmark_tuple t;
+
+ memset(&t, 0, sizeof(struct hmark_tuple));
+
+ if (info->flags & XT_HMARK_FLAG(XT_HMARK_CT)) {
+ if (hmark_ct_set_htuple(skb, &t, info) < 0)
+ return XT_CONTINUE;
+ } else {
+ if (hmark_pkt_set_htuple_ipv4(skb, &t, info) < 0)
+ return XT_CONTINUE;
+ }
+
+ skb->mark = hmark_hash(&t, info);
+ return XT_CONTINUE;
+}
+
+static int hmark_tg_check(const struct xt_tgchk_param *par)
+{
+ const struct xt_hmark_info *info = par->targinfo;
+
+ if (!info->hmodulus) {
+ pr_info("xt_HMARK: hash modulus can't be zero\n");
+ return -EINVAL;
+ }
+ if (info->proto_mask &&
+ (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))) {
+ pr_info("xt_HMARK: proto mask must be zero with L3 mode\n");
+ return -EINVAL;
+ }
+ if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI_MASK) &&
+ (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT_MASK) |
+ XT_HMARK_FLAG(XT_HMARK_DPORT_MASK)))) {
+ pr_info("xt_HMARK: spi-mask and port-mask can't be combined\n");
+ return -EINVAL;
+ }
+ if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI) &&
+ (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT) |
+ XT_HMARK_FLAG(XT_HMARK_DPORT)))) {
+ pr_info("xt_HMARK: spi-set and port-set can't be combined\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static struct xt_target hmark_tg_reg[] __read_mostly = {
+ {
+ .name = "HMARK",
+ .family = NFPROTO_IPV4,
+ .target = hmark_tg_v4,
+ .targetsize = sizeof(struct xt_hmark_info),
+ .checkentry = hmark_tg_check,
+ .me = THIS_MODULE,
+ },
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+ {
+ .name = "HMARK",
+ .family = NFPROTO_IPV6,
+ .target = hmark_tg_v6,
+ .targetsize = sizeof(struct xt_hmark_info),
+ .checkentry = hmark_tg_check,
+ .me = THIS_MODULE,
+ },
+#endif
+};
+
+static int __init hmark_tg_init(void)
+{
+ return xt_register_targets(hmark_tg_reg, ARRAY_SIZE(hmark_tg_reg));
+}
+
+static void __exit hmark_tg_exit(void)
+{
+ xt_unregister_targets(hmark_tg_reg, ARRAY_SIZE(hmark_tg_reg));
+}
+
+module_init(hmark_tg_init);
+module_exit(hmark_tg_exit);
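
The core of xt_HMARK is hmark_hash(): jhash over the masked src/dst/ports tuple, XORed with the masked protocol, then mapped into [hoffset, hoffset + hmodulus) without a division by keeping the high 32 bits of a 64-bit multiply. A userspace rehearsal of that final mapping (values are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	/* Map a 32-bit hash uniformly onto [hoffset, hoffset + hmodulus). */
	static uint32_t hmark_scale(uint32_t hash, uint32_t hmodulus,
				    uint32_t hoffset)
	{
		return (uint32_t)(((uint64_t)hash * hmodulus) >> 32) + hoffset;
	}

	int main(void)
	{
		/* e.g. spread marks over 64 buckets starting at mark 100 */
		printf("%u\n", hmark_scale(0xdeadbeef, 64, 100));	/* 155 */
		return 0;
	}
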
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 190ad37c5cf8..71a266de5fb4 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -67,15 +67,13 @@ tcpmss_mangle_packet(struct sk_buff *skb,
if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
if (dst_mtu(skb_dst(skb)) <= minlen) {
- if (net_ratelimit())
- pr_err("unknown or invalid path-MTU (%u)\n",
- dst_mtu(skb_dst(skb)));
+ net_err_ratelimited("unknown or invalid path-MTU (%u)\n",
+ dst_mtu(skb_dst(skb)));
return -1;
}
if (in_mtu <= minlen) {
- if (net_ratelimit())
- pr_err("unknown or invalid path-MTU (%u)\n",
- in_mtu);
+ net_err_ratelimited("unknown or invalid path-MTU (%u)\n",
+ in_mtu);
return -1;
}
newmss = min(dst_mtu(skb_dst(skb)), in_mtu) - minlen;
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 4d5057902839..ee2e5bc5a8c7 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -87,7 +87,7 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
const struct xt_tee_tginfo *info = par->targinfo;
struct iphdr *iph;
- if (percpu_read(tee_active))
+ if (__this_cpu_read(tee_active))
return XT_CONTINUE;
/*
* Copy the skb, and route the copy. Will later return %XT_CONTINUE for
@@ -124,9 +124,9 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
ip_send_check(iph);
if (tee_tg_route4(skb, info)) {
- percpu_write(tee_active, true);
+ __this_cpu_write(tee_active, true);
ip_local_out(skb);
- percpu_write(tee_active, false);
+ __this_cpu_write(tee_active, false);
} else {
kfree_skb(skb);
}
@@ -168,7 +168,7 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_tee_tginfo *info = par->targinfo;
- if (percpu_read(tee_active))
+ if (__this_cpu_read(tee_active))
return XT_CONTINUE;
skb = pskb_copy(skb, GFP_ATOMIC);
if (skb == NULL)
@@ -186,9 +186,9 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
--iph->hop_limit;
}
if (tee_tg_route6(skb, info)) {
- percpu_write(tee_active, true);
+ __this_cpu_write(tee_active, true);
ip6_local_out(skb);
- percpu_write(tee_active, false);
+ __this_cpu_write(tee_active, false);
} else {
kfree_skb(skb);
}
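
percpu_read()/percpu_write() were being removed tree-wide in favour of __this_cpu_read()/__this_cpu_write(). In xt_TEE the per-cpu flag is a reentrancy guard: the cloned packet traverses the same hook again, and without the guard the target would duplicate its own duplicates indefinitely. The pattern, condensed (names are illustrative; netfilter hooks run with preemption disabled, which is what makes the raw __this_cpu accessors safe here):

	#include <linux/percpu.h>
	#include <linux/skbuff.h>
	#include <linux/netfilter/x_tables.h>

	static DEFINE_PER_CPU(bool, dup_active);

	static unsigned int dup_target(struct sk_buff *skb)
	{
		if (__this_cpu_read(dup_active))	/* our own clone: pass */
			return XT_CONTINUE;

		__this_cpu_write(dup_active, true);
		/* ... clone skb and re-inject it through the stack ... */
		__this_cpu_write(dup_active, false);

		return XT_CONTINUE;
	}
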
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 35a959a096e0..146033a86de8 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -282,10 +282,10 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
struct sock *sk;
const struct in6_addr *laddr;
__be16 lport;
- int thoff;
+ int thoff = 0;
int tproto;
- tproto = ipv6_find_hdr(skb, &thoff, -1, NULL);
+ tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);
if (tproto < 0) {
pr_debug("unable to find transport header in IPv6 packet, dropping\n");
return NF_DROP;
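
ipv6_find_hdr() gained a fifth parameter in this series: an IP6T_FH_F_* flags word used both as input (IP6T_FH_F_AUTH asks for AH to be treated like ESP) and as output (IP6T_FH_F_FRAG reports fragmentation). That is why the call sites above pass a trailing NULL, and why thoff must now be pre-initialized: a non-zero *offset tells the new implementation where to start parsing. A calling sketch mirroring the xt_HMARK user above (the NF_DROP/NF_ACCEPT returns are illustrative policy):

	static int parse_transport(const struct sk_buff *skb)
	{
		unsigned int nhoff = 0;	/* in: parse start (0 = outer hdr); out: result */
		u16 fragoff = 0;	/* out: fragment offset, if fragmented */
		int flags = IP6T_FH_F_AUTH;	/* in: treat AH like ESP; out: flags */
		int nexthdr;

		nexthdr = ipv6_find_hdr(skb, &nhoff, -1, &fragoff, &flags);
		if (nexthdr < 0)
			return NF_DROP;
		if (flags & IP6T_FH_F_FRAG)
			return NF_DROP;	/* non-first fragment: no transport header */
		return NF_ACCEPT;
	}
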
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index d95f9c963cde..26a668a84aa2 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -171,8 +171,7 @@ dsthash_alloc_init(struct xt_hashlimit_htable *ht,
if (ht->cfg.max && ht->count >= ht->cfg.max) {
/* FIXME: do something. question is what.. */
- if (net_ratelimit())
- pr_err("max count of %u reached\n", ht->cfg.max);
+ net_err_ratelimited("max count of %u reached\n", ht->cfg.max);
ent = NULL;
} else
ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC);
@@ -388,9 +387,20 @@ static void htable_put(struct xt_hashlimit_htable *hinfo)
#define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
+/* in byte mode, the lowest possible rate is one packet/second.
+ * credit_cap is used as a counter that tells us how many times we can
+ * refill the "credits available" counter when it becomes empty.
+ */
+#define MAX_CPJ_BYTES (0xFFFFFFFF / HZ)
+#define CREDITS_PER_JIFFY_BYTES POW2_BELOW32(MAX_CPJ_BYTES)
+
+static u32 xt_hashlimit_len_to_chunks(u32 len)
+{
+ return (len >> XT_HASHLIMIT_BYTE_SHIFT) + 1;
+}
+
/* Precision saver. */
-static inline u_int32_t
-user2credits(u_int32_t user)
+static u32 user2credits(u32 user)
{
/* If multiplying would overflow... */
if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
@@ -400,12 +410,53 @@ user2credits(u_int32_t user)
return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE;
}
-static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now)
+static u32 user2credits_byte(u32 user)
{
- dh->rateinfo.credit += (now - dh->rateinfo.prev) * CREDITS_PER_JIFFY;
- if (dh->rateinfo.credit > dh->rateinfo.credit_cap)
- dh->rateinfo.credit = dh->rateinfo.credit_cap;
+ u64 us = user;
+ us *= HZ * CREDITS_PER_JIFFY_BYTES;
+ return (u32) (us >> 32);
+}
+
+static void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now, u32 mode)
+{
+ unsigned long delta = now - dh->rateinfo.prev;
+ u32 cap;
+
+ if (delta == 0)
+ return;
+
dh->rateinfo.prev = now;
+
+ if (mode & XT_HASHLIMIT_BYTES) {
+ u32 tmp = dh->rateinfo.credit;
+ dh->rateinfo.credit += CREDITS_PER_JIFFY_BYTES * delta;
+ cap = CREDITS_PER_JIFFY_BYTES * HZ;
+ if (tmp >= dh->rateinfo.credit) {/* overflow */
+ dh->rateinfo.credit = cap;
+ return;
+ }
+ } else {
+ dh->rateinfo.credit += delta * CREDITS_PER_JIFFY;
+ cap = dh->rateinfo.credit_cap;
+ }
+ if (dh->rateinfo.credit > cap)
+ dh->rateinfo.credit = cap;
+}
+
+static void rateinfo_init(struct dsthash_ent *dh,
+ struct xt_hashlimit_htable *hinfo)
+{
+ dh->rateinfo.prev = jiffies;
+ if (hinfo->cfg.mode & XT_HASHLIMIT_BYTES) {
+ dh->rateinfo.credit = CREDITS_PER_JIFFY_BYTES * HZ;
+ dh->rateinfo.cost = user2credits_byte(hinfo->cfg.avg);
+ dh->rateinfo.credit_cap = hinfo->cfg.burst;
+ } else {
+ dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
+ hinfo->cfg.burst);
+ dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
+ dh->rateinfo.credit_cap = dh->rateinfo.credit;
+ }
}
static inline __be32 maskl(__be32 a, unsigned int l)
@@ -511,6 +562,21 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
return 0;
}
+static u32 hashlimit_byte_cost(unsigned int len, struct dsthash_ent *dh)
+{
+ u64 tmp = xt_hashlimit_len_to_chunks(len);
+ tmp = tmp * dh->rateinfo.cost;
+
+ if (unlikely(tmp > CREDITS_PER_JIFFY_BYTES * HZ))
+ tmp = CREDITS_PER_JIFFY_BYTES * HZ;
+
+ if (dh->rateinfo.credit < tmp && dh->rateinfo.credit_cap) {
+ dh->rateinfo.credit_cap--;
+ dh->rateinfo.credit = CREDITS_PER_JIFFY_BYTES * HZ;
+ }
+ return (u32) tmp;
+}
+
static bool
hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
@@ -519,6 +585,7 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
unsigned long now = jiffies;
struct dsthash_ent *dh;
struct dsthash_dst dst;
+ u32 cost;
if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0)
goto hotdrop;
@@ -532,21 +599,21 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
goto hotdrop;
}
dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
- dh->rateinfo.prev = jiffies;
- dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
- hinfo->cfg.burst);
- dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg *
- hinfo->cfg.burst);
- dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
+ rateinfo_init(dh, hinfo);
} else {
/* update expiration timeout */
dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
- rateinfo_recalc(dh, now);
+ rateinfo_recalc(dh, now, hinfo->cfg.mode);
}
- if (dh->rateinfo.credit >= dh->rateinfo.cost) {
+ if (info->cfg.mode & XT_HASHLIMIT_BYTES)
+ cost = hashlimit_byte_cost(skb->len, dh);
+ else
+ cost = dh->rateinfo.cost;
+
+ if (dh->rateinfo.credit >= cost) {
/* below the limit */
- dh->rateinfo.credit -= dh->rateinfo.cost;
+ dh->rateinfo.credit -= cost;
spin_unlock(&dh->lock);
rcu_read_unlock_bh();
return !(info->cfg.mode & XT_HASHLIMIT_INVERT);
@@ -568,14 +635,6 @@ static int hashlimit_mt_check(const struct xt_mtchk_param *par)
struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
int ret;
- /* Check for overflow. */
- if (info->cfg.burst == 0 ||
- user2credits(info->cfg.avg * info->cfg.burst) <
- user2credits(info->cfg.avg)) {
- pr_info("overflow, try lower: %u/%u\n",
- info->cfg.avg, info->cfg.burst);
- return -ERANGE;
- }
if (info->cfg.gc_interval == 0 || info->cfg.expire == 0)
return -EINVAL;
if (info->name[sizeof(info->name)-1] != '\0')
@@ -588,6 +647,26 @@ static int hashlimit_mt_check(const struct xt_mtchk_param *par)
return -EINVAL;
}
+ if (info->cfg.mode & ~XT_HASHLIMIT_ALL) {
+ pr_info("Unknown mode mask %X, kernel too old?\n",
+ info->cfg.mode);
+ return -EINVAL;
+ }
+
+ /* Check for overflow. */
+ if (info->cfg.mode & XT_HASHLIMIT_BYTES) {
+ if (user2credits_byte(info->cfg.avg) == 0) {
+ pr_info("overflow, rate too high: %u\n", info->cfg.avg);
+ return -EINVAL;
+ }
+ } else if (info->cfg.burst == 0 ||
+ user2credits(info->cfg.avg * info->cfg.burst) <
+ user2credits(info->cfg.avg)) {
+ pr_info("overflow, try lower: %u/%u\n",
+ info->cfg.avg, info->cfg.burst);
+ return -ERANGE;
+ }
+
mutex_lock(&hashlimit_mutex);
info->hinfo = htable_find_get(net, info->name, par->family);
if (info->hinfo == NULL) {
@@ -680,10 +759,11 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
struct seq_file *s)
{
int res;
+ const struct xt_hashlimit_htable *ht = s->private;
spin_lock(&ent->lock);
/* recalculate to show accurate numbers */
- rateinfo_recalc(ent, jiffies);
+ rateinfo_recalc(ent, jiffies, ht->cfg.mode);
switch (family) {
case NFPROTO_IPV4:
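
The byte-mode accounting above is fixed point arithmetic: xt_hashlimit_len_to_chunks() converts a packet length into chunks, and user2credits_byte() turns the configured rate into a per-chunk cost as (user * HZ * CREDITS_PER_JIFFY_BYTES) >> 32, avoiding a division on the fast path. A userspace rehearsal of that conversion, assuming HZ=1000 (which makes CREDITS_PER_JIFFY_BYTES, the largest power of two not above 0xFFFFFFFF/HZ, equal to 1 << 22):

	#include <stdint.h>

	#define HZ			1000u
	#define CREDITS_PER_JIFFY_BYTES	(1u << 22)	/* POW2_BELOW32(0xFFFFFFFF / HZ) */

	static uint32_t user2credits_byte(uint32_t user)
	{
		uint64_t us = user;

		us *= HZ * CREDITS_PER_JIFFY_BYTES;
		return (uint32_t)(us >> 32);	/* ~= user * 0.976 for HZ=1000 */
	}
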
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
index 32b7a579a032..5c22ce8ab309 100644
--- a/net/netfilter/xt_limit.c
+++ b/net/netfilter/xt_limit.c
@@ -88,8 +88,7 @@ limit_mt(const struct sk_buff *skb, struct xt_action_param *par)
}
/* Precision saver. */
-static u_int32_t
-user2credits(u_int32_t user)
+static u32 user2credits(u32 user)
{
/* If multiplying would overflow... */
if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
@@ -123,7 +122,7 @@ static int limit_mt_check(const struct xt_mtchk_param *par)
128. */
priv->prev = jiffies;
priv->credit = user2credits(r->avg * r->burst); /* Credits full. */
- r->credit_cap = user2credits(r->avg * r->burst); /* Credits full. */
+ r->credit_cap = priv->credit; /* Credits full. */
r->cost = user2credits(r->avg);
}
return 0;
diff --git a/net/netfilter/xt_mac.c b/net/netfilter/xt_mac.c
index 8160f6b1435d..d5b4fd4f91ed 100644
--- a/net/netfilter/xt_mac.c
+++ b/net/netfilter/xt_mac.c
@@ -36,7 +36,7 @@ static bool mac_mt(const struct sk_buff *skb, struct xt_action_param *par)
return false;
if (skb_mac_header(skb) + ETH_HLEN > skb->data)
return false;
- ret = compare_ether_addr(eth_hdr(skb)->h_source, info->srcaddr) == 0;
+ ret = ether_addr_equal(eth_hdr(skb)->h_source, info->srcaddr);
ret ^= info->invert;
return ret;
}
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index d2ff15a2412b..fc0d6dbe5d17 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -314,7 +314,7 @@ static int recent_mt_check(const struct xt_mtchk_param *par)
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *pde;
#endif
- unsigned i;
+ unsigned int i;
int ret = -EINVAL;
if (unlikely(!hash_rnd_inited)) {
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 0ec8138aa470..035960ec5cb9 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -44,6 +44,14 @@ const struct ip_set_adt_opt n = { \
.cmdflags = cfs, \
.timeout = t, \
}
+#define ADT_MOPT(n, f, d, fs, cfs, t) \
+struct ip_set_adt_opt n = { \
+ .family = f, \
+ .dim = d, \
+ .flags = fs, \
+ .cmdflags = cfs, \
+ .timeout = t, \
+}
/* Revision 0 interface: backward compatible with netfilter/iptables */
@@ -296,11 +304,14 @@ static unsigned int
set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_set_info_target_v2 *info = par->targinfo;
- ADT_OPT(add_opt, par->family, info->add_set.dim,
- info->add_set.flags, info->flags, info->timeout);
+ ADT_MOPT(add_opt, par->family, info->add_set.dim,
+ info->add_set.flags, info->flags, info->timeout);
ADT_OPT(del_opt, par->family, info->del_set.dim,
info->del_set.flags, 0, UINT_MAX);
+ /* Normalize to fit into jiffies */
+ if (add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
+ add_opt.timeout = UINT_MAX/MSEC_PER_SEC;
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_add(info->add_set.index, skb, par, &add_opt);
if (info->del_set.index != IPSET_INVALID_ID)
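
The clamp added to set_target_v2() exists because the ip_set timeout is in seconds and is later multiplied by MSEC_PER_SEC, presumably on its way to jiffies (the hunk's own comment says "Normalize to fit into jiffies"); without the clamp a large value overflows 32 bits. The guard in isolation (userspace sketch):

	#include <stdint.h>
	#include <limits.h>

	#define MSEC_PER_SEC 1000u

	static uint32_t clamp_timeout(uint32_t seconds)
	{
		/* seconds * MSEC_PER_SEC must still fit in 32 bits */
		if (seconds > UINT_MAX / MSEC_PER_SEC)
			seconds = UINT_MAX / MSEC_PER_SEC;
		return seconds;
	}
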
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 72bb07f57f97..9ea482d08cf7 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -263,10 +263,10 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
struct sock *sk;
struct in6_addr *daddr, *saddr;
__be16 dport, sport;
- int thoff, tproto;
+ int thoff = 0, tproto;
const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
- tproto = ipv6_find_hdr(skb, &thoff, -1, NULL);
+ tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);
if (tproto < 0) {
pr_debug("unable to find transport header in IPv6 packet, dropping\n");
return NF_DROP;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index faa48f70b7c9..b3025a603d56 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -104,27 +104,27 @@ static inline int netlink_is_kernel(struct sock *sk)
}
struct nl_pid_hash {
- struct hlist_head *table;
- unsigned long rehash_time;
+ struct hlist_head *table;
+ unsigned long rehash_time;
- unsigned int mask;
- unsigned int shift;
+ unsigned int mask;
+ unsigned int shift;
- unsigned int entries;
- unsigned int max_shift;
+ unsigned int entries;
+ unsigned int max_shift;
- u32 rnd;
+ u32 rnd;
};
struct netlink_table {
- struct nl_pid_hash hash;
- struct hlist_head mc_list;
- struct listeners __rcu *listeners;
- unsigned int nl_nonroot;
- unsigned int groups;
- struct mutex *cb_mutex;
- struct module *module;
- int registered;
+ struct nl_pid_hash hash;
+ struct hlist_head mc_list;
+ struct listeners __rcu *listeners;
+ unsigned int nl_nonroot;
+ unsigned int groups;
+ struct mutex *cb_mutex;
+ struct module *module;
+ int registered;
};
static struct netlink_table *nl_table;
@@ -132,7 +132,6 @@ static struct netlink_table *nl_table;
static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
static int netlink_dump(struct sock *sk);
-static void netlink_destroy_callback(struct netlink_callback *cb);
static DEFINE_RWLOCK(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);
@@ -149,6 +148,18 @@ static inline struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid
return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
}
+static void netlink_destroy_callback(struct netlink_callback *cb)
+{
+ kfree_skb(cb->skb);
+ kfree(cb);
+}
+
+static void netlink_consume_callback(struct netlink_callback *cb)
+{
+ consume_skb(cb->skb);
+ kfree(cb);
+}
+
static void netlink_sock_destruct(struct sock *sk)
{
struct netlink_sock *nlk = nlk_sk(sk);
@@ -414,9 +425,9 @@ static int __netlink_create(struct net *net, struct socket *sock,
sock_init_data(sock, sk);
nlk = nlk_sk(sk);
- if (cb_mutex)
+ if (cb_mutex) {
nlk->cb_mutex = cb_mutex;
- else {
+ } else {
nlk->cb_mutex = &nlk->cb_def_mutex;
mutex_init(nlk->cb_mutex);
}
@@ -522,8 +533,9 @@ static int netlink_release(struct socket *sock)
nl_table[sk->sk_protocol].module = NULL;
nl_table[sk->sk_protocol].registered = 0;
}
- } else if (nlk->subscriptions)
+ } else if (nlk->subscriptions) {
netlink_update_listeners(sk);
+ }
netlink_table_ungrab();
kfree(nlk->groups);
@@ -866,7 +878,7 @@ static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
struct sk_buff *nskb = skb_clone(skb, allocation);
if (!nskb)
return skb;
- kfree_skb(skb);
+ consume_skb(skb);
skb = nskb;
}
@@ -896,8 +908,10 @@ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
ret = skb->len;
skb_set_owner_r(skb, sk);
nlk->netlink_rcv(skb);
+ consume_skb(skb);
+ } else {
+ kfree_skb(skb);
}
- kfree_skb(skb);
sock_put(sk);
return ret;
}
@@ -1086,8 +1100,8 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 pid,
if (info.delivery_failure) {
kfree_skb(info.skb2);
return -ENOBUFS;
- } else
- consume_skb(info.skb2);
+ }
+ consume_skb(info.skb2);
if (info.delivered) {
if (info.congested && (allocation & __GFP_WAIT))
@@ -1240,8 +1254,9 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
clear_bit(0, &nlk->state);
wake_up_interruptible(&nlk->wait);
- } else
+ } else {
nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
+ }
err = 0;
break;
default:
@@ -1645,12 +1660,6 @@ void netlink_set_nonroot(int protocol, unsigned int flags)
}
EXPORT_SYMBOL(netlink_set_nonroot);
-static void netlink_destroy_callback(struct netlink_callback *cb)
-{
- kfree_skb(cb->skb);
- kfree(cb);
-}
-
struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
{
@@ -1727,7 +1736,7 @@ static int netlink_dump(struct sock *sk)
nlk->cb = NULL;
mutex_unlock(nlk->cb_mutex);
- netlink_destroy_callback(cb);
+ netlink_consume_callback(cb);
return 0;
errout_skb:
@@ -1996,11 +2005,11 @@ static void netlink_seq_stop(struct seq_file *seq, void *v)
static int netlink_seq_show(struct seq_file *seq, void *v)
{
- if (v == SEQ_START_TOKEN)
+ if (v == SEQ_START_TOKEN) {
seq_puts(seq,
"sk Eth Pid Groups "
"Rmem Wmem Dump Locks Drops Inode\n");
- else {
+ } else {
struct sock *s = v;
struct netlink_sock *nlk = nlk_sk(s);
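
Several hunks above switch success paths from kfree_skb() to consume_skb(). Both free the buffer; the distinction is diagnostic: kfree_skb() fires the drop tracepoint (and thus shows up in dropwatch and friends), while consume_skb() marks normal end-of-life. The convention in miniature:

	#include <linux/skbuff.h>

	static void finish_skb(struct sk_buff *skb, bool delivered)
	{
		if (delivered)
			consume_skb(skb);	/* normal consumption, not a drop */
		else
			kfree_skb(skb);		/* genuine drop, visible to tracing */
	}
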
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 9f40441d7a7d..2cc7c1ee7690 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -635,11 +635,12 @@ static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq,
if (hdr == NULL)
return -1;
- NLA_PUT_STRING(skb, CTRL_ATTR_FAMILY_NAME, family->name);
- NLA_PUT_U16(skb, CTRL_ATTR_FAMILY_ID, family->id);
- NLA_PUT_U32(skb, CTRL_ATTR_VERSION, family->version);
- NLA_PUT_U32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize);
- NLA_PUT_U32(skb, CTRL_ATTR_MAXATTR, family->maxattr);
+ if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
+ nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
+ nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
+ nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
+ nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
+ goto nla_put_failure;
if (!list_empty(&family->ops_list)) {
struct nlattr *nla_ops;
@@ -657,8 +658,9 @@ static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq,
if (nest == NULL)
goto nla_put_failure;
- NLA_PUT_U32(skb, CTRL_ATTR_OP_ID, ops->cmd);
- NLA_PUT_U32(skb, CTRL_ATTR_OP_FLAGS, ops->flags);
+ if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) ||
+ nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, ops->flags))
+ goto nla_put_failure;
nla_nest_end(skb, nest);
}
@@ -682,9 +684,10 @@ static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq,
if (nest == NULL)
goto nla_put_failure;
- NLA_PUT_U32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id);
- NLA_PUT_STRING(skb, CTRL_ATTR_MCAST_GRP_NAME,
- grp->name);
+ if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id) ||
+ nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
+ grp->name))
+ goto nla_put_failure;
nla_nest_end(skb, nest);
}
@@ -710,8 +713,9 @@ static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid,
if (hdr == NULL)
return -1;
- NLA_PUT_STRING(skb, CTRL_ATTR_FAMILY_NAME, grp->family->name);
- NLA_PUT_U16(skb, CTRL_ATTR_FAMILY_ID, grp->family->id);
+ if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, grp->family->name) ||
+ nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, grp->family->id))
+ goto nla_put_failure;
nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
if (nla_grps == NULL)
@@ -721,9 +725,10 @@ static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid,
if (nest == NULL)
goto nla_put_failure;
- NLA_PUT_U32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id);
- NLA_PUT_STRING(skb, CTRL_ATTR_MCAST_GRP_NAME,
- grp->name);
+ if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id) ||
+ nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
+ grp->name))
+ goto nla_put_failure;
nla_nest_end(skb, nest);
nla_nest_end(skb, nla_grps);
@@ -831,7 +836,7 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
#ifdef CONFIG_MODULES
if (res == NULL) {
genl_unlock();
- request_module("net-pf-%d-proto-%d-type-%s",
+ request_module("net-pf-%d-proto-%d-family-%s",
PF_NETLINK, NETLINK_GENERIC, name);
genl_lock();
res = genl_family_find_byname(name);
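
The NLA_PUT*() macros hid a goto nla_put_failure inside their expansion; the conversions above make that control flow explicit, and chaining several puts with || keeps a single error path. The resulting shape, generically (MY_ATTR_* and struct my_obj are placeholders, not kernel symbols):

	/* placeholders for illustration */
	enum { MY_ATTR_ID = 1, MY_ATTR_NAME };
	struct my_obj { u32 id; const char *name; };

	static int fill_info(struct sk_buff *skb, const struct my_obj *obj)
	{
		if (nla_put_u32(skb, MY_ATTR_ID, obj->id) ||
		    nla_put_string(skb, MY_ATTR_NAME, obj->name))
			return -EMSGSIZE;	/* caller cancels the message */
		return 0;
	}
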
diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c
index 1c51d7a58f0b..743262becd6e 100644
--- a/net/netrom/nr_dev.c
+++ b/net/netrom/nr_dev.c
@@ -97,7 +97,7 @@ static int nr_rebuild_header(struct sk_buff *skb)
static int nr_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
- const void *daddr, const void *saddr, unsigned len)
+ const void *daddr, const void *saddr, unsigned int len)
{
unsigned char *buff = skb_push(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN);
diff --git a/net/netrom/sysctl_net_netrom.c b/net/netrom/sysctl_net_netrom.c
index 1e0fa9e57aac..42f630b9a698 100644
--- a/net/netrom/sysctl_net_netrom.c
+++ b/net/netrom/sysctl_net_netrom.c
@@ -146,18 +146,12 @@ static ctl_table nr_table[] = {
{ }
};
-static struct ctl_path nr_path[] = {
- { .procname = "net", },
- { .procname = "netrom", },
- { }
-};
-
void __init nr_register_sysctl(void)
{
- nr_table_header = register_sysctl_paths(nr_path, nr_table);
+ nr_table_header = register_net_sysctl(&init_net, "net/netrom", nr_table);
}
void nr_unregister_sysctl(void)
{
- unregister_sysctl_table(nr_table_header);
+ unregister_net_sysctl_table(nr_table_header);
}
diff --git a/net/nfc/Kconfig b/net/nfc/Kconfig
index 44c865b86d6f..8d8d9bc4b6ff 100644
--- a/net/nfc/Kconfig
+++ b/net/nfc/Kconfig
@@ -14,6 +14,7 @@ menuconfig NFC
be called nfc.
source "net/nfc/nci/Kconfig"
+source "net/nfc/hci/Kconfig"
source "net/nfc/llcp/Kconfig"
source "drivers/nfc/Kconfig"
diff --git a/net/nfc/Makefile b/net/nfc/Makefile
index 7b4a6dcfa566..d1a117c2c401 100644
--- a/net/nfc/Makefile
+++ b/net/nfc/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_NFC) += nfc.o
obj-$(CONFIG_NFC_NCI) += nci/
+obj-$(CONFIG_NFC_HCI) += hci/
nfc-objs := core.o netlink.o af_nfc.o rawsock.o
nfc-$(CONFIG_NFC_LLCP) += llcp/llcp.o llcp/commands.o llcp/sock.o
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 295d129864d2..9f6ce011d35d 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -33,6 +33,8 @@
#define VERSION "0.1"
+#define NFC_CHECK_PRES_FREQ_MS 2000
+
int nfc_devlist_generation;
DEFINE_MUTEX(nfc_devlist_mutex);
@@ -95,7 +97,7 @@ int nfc_dev_down(struct nfc_dev *dev)
goto error;
}
- if (dev->polling || dev->remote_activated) {
+ if (dev->polling || dev->active_target) {
rc = -EBUSY;
goto error;
}
@@ -181,11 +183,27 @@ error:
return rc;
}
+static struct nfc_target *nfc_find_target(struct nfc_dev *dev, u32 target_idx)
+{
+ int i;
+
+ if (dev->n_targets == 0)
+ return NULL;
+
+ for (i = 0; i < dev->n_targets; i++) {
+ if (dev->targets[i].idx == target_idx)
+ return &dev->targets[i];
+ }
+
+ return NULL;
+}
+
int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode)
{
int rc = 0;
u8 *gb;
size_t gb_len;
+ struct nfc_target *target;
pr_debug("dev_name=%s comm %d\n", dev_name(&dev->dev), comm_mode);
@@ -210,7 +228,15 @@ int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode)
goto error;
}
- rc = dev->ops->dep_link_up(dev, target_index, comm_mode, gb, gb_len);
+ target = nfc_find_target(dev, target_index);
+ if (target == NULL) {
+ rc = -ENOTCONN;
+ goto error;
+ }
+
+ rc = dev->ops->dep_link_up(dev, target, comm_mode, gb, gb_len);
+ if (!rc)
+ dev->active_target = target;
error:
device_unlock(&dev->dev);
@@ -246,6 +272,7 @@ int nfc_dep_link_down(struct nfc_dev *dev)
rc = dev->ops->dep_link_down(dev);
if (!rc) {
dev->dep_link_up = false;
+ dev->active_target = NULL;
nfc_llcp_mac_is_down(dev);
nfc_genl_dep_link_down_event(dev);
}
@@ -277,6 +304,7 @@ EXPORT_SYMBOL(nfc_dep_link_is_up);
int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
{
int rc;
+ struct nfc_target *target;
pr_debug("dev_name=%s target_idx=%u protocol=%u\n",
dev_name(&dev->dev), target_idx, protocol);
@@ -288,9 +316,25 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
goto error;
}
- rc = dev->ops->activate_target(dev, target_idx, protocol);
- if (!rc)
- dev->remote_activated = true;
+ if (dev->active_target) {
+ rc = -EBUSY;
+ goto error;
+ }
+
+ target = nfc_find_target(dev, target_idx);
+ if (target == NULL) {
+ rc = -ENOTCONN;
+ goto error;
+ }
+
+ rc = dev->ops->activate_target(dev, target, protocol);
+ if (!rc) {
+ dev->active_target = target;
+
+ if (dev->ops->check_presence)
+ mod_timer(&dev->check_pres_timer, jiffies +
+ msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
+ }
error:
device_unlock(&dev->dev);
@@ -317,8 +361,21 @@ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx)
goto error;
}
- dev->ops->deactivate_target(dev, target_idx);
- dev->remote_activated = false;
+ if (dev->active_target == NULL) {
+ rc = -ENOTCONN;
+ goto error;
+ }
+
+ if (dev->active_target->idx != target_idx) {
+ rc = -ENOTCONN;
+ goto error;
+ }
+
+ if (dev->ops->check_presence)
+ del_timer_sync(&dev->check_pres_timer);
+
+ dev->ops->deactivate_target(dev, dev->active_target);
+ dev->active_target = NULL;
error:
device_unlock(&dev->dev);
@@ -352,7 +409,27 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
goto error;
}
- rc = dev->ops->data_exchange(dev, target_idx, skb, cb, cb_context);
+ if (dev->active_target == NULL) {
+ rc = -ENOTCONN;
+ kfree_skb(skb);
+ goto error;
+ }
+
+ if (dev->active_target->idx != target_idx) {
+ rc = -EADDRNOTAVAIL;
+ kfree_skb(skb);
+ goto error;
+ }
+
+ if (dev->ops->check_presence)
+ del_timer_sync(&dev->check_pres_timer);
+
+ rc = dev->ops->data_exchange(dev, dev->active_target, skb, cb,
+ cb_context);
+
+ if (!rc && dev->ops->check_presence)
+ mod_timer(&dev->check_pres_timer, jiffies +
+ msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
error:
device_unlock(&dev->dev);
@@ -424,15 +501,23 @@ EXPORT_SYMBOL(nfc_alloc_recv_skb);
* The device driver must call this function when one or many nfc targets
* are found. After calling this function, the device driver must stop
* polling for targets.
+ * IMPORTANT: this function must not be called from an atomic context.
+ * In addition, it must not be called from a context that would prevent
+ * the NFC Core from calling other nfc ops entry points concurrently.
*/
int nfc_targets_found(struct nfc_dev *dev,
struct nfc_target *targets, int n_targets)
{
+ int i;
+
pr_debug("dev_name=%s n_targets=%d\n", dev_name(&dev->dev), n_targets);
dev->polling = false;
- spin_lock_bh(&dev->targets_lock);
+ for (i = 0; i < n_targets; i++)
+ targets[i].idx = dev->target_next_idx++;
+
+ device_lock(&dev->dev);
dev->targets_generation++;
@@ -442,12 +527,12 @@ int nfc_targets_found(struct nfc_dev *dev,
if (!dev->targets) {
dev->n_targets = 0;
- spin_unlock_bh(&dev->targets_lock);
+ device_unlock(&dev->dev);
return -ENOMEM;
}
dev->n_targets = n_targets;
- spin_unlock_bh(&dev->targets_lock);
+ device_unlock(&dev->dev);
nfc_genl_targets_found(dev);
@@ -455,17 +540,105 @@ int nfc_targets_found(struct nfc_dev *dev,
}
EXPORT_SYMBOL(nfc_targets_found);
+/**
+ * nfc_target_lost - inform that an activated target went out of field
+ *
+ * @dev: The nfc device that had the activated target in field
+ * @target_idx: the nfc index of the target
+ *
+ * The device driver must call this function when the activated target
+ * goes out of the field.
+ * IMPORTANT: this function must not be called from an atomic context.
+ * In addition, it must not be called from a context that would prevent
+ * the NFC Core from calling other nfc ops entry points concurrently.
+ */
+int nfc_target_lost(struct nfc_dev *dev, u32 target_idx)
+{
+ struct nfc_target *tg;
+ int i;
+
+ pr_debug("dev_name %s n_target %d\n", dev_name(&dev->dev), target_idx);
+
+ device_lock(&dev->dev);
+
+ for (i = 0; i < dev->n_targets; i++) {
+ tg = &dev->targets[i];
+ if (tg->idx == target_idx)
+ break;
+ }
+
+ if (i == dev->n_targets) {
+ device_unlock(&dev->dev);
+ return -EINVAL;
+ }
+
+ dev->targets_generation++;
+ dev->n_targets--;
+ dev->active_target = NULL;
+
+ if (dev->n_targets) {
+ memcpy(&dev->targets[i], &dev->targets[i + 1],
+ (dev->n_targets - i) * sizeof(struct nfc_target));
+ } else {
+ kfree(dev->targets);
+ dev->targets = NULL;
+ }
+
+ device_unlock(&dev->dev);
+
+ nfc_genl_target_lost(dev, target_idx);
+
+ return 0;
+}
+EXPORT_SYMBOL(nfc_target_lost);
+
static void nfc_release(struct device *d)
{
struct nfc_dev *dev = to_nfc_dev(d);
pr_debug("dev_name=%s\n", dev_name(&dev->dev));
+ if (dev->ops->check_presence) {
+ del_timer_sync(&dev->check_pres_timer);
+ destroy_workqueue(dev->check_pres_wq);
+ }
+
nfc_genl_data_exit(&dev->genl_data);
kfree(dev->targets);
kfree(dev);
}
+static void nfc_check_pres_work(struct work_struct *work)
+{
+ struct nfc_dev *dev = container_of(work, struct nfc_dev,
+ check_pres_work);
+ int rc;
+
+ device_lock(&dev->dev);
+
+ if (dev->active_target && timer_pending(&dev->check_pres_timer) == 0) {
+ rc = dev->ops->check_presence(dev, dev->active_target);
+ if (!rc) {
+ mod_timer(&dev->check_pres_timer, jiffies +
+ msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
+ } else {
+ u32 active_target_idx = dev->active_target->idx;
+ device_unlock(&dev->dev);
+ nfc_target_lost(dev, active_target_idx);
+ return;
+ }
+ }
+
+ device_unlock(&dev->dev);
+}
+
+static void nfc_check_pres_timeout(unsigned long data)
+{
+ struct nfc_dev *dev = (struct nfc_dev *)data;
+
+ queue_work(dev->check_pres_wq, &dev->check_pres_work);
+}
+
struct class nfc_class = {
.name = "nfc",
.dev_release = nfc_release,
@@ -475,12 +648,12 @@ EXPORT_SYMBOL(nfc_class);
static int match_idx(struct device *d, void *data)
{
struct nfc_dev *dev = to_nfc_dev(d);
- unsigned *idx = data;
+ unsigned int *idx = data;
return dev->idx == *idx;
}
-struct nfc_dev *nfc_get_device(unsigned idx)
+struct nfc_dev *nfc_get_device(unsigned int idx)
{
struct device *d;
@@ -525,12 +698,29 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
dev->tx_headroom = tx_headroom;
dev->tx_tailroom = tx_tailroom;
- spin_lock_init(&dev->targets_lock);
nfc_genl_data_init(&dev->genl_data);
+
/* first generation must not be 0 */
dev->targets_generation = 1;
+ if (ops->check_presence) {
+ char name[32];
+ init_timer(&dev->check_pres_timer);
+ dev->check_pres_timer.data = (unsigned long)dev;
+ dev->check_pres_timer.function = nfc_check_pres_timeout;
+
+ INIT_WORK(&dev->check_pres_work, nfc_check_pres_work);
+ snprintf(name, sizeof(name), "nfc%d_check_pres_wq", dev->idx);
+ dev->check_pres_wq = alloc_workqueue(name, WQ_NON_REENTRANT |
+ WQ_UNBOUND |
+ WQ_MEM_RECLAIM, 1);
+ if (dev->check_pres_wq == NULL) {
+ kfree(dev);
+ return NULL;
+ }
+ }
+
return dev;
}
EXPORT_SYMBOL(nfc_allocate_device);
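
The presence-check machinery added above is the standard timer-to-workqueue deferral: the timer callback runs in softirq context, where a potentially sleeping ops->check_presence() must not be called, so the callback only queues work; the work handler then either rearms the timer or reports the target lost. Skeleton of the pattern (struct my_dev is illustrative; this uses the pre-timer_setup() API of this tree):

	#include <linux/timer.h>
	#include <linux/workqueue.h>

	struct my_dev {
		struct timer_list timer;
		struct workqueue_struct *wq;
		struct work_struct work;
	};

	static void check_timeout(unsigned long data)
	{
		struct my_dev *dev = (struct my_dev *)data;

		/* softirq context: just punt to process context */
		queue_work(dev->wq, &dev->work);
	}
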
diff --git a/net/nfc/hci/Kconfig b/net/nfc/hci/Kconfig
new file mode 100644
index 000000000000..fd67f51d18e9
--- /dev/null
+++ b/net/nfc/hci/Kconfig
@@ -0,0 +1,17 @@
+config NFC_HCI
+ depends on NFC
+ tristate "NFC HCI implementation"
+ default n
+ help
+ Say Y here if you want to build support for a kernel NFC HCI
+ implementation. This is mostly needed for devices that only process
+ HCI frames, such as the NXP pn544.
+
+config NFC_SHDLC
+ depends on NFC_HCI
+ select CRC_CCITT
+ bool "SHDLC link layer for HCI based NFC drivers"
+ default n
+ ---help---
+ Say Y if you use an NFC HCI driver that requires the SHDLC link layer.
+ If unsure, say N here.
diff --git a/net/nfc/hci/Makefile b/net/nfc/hci/Makefile
new file mode 100644
index 000000000000..f9c44b2fb065
--- /dev/null
+++ b/net/nfc/hci/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Linux NFC HCI layer.
+#
+
+obj-$(CONFIG_NFC_HCI) += hci.o
+
+hci-y := core.o hcp.o command.o
+hci-$(CONFIG_NFC_SHDLC) += shdlc.o
diff --git a/net/nfc/hci/command.c b/net/nfc/hci/command.c
new file mode 100644
index 000000000000..8729abf5f18b
--- /dev/null
+++ b/net/nfc/hci/command.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright (C) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) "hci: %s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+
+#include <net/nfc/hci.h>
+
+#include "hci.h"
+
+static int nfc_hci_result_to_errno(u8 result)
+{
+ switch (result) {
+ case NFC_HCI_ANY_OK:
+ return 0;
+ case NFC_HCI_ANY_E_TIMEOUT:
+ return -ETIMEDOUT;
+ default:
+ return -1;
+ }
+}
+
+static void nfc_hci_execute_cb(struct nfc_hci_dev *hdev, u8 result,
+ struct sk_buff *skb, void *cb_data)
+{
+ struct hcp_exec_waiter *hcp_ew = (struct hcp_exec_waiter *)cb_data;
+
+ pr_debug("HCI Cmd completed with HCI result=%d\n", result);
+
+ hcp_ew->exec_result = nfc_hci_result_to_errno(result);
+ if (hcp_ew->exec_result == 0)
+ hcp_ew->result_skb = skb;
+ else
+ kfree_skb(skb);
+ hcp_ew->exec_complete = true;
+
+ wake_up(hcp_ew->wq);
+}
+
+static int nfc_hci_execute_cmd(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
+ const u8 *param, size_t param_len,
+ struct sk_buff **skb)
+{
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(ew_wq);
+ struct hcp_exec_waiter hcp_ew;
+ hcp_ew.wq = &ew_wq;
+ hcp_ew.exec_complete = false;
+ hcp_ew.result_skb = NULL;
+
+ pr_debug("through pipe=%d, cmd=%d, plen=%zd\n", pipe, cmd, param_len);
+
+ /* TODO: Define hci cmd execution delay. Should it be the same
+ * for all commands?
+ */
+ hcp_ew.exec_result = nfc_hci_hcp_message_tx(hdev, pipe,
+ NFC_HCI_HCP_COMMAND, cmd,
+ param, param_len,
+ nfc_hci_execute_cb, &hcp_ew,
+ 3000);
+ if (hcp_ew.exec_result < 0)
+ return hcp_ew.exec_result;
+
+ wait_event(ew_wq, hcp_ew.exec_complete == true);
+
+ if (hcp_ew.exec_result == 0) {
+ if (skb)
+ *skb = hcp_ew.result_skb;
+ else
+ kfree_skb(hcp_ew.result_skb);
+ }
+
+ return hcp_ew.exec_result;
+}
+
+int nfc_hci_send_event(struct nfc_hci_dev *hdev, u8 gate, u8 event,
+ const u8 *param, size_t param_len)
+{
+ u8 pipe;
+
+ pr_debug("%d to gate %d\n", event, gate);
+
+ pipe = hdev->gate2pipe[gate];
+ if (pipe == NFC_HCI_INVALID_PIPE)
+ return -EADDRNOTAVAIL;
+
+ return nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_EVENT, event,
+ param, param_len, NULL, NULL, 0);
+}
+EXPORT_SYMBOL(nfc_hci_send_event);
+
+int nfc_hci_send_response(struct nfc_hci_dev *hdev, u8 gate, u8 response,
+ const u8 *param, size_t param_len)
+{
+ u8 pipe;
+
+ pr_debug("\n");
+
+ pipe = hdev->gate2pipe[gate];
+ if (pipe == NFC_HCI_INVALID_PIPE)
+ return -EADDRNOTAVAIL;
+
+ return nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_RESPONSE,
+ response, param, param_len, NULL, NULL,
+ 0);
+}
+EXPORT_SYMBOL(nfc_hci_send_response);
+
+/*
+ * Execute an hci command sent to gate.
+ * skb will contain response data on success. skb can be NULL if you are
+ * not interested in the response.
+ */
+int nfc_hci_send_cmd(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
+ const u8 *param, size_t param_len, struct sk_buff **skb)
+{
+ u8 pipe;
+
+ pr_debug("\n");
+
+ pipe = hdev->gate2pipe[gate];
+ if (pipe == NFC_HCI_INVALID_PIPE)
+ return -EADDRNOTAVAIL;
+
+ return nfc_hci_execute_cmd(hdev, pipe, cmd, param, param_len, skb);
+}
+EXPORT_SYMBOL(nfc_hci_send_cmd);
+
+int nfc_hci_set_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx,
+ const u8 *param, size_t param_len)
+{
+ int r;
+ u8 *tmp;
+
+ /* TODO ELa: reg idx must be inserted before param, but we don't want
+ * to ask the caller to do it to keep a simpler API.
+ * For now, just create a new temporary param buffer. This is far from
+ * optimal though, and the plan is to modify APIs to pass idx down to
+ * nfc_hci_hcp_message_tx where the frame is actually built, thereby
+ * eliminating the need for the temp allocation-copy here.
+ */
+
+ pr_debug("idx=%d to gate %d\n", idx, gate);
+
+ tmp = kmalloc(1 + param_len, GFP_KERNEL);
+ if (tmp == NULL)
+ return -ENOMEM;
+
+ *tmp = idx;
+ memcpy(tmp + 1, param, param_len);
+
+ r = nfc_hci_send_cmd(hdev, gate, NFC_HCI_ANY_SET_PARAMETER,
+ tmp, param_len + 1, NULL);
+
+ kfree(tmp);
+
+ return r;
+}
+EXPORT_SYMBOL(nfc_hci_set_param);
+
+int nfc_hci_get_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx,
+ struct sk_buff **skb)
+{
+ pr_debug("gate=%d regidx=%d\n", gate, idx);
+
+ return nfc_hci_send_cmd(hdev, gate, NFC_HCI_ANY_GET_PARAMETER,
+ &idx, 1, skb);
+}
+EXPORT_SYMBOL(nfc_hci_get_param);
+
+static int nfc_hci_open_pipe(struct nfc_hci_dev *hdev, u8 pipe)
+{
+ struct sk_buff *skb;
+ int r;
+
+ pr_debug("pipe=%d\n", pipe);
+
+ r = nfc_hci_execute_cmd(hdev, pipe, NFC_HCI_ANY_OPEN_PIPE,
+ NULL, 0, &skb);
+ if (r == 0) {
+ /* dest host other than host controller will send
+ * number of pipes already open on this gate before
+ * execution. The number can be found in skb->data[0]
+ */
+ kfree_skb(skb);
+ }
+
+ return r;
+}
+
+static int nfc_hci_close_pipe(struct nfc_hci_dev *hdev, u8 pipe)
+{
+ pr_debug("\n");
+
+ return nfc_hci_execute_cmd(hdev, pipe, NFC_HCI_ANY_CLOSE_PIPE,
+ NULL, 0, NULL);
+}
+
+static u8 nfc_hci_create_pipe(struct nfc_hci_dev *hdev, u8 dest_host,
+ u8 dest_gate, int *result)
+{
+ struct sk_buff *skb;
+ struct hci_create_pipe_params params;
+ struct hci_create_pipe_resp *resp;
+ u8 pipe;
+
+ pr_debug("gate=%d\n", dest_gate);
+
+ params.src_gate = NFC_HCI_ADMIN_GATE;
+ params.dest_host = dest_host;
+ params.dest_gate = dest_gate;
+
+ *result = nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE,
+ NFC_HCI_ADM_CREATE_PIPE,
+ (u8 *) &params, sizeof(params), &skb);
+ if (*result == 0) {
+ resp = (struct hci_create_pipe_resp *)skb->data;
+ pipe = resp->pipe;
+ kfree_skb(skb);
+
+ pr_debug("pipe created=%d\n", pipe);
+
+ return pipe;
+ } else
+ return NFC_HCI_INVALID_PIPE;
+}
+
+static int nfc_hci_delete_pipe(struct nfc_hci_dev *hdev, u8 pipe)
+{
+ pr_debug("\n");
+
+ return nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE,
+ NFC_HCI_ADM_DELETE_PIPE, &pipe, 1, NULL);
+}
+
+static int nfc_hci_clear_all_pipes(struct nfc_hci_dev *hdev)
+{
+ int r;
+
+ u8 param[2];
+
+ /* TODO: Find out what the identity reference data is
+ * and fill param with it. HCI spec 6.1.3.5 */
+
+ pr_debug("\n");
+
+ r = nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE,
+ NFC_HCI_ADM_CLEAR_ALL_PIPE, param, 2, NULL);
+
+ return r;
+}
+
+int nfc_hci_disconnect_gate(struct nfc_hci_dev *hdev, u8 gate)
+{
+ int r;
+ u8 pipe = hdev->gate2pipe[gate];
+
+ pr_debug("\n");
+
+ if (pipe == NFC_HCI_INVALID_PIPE)
+ return -EADDRNOTAVAIL;
+
+ r = nfc_hci_close_pipe(hdev, pipe);
+ if (r < 0)
+ return r;
+
+ if (pipe != NFC_HCI_LINK_MGMT_PIPE && pipe != NFC_HCI_ADMIN_PIPE) {
+ r = nfc_hci_delete_pipe(hdev, pipe);
+ if (r < 0)
+ return r;
+ }
+
+ hdev->gate2pipe[gate] = NFC_HCI_INVALID_PIPE;
+
+ return 0;
+}
+EXPORT_SYMBOL(nfc_hci_disconnect_gate);
+
+int nfc_hci_disconnect_all_gates(struct nfc_hci_dev *hdev)
+{
+ int r;
+
+ pr_debug("\n");
+
+ r = nfc_hci_clear_all_pipes(hdev);
+ if (r < 0)
+ return r;
+
+ memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe));
+
+ return 0;
+}
+EXPORT_SYMBOL(nfc_hci_disconnect_all_gates);
+
+int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate)
+{
+ u8 pipe = NFC_HCI_INVALID_PIPE;
+ bool pipe_created = false;
+ int r;
+
+ pr_debug("\n");
+
+ if (hdev->gate2pipe[dest_gate] != NFC_HCI_INVALID_PIPE)
+ return -EADDRINUSE;
+
+ switch (dest_gate) {
+ case NFC_HCI_LINK_MGMT_GATE:
+ pipe = NFC_HCI_LINK_MGMT_PIPE;
+ break;
+ case NFC_HCI_ADMIN_GATE:
+ pipe = NFC_HCI_ADMIN_PIPE;
+ break;
+ default:
+ pipe = nfc_hci_create_pipe(hdev, dest_host, dest_gate, &r);
+ if (pipe == NFC_HCI_INVALID_PIPE)
+ return r;
+ pipe_created = true;
+ break;
+ }
+
+ r = nfc_hci_open_pipe(hdev, pipe);
+ if (r < 0) {
+ if (pipe_created)
+ if (nfc_hci_delete_pipe(hdev, pipe) < 0) {
+ /* TODO: Cannot clean by deleting pipe...
+ * -> inconsistent state */
+ }
+ return r;
+ }
+
+ hdev->gate2pipe[dest_gate] = pipe;
+
+ return 0;
+}
+EXPORT_SYMBOL(nfc_hci_connect_gate);
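
nfc_hci_execute_cmd() above manufactures a synchronous call from the asynchronous HCP transport: it registers a completion callback, transmits, and blocks in wait_event() on an on-stack wait queue until the callback sets a flag and wakes it. The pattern, boiled down (struct waiter and send_and_wait() are illustrative; the real code also hands back a result skb):

	#include <linux/wait.h>

	struct waiter {
		wait_queue_head_t *wq;
		bool done;
		int result;
	};

	static void on_complete(int result, void *ctx)
	{
		struct waiter *w = ctx;

		w->result = result;
		w->done = true;		/* publish result before waking */
		wake_up(w->wq);
	}

	static int send_and_wait(struct waiter *w)
	{
		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);

		w->wq = &wq;
		w->done = false;
		/* ... transmit, arranging for on_complete(..., w) on reply ... */
		wait_event(wq, w->done);

		return w->result;
	}
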
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
new file mode 100644
index 000000000000..e1a640d2b588
--- /dev/null
+++ b/net/nfc/hci/core.c
@@ -0,0 +1,798 @@
+/*
+ * Copyright (C) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) "hci: %s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nfc.h>
+
+#include <net/nfc/nfc.h>
+#include <net/nfc/hci.h>
+
+#include "hci.h"
+
+/* Largest headroom needed for outgoing HCI commands */
+#define HCI_CMDS_HEADROOM 1
+
+static void nfc_hci_msg_tx_work(struct work_struct *work)
+{
+ struct nfc_hci_dev *hdev = container_of(work, struct nfc_hci_dev,
+ msg_tx_work);
+ struct hci_msg *msg;
+ struct sk_buff *skb;
+ int r = 0;
+
+ mutex_lock(&hdev->msg_tx_mutex);
+
+ if (hdev->cmd_pending_msg) {
+ if (timer_pending(&hdev->cmd_timer) == 0) {
+ if (hdev->cmd_pending_msg->cb)
+ hdev->cmd_pending_msg->cb(hdev,
+ NFC_HCI_ANY_E_TIMEOUT, NULL,
+ hdev->cmd_pending_msg->cb_context);
+ kfree(hdev->cmd_pending_msg);
+ hdev->cmd_pending_msg = NULL;
+ } else
+ goto exit;
+ }
+
+next_msg:
+ if (list_empty(&hdev->msg_tx_queue))
+ goto exit;
+
+ msg = list_first_entry(&hdev->msg_tx_queue, struct hci_msg, msg_l);
+ list_del(&msg->msg_l);
+
+ pr_debug("msg_tx_queue has a cmd to send\n");
+ while ((skb = skb_dequeue(&msg->msg_frags)) != NULL) {
+ r = hdev->ops->xmit(hdev, skb);
+ if (r < 0) {
+ kfree_skb(skb);
+ skb_queue_purge(&msg->msg_frags);
+ if (msg->cb)
+ msg->cb(hdev, NFC_HCI_ANY_E_NOK, NULL,
+ msg->cb_context);
+ kfree(msg);
+ break;
+ }
+ }
+
+ if (r)
+ goto next_msg;
+
+ if (msg->wait_response == false) {
+ kfree(msg);
+ goto next_msg;
+ }
+
+ hdev->cmd_pending_msg = msg;
+ mod_timer(&hdev->cmd_timer, jiffies +
+ msecs_to_jiffies(hdev->cmd_pending_msg->completion_delay));
+
+exit:
+ mutex_unlock(&hdev->msg_tx_mutex);
+}
+
+static void nfc_hci_msg_rx_work(struct work_struct *work)
+{
+ struct nfc_hci_dev *hdev = container_of(work, struct nfc_hci_dev,
+ msg_rx_work);
+ struct sk_buff *skb;
+ struct hcp_message *message;
+ u8 pipe;
+ u8 type;
+ u8 instruction;
+
+ while ((skb = skb_dequeue(&hdev->msg_rx_queue)) != NULL) {
+ pipe = skb->data[0];
+ skb_pull(skb, NFC_HCI_HCP_PACKET_HEADER_LEN);
+ message = (struct hcp_message *)skb->data;
+ type = HCP_MSG_GET_TYPE(message->header);
+ instruction = HCP_MSG_GET_CMD(message->header);
+ skb_pull(skb, NFC_HCI_HCP_MESSAGE_HEADER_LEN);
+
+ nfc_hci_hcp_message_rx(hdev, pipe, type, instruction, skb);
+ }
+}
+
+void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result,
+ struct sk_buff *skb)
+{
+ mutex_lock(&hdev->msg_tx_mutex);
+
+ if (hdev->cmd_pending_msg == NULL) {
+ kfree_skb(skb);
+ goto exit;
+ }
+
+ del_timer_sync(&hdev->cmd_timer);
+
+ if (hdev->cmd_pending_msg->cb)
+ hdev->cmd_pending_msg->cb(hdev, result, skb,
+ hdev->cmd_pending_msg->cb_context);
+ else
+ kfree_skb(skb);
+
+ kfree(hdev->cmd_pending_msg);
+ hdev->cmd_pending_msg = NULL;
+
+ queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work);
+
+exit:
+ mutex_unlock(&hdev->msg_tx_mutex);
+}
+
+void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
+ struct sk_buff *skb)
+{
+ kfree_skb(skb);
+}
+
+static u32 nfc_hci_sak_to_protocol(u8 sak)
+{
+ switch (NFC_HCI_TYPE_A_SEL_PROT(sak)) {
+ case NFC_HCI_TYPE_A_SEL_PROT_MIFARE:
+ return NFC_PROTO_MIFARE_MASK;
+ case NFC_HCI_TYPE_A_SEL_PROT_ISO14443:
+ return NFC_PROTO_ISO14443_MASK;
+ case NFC_HCI_TYPE_A_SEL_PROT_DEP:
+ return NFC_PROTO_NFC_DEP_MASK;
+ case NFC_HCI_TYPE_A_SEL_PROT_ISO14443_DEP:
+ return NFC_PROTO_ISO14443_MASK | NFC_PROTO_NFC_DEP_MASK;
+ default:
+ return 0xffffffff;
+ }
+}
+
+static int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate)
+{
+ struct nfc_target *targets;
+ struct sk_buff *atqa_skb = NULL;
+ struct sk_buff *sak_skb = NULL;
+ int r;
+
+ pr_debug("from gate %d\n", gate);
+
+ targets = kzalloc(sizeof(struct nfc_target), GFP_KERNEL);
+ if (targets == NULL)
+ return -ENOMEM;
+
+ switch (gate) {
+ case NFC_HCI_RF_READER_A_GATE:
+ r = nfc_hci_get_param(hdev, NFC_HCI_RF_READER_A_GATE,
+ NFC_HCI_RF_READER_A_ATQA, &atqa_skb);
+ if (r < 0)
+ goto exit;
+
+ r = nfc_hci_get_param(hdev, NFC_HCI_RF_READER_A_GATE,
+ NFC_HCI_RF_READER_A_SAK, &sak_skb);
+ if (r < 0)
+ goto exit;
+
+ if (atqa_skb->len != 2 || sak_skb->len != 1) {
+ r = -EPROTO;
+ goto exit;
+ }
+
+ targets->supported_protocols =
+ nfc_hci_sak_to_protocol(sak_skb->data[0]);
+ if (targets->supported_protocols == 0xffffffff) {
+ r = -EPROTO;
+ goto exit;
+ }
+
+ targets->sens_res = be16_to_cpu(*(u16 *)atqa_skb->data);
+ targets->sel_res = sak_skb->data[0];
+
+ if (hdev->ops->complete_target_discovered) {
+ r = hdev->ops->complete_target_discovered(hdev, gate,
+ targets);
+ if (r < 0)
+ goto exit;
+ }
+ break;
+ case NFC_HCI_RF_READER_B_GATE:
+ targets->supported_protocols = NFC_PROTO_ISO14443_MASK;
+ break;
+ default:
+ if (hdev->ops->target_from_gate)
+ r = hdev->ops->target_from_gate(hdev, gate, targets);
+ else
+ r = -EPROTO;
+ if (r < 0)
+ goto exit;
+
+ if (hdev->ops->complete_target_discovered) {
+ r = hdev->ops->complete_target_discovered(hdev, gate,
+ targets);
+ if (r < 0)
+ goto exit;
+ }
+ break;
+ }
+
+ targets->hci_reader_gate = gate;
+
+ r = nfc_targets_found(hdev->ndev, targets, 1);
+
+exit:
+ kfree(targets);
+ kfree_skb(atqa_skb);
+ kfree_skb(sak_skb);
+
+ return r;
+}
+
+void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
+ struct sk_buff *skb)
+{
+ int r = 0;
+
+ switch (event) {
+ case NFC_HCI_EVT_TARGET_DISCOVERED:
+ if (skb->len < 1) { /* no status data? */
+ r = -EPROTO;
+ goto exit;
+ }
+
+ if (skb->data[0] == 3) {
+ /* TODO: Multiple targets in field, none activated
+ * poll is supposedly stopped, but there is no
+ * single target to activate, so nothing to report
+ * up.
+ * if we need to restart poll, we must save the
+ * protocols from the initial poll and reuse here.
+ */
+ }
+
+ if (skb->data[0] != 0) {
+ r = -EPROTO;
+ goto exit;
+ }
+
+ r = nfc_hci_target_discovered(hdev,
+ nfc_hci_pipe2gate(hdev, pipe));
+ break;
+ default:
+ /* TODO: Unknown events are hardware specific
+ * pass them to the driver (needs a new hci_ops) */
+ break;
+ }
+
+exit:
+ kfree_skb(skb);
+
+ if (r) {
+ /* TODO: There was an error dispatching the event,
+ * how to propagate up to nfc core?
+ */
+ }
+}
+
+static void nfc_hci_cmd_timeout(unsigned long data)
+{
+ struct nfc_hci_dev *hdev = (struct nfc_hci_dev *)data;
+
+ queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work);
+}
+
+static int hci_dev_connect_gates(struct nfc_hci_dev *hdev, u8 gate_count,
+ u8 gates[])
+{
+ int r;
+ u8 *p = gates;
+ while (gate_count--) {
+ r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID, *p);
+ if (r < 0)
+ return r;
+ p++;
+ }
+
+ return 0;
+}
+
+static int hci_dev_session_init(struct nfc_hci_dev *hdev)
+{
+ struct sk_buff *skb = NULL;
+ int r;
+ u8 hci_gates[] = { /* NFC_HCI_ADMIN_GATE MUST be first */
+ NFC_HCI_ADMIN_GATE, NFC_HCI_LOOPBACK_GATE,
+ NFC_HCI_ID_MGMT_GATE, NFC_HCI_LINK_MGMT_GATE,
+ NFC_HCI_RF_READER_B_GATE, NFC_HCI_RF_READER_A_GATE
+ };
+
+ r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID,
+ NFC_HCI_ADMIN_GATE);
+ if (r < 0)
+ goto exit;
+
+ r = nfc_hci_get_param(hdev, NFC_HCI_ADMIN_GATE,
+ NFC_HCI_ADMIN_SESSION_IDENTITY, &skb);
+ if (r < 0)
+ goto disconnect_all;
+
+ if (skb->len && skb->len == strlen(hdev->init_data.session_id))
+ if (memcmp(hdev->init_data.session_id, skb->data,
+ skb->len) == 0) {
+ /* TODO ELa: restore gate<->pipe table from
+ * some TBD location.
+ * note: it doesn't seem possible to get the chip
+ * currently open gate/pipe table.
+ * It is only possible to obtain the supported
+ * gate list.
+ */
+
+ /* goto exit
+ * For now, always do a full initialization */
+ }
+
+ r = nfc_hci_disconnect_all_gates(hdev);
+ if (r < 0)
+ goto exit;
+
+ r = hci_dev_connect_gates(hdev, sizeof(hci_gates), hci_gates);
+ if (r < 0)
+ goto disconnect_all;
+
+ r = hci_dev_connect_gates(hdev, hdev->init_data.gate_count,
+ hdev->init_data.gates);
+ if (r < 0)
+ goto disconnect_all;
+
+ r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE,
+ NFC_HCI_ADMIN_SESSION_IDENTITY,
+ hdev->init_data.session_id,
+ strlen(hdev->init_data.session_id));
+ if (r == 0)
+ goto exit;
+
+disconnect_all:
+ nfc_hci_disconnect_all_gates(hdev);
+
+exit:
+ if (skb)
+ kfree_skb(skb);
+
+ return r;
+}
+
+static int hci_dev_version(struct nfc_hci_dev *hdev)
+{
+ int r;
+ struct sk_buff *skb;
+
+ r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE,
+ NFC_HCI_ID_MGMT_VERSION_SW, &skb);
+ if (r < 0)
+ return r;
+
+ if (skb->len != 3) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ hdev->sw_romlib = (skb->data[0] & 0xf0) >> 4;
+ hdev->sw_patch = skb->data[0] & 0x0f;
+ hdev->sw_flashlib_major = skb->data[1];
+ hdev->sw_flashlib_minor = skb->data[2];
+
+ kfree_skb(skb);
+
+ r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE,
+ NFC_HCI_ID_MGMT_VERSION_HW, &skb);
+ if (r < 0)
+ return r;
+
+ if (skb->len != 3) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ hdev->hw_derivative = (skb->data[0] & 0xe0) >> 5;
+ hdev->hw_version = skb->data[0] & 0x1f;
+ hdev->hw_mpw = (skb->data[1] & 0xc0) >> 6;
+ hdev->hw_software = skb->data[1] & 0x3f;
+ hdev->hw_bsid = skb->data[2];
+
+ kfree_skb(skb);
+
+ pr_info("SOFTWARE INFO:\n");
+ pr_info("RomLib : %d\n", hdev->sw_romlib);
+ pr_info("Patch : %d\n", hdev->sw_patch);
+ pr_info("FlashLib Major : %d\n", hdev->sw_flashlib_major);
+ pr_info("FlashLib Minor : %d\n", hdev->sw_flashlib_minor);
+ pr_info("HARDWARE INFO:\n");
+ pr_info("Derivative : %d\n", hdev->hw_derivative);
+ pr_info("HW Version : %d\n", hdev->hw_version);
+ pr_info("#MPW : %d\n", hdev->hw_mpw);
+ pr_info("Software : %d\n", hdev->hw_software);
+ pr_info("BSID Version : %d\n", hdev->hw_bsid);
+
+ return 0;
+}
+
+static int hci_dev_up(struct nfc_dev *nfc_dev)
+{
+ struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+ int r = 0;
+
+ if (hdev->ops->open) {
+ r = hdev->ops->open(hdev);
+ if (r < 0)
+ return r;
+ }
+
+ r = hci_dev_session_init(hdev);
+ if (r < 0)
+ goto exit;
+
+ r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+ NFC_HCI_EVT_END_OPERATION, NULL, 0);
+ if (r < 0)
+ goto exit;
+
+ if (hdev->ops->hci_ready) {
+ r = hdev->ops->hci_ready(hdev);
+ if (r < 0)
+ goto exit;
+ }
+
+ r = hci_dev_version(hdev);
+ if (r < 0)
+ goto exit;
+
+exit:
+ if (r < 0 && hdev->ops->close)
+ hdev->ops->close(hdev);
+ return r;
+}
+
+static int hci_dev_down(struct nfc_dev *nfc_dev)
+{
+ struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+ if (hdev->ops->close)
+ hdev->ops->close(hdev);
+
+ memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe));
+
+ return 0;
+}
+
+static int hci_start_poll(struct nfc_dev *nfc_dev, u32 protocols)
+{
+ struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+ if (hdev->ops->start_poll)
+ return hdev->ops->start_poll(hdev, protocols);
+ else
+ return nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+ NFC_HCI_EVT_READER_REQUESTED, NULL, 0);
+}
+
+static void hci_stop_poll(struct nfc_dev *nfc_dev)
+{
+ struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+ nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+ NFC_HCI_EVT_END_OPERATION, NULL, 0);
+}
+
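+/* With HCI, the target is assumed to be already activated as part of its
+ * discovery through the reader gate, so these two operations are no-ops.
+ */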
+static int hci_activate_target(struct nfc_dev *nfc_dev,
+ struct nfc_target *target, u32 protocol)
+{
+ return 0;
+}
+
+static void hci_deactivate_target(struct nfc_dev *nfc_dev,
+ struct nfc_target *target)
+{
+}
+
+static int hci_data_exchange(struct nfc_dev *nfc_dev, struct nfc_target *target,
+ struct sk_buff *skb, data_exchange_cb_t cb,
+ void *cb_context)
+{
+ struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+ int r;
+ struct sk_buff *res_skb = NULL;
+
+ pr_debug("target_idx=%d\n", target->idx);
+
+ switch (target->hci_reader_gate) {
+ case NFC_HCI_RF_READER_A_GATE:
+ case NFC_HCI_RF_READER_B_GATE:
+ if (hdev->ops->data_exchange) {
+ r = hdev->ops->data_exchange(hdev, target, skb,
+ &res_skb);
+ if (r <= 0) /* handled */
+ break;
+ }
+
+ *skb_push(skb, 1) = 0; /* CTR, see spec:10.2.2.1 */
+ r = nfc_hci_send_cmd(hdev, target->hci_reader_gate,
+ NFC_HCI_WR_XCHG_DATA,
+ skb->data, skb->len, &res_skb);
+ /*
+ * TODO: Check RF Error indicator to make sure data is valid.
+ * It seems that an HCI cmd can complete without error, but data
+ * can be invalid if an RF error occurred? Ignore for now.
+ */
+ if (r == 0)
+ skb_trim(res_skb, res_skb->len - 1); /* RF Err ind */
+ break;
+ default:
+ if (hdev->ops->data_exchange) {
+ r = hdev->ops->data_exchange(hdev, target, skb,
+ &res_skb);
+ if (r == 1)
+ r = -ENOTSUPP;
+ } else
+ r = -ENOTSUPP;
+ }
+
+ kfree_skb(skb);
+
+ cb(cb_context, res_skb, r);
+
+ return 0;
+}
+
+static int hci_check_presence(struct nfc_dev *nfc_dev,
+ struct nfc_target *target)
+{
+ struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+ if (hdev->ops->check_presence)
+ return hdev->ops->check_presence(hdev, target);
+
+ return 0;
+}
+
+static struct nfc_ops hci_nfc_ops = {
+ .dev_up = hci_dev_up,
+ .dev_down = hci_dev_down,
+ .start_poll = hci_start_poll,
+ .stop_poll = hci_stop_poll,
+ .activate_target = hci_activate_target,
+ .deactivate_target = hci_deactivate_target,
+ .data_exchange = hci_data_exchange,
+ .check_presence = hci_check_presence,
+};
+
+struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
+ struct nfc_hci_init_data *init_data,
+ u32 protocols,
+ int tx_headroom,
+ int tx_tailroom,
+ int max_link_payload)
+{
+ struct nfc_hci_dev *hdev;
+
+ if (ops->xmit == NULL)
+ return NULL;
+
+ if (protocols == 0)
+ return NULL;
+
+ hdev = kzalloc(sizeof(struct nfc_hci_dev), GFP_KERNEL);
+ if (hdev == NULL)
+ return NULL;
+
+ hdev->ndev = nfc_allocate_device(&hci_nfc_ops, protocols,
+ tx_headroom + HCI_CMDS_HEADROOM,
+ tx_tailroom);
+ if (!hdev->ndev) {
+ kfree(hdev);
+ return NULL;
+ }
+
+ hdev->ops = ops;
+ hdev->max_data_link_payload = max_link_payload;
+ hdev->init_data = *init_data;
+
+ nfc_set_drvdata(hdev->ndev, hdev);
+
+ memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe));
+
+ return hdev;
+}
+EXPORT_SYMBOL(nfc_hci_allocate_device);
+
+void nfc_hci_free_device(struct nfc_hci_dev *hdev)
+{
+ nfc_free_device(hdev->ndev);
+ kfree(hdev);
+}
+EXPORT_SYMBOL(nfc_hci_free_device);
+
+int nfc_hci_register_device(struct nfc_hci_dev *hdev)
+{
+ struct device *dev = &hdev->ndev->dev;
+ const char *devname = dev_name(dev);
+ char name[32];
+ int r = 0;
+
+ mutex_init(&hdev->msg_tx_mutex);
+
+ INIT_LIST_HEAD(&hdev->msg_tx_queue);
+
+ INIT_WORK(&hdev->msg_tx_work, nfc_hci_msg_tx_work);
+ snprintf(name, sizeof(name), "%s_hci_msg_tx_wq", devname);
+ hdev->msg_tx_wq = alloc_workqueue(name, WQ_NON_REENTRANT | WQ_UNBOUND |
+ WQ_MEM_RECLAIM, 1);
+ if (hdev->msg_tx_wq == NULL) {
+ r = -ENOMEM;
+ goto exit;
+ }
+
+ init_timer(&hdev->cmd_timer);
+ hdev->cmd_timer.data = (unsigned long)hdev;
+ hdev->cmd_timer.function = nfc_hci_cmd_timeout;
+
+ skb_queue_head_init(&hdev->rx_hcp_frags);
+
+ INIT_WORK(&hdev->msg_rx_work, nfc_hci_msg_rx_work);
+ snprintf(name, sizeof(name), "%s_hci_msg_rx_wq", devname);
+ hdev->msg_rx_wq = alloc_workqueue(name, WQ_NON_REENTRANT | WQ_UNBOUND |
+ WQ_MEM_RECLAIM, 1);
+ if (hdev->msg_rx_wq == NULL) {
+ r = -ENOMEM;
+ goto exit;
+ }
+
+ skb_queue_head_init(&hdev->msg_rx_queue);
+
+ r = nfc_register_device(hdev->ndev);
+
+exit:
+ if (r < 0) {
+ if (hdev->msg_tx_wq)
+ destroy_workqueue(hdev->msg_tx_wq);
+ if (hdev->msg_rx_wq)
+ destroy_workqueue(hdev->msg_rx_wq);
+ }
+
+ return r;
+}
+EXPORT_SYMBOL(nfc_hci_register_device);
+
+void nfc_hci_unregister_device(struct nfc_hci_dev *hdev)
+{
+ struct hci_msg *msg;
+
+ skb_queue_purge(&hdev->rx_hcp_frags);
+ skb_queue_purge(&hdev->msg_rx_queue);
+
+ /* list_first_entry() never returns NULL, even on an empty list,
+ * so check for emptiness explicitly before dequeuing.
+ */
+ while (!list_empty(&hdev->msg_tx_queue)) {
+ msg = list_first_entry(&hdev->msg_tx_queue, struct hci_msg,
+ msg_l);
+ list_del(&msg->msg_l);
+ skb_queue_purge(&msg->msg_frags);
+ kfree(msg);
+ }
+
+ del_timer_sync(&hdev->cmd_timer);
+
+ nfc_unregister_device(hdev->ndev);
+
+ destroy_workqueue(hdev->msg_tx_wq);
+
+ destroy_workqueue(hdev->msg_rx_wq);
+}
+EXPORT_SYMBOL(nfc_hci_unregister_device);
+
+void nfc_hci_set_clientdata(struct nfc_hci_dev *hdev, void *clientdata)
+{
+ hdev->clientdata = clientdata;
+}
+EXPORT_SYMBOL(nfc_hci_set_clientdata);
+
+void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev)
+{
+ return hdev->clientdata;
+}
+EXPORT_SYMBOL(nfc_hci_get_clientdata);
+
+void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hcp_packet *packet;
+ u8 type;
+ u8 instruction;
+ struct sk_buff *hcp_skb;
+ u8 pipe;
+ struct sk_buff *frag_skb;
+ int msg_len;
+
+ if (skb == NULL) {
+ /* TODO ELa: lower layer had permanent failure, need to
+ * propagate that up
+ */
+
+ skb_queue_purge(&hdev->rx_hcp_frags);
+
+ return;
+ }
+
+ packet = (struct hcp_packet *)skb->data;
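+ /* The msb of the packet header is the chaining bit: it is 0 on
+ * every fragment of an HCP message except the last one, so keep
+ * accumulating fragments until a packet with the bit set arrives.
+ */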
+ if ((packet->header & ~NFC_HCI_FRAGMENT) == 0) {
+ skb_queue_tail(&hdev->rx_hcp_frags, skb);
+ return;
+ }
+
+ /* it's the last fragment. Does it need re-aggregation? */
+ if (skb_queue_len(&hdev->rx_hcp_frags)) {
+ pipe = packet->header & NFC_HCI_FRAGMENT;
+ skb_queue_tail(&hdev->rx_hcp_frags, skb);
+
+ msg_len = 0;
+ skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) {
+ msg_len += (frag_skb->len -
+ NFC_HCI_HCP_PACKET_HEADER_LEN);
+ }
+
+ hcp_skb = nfc_alloc_recv_skb(NFC_HCI_HCP_PACKET_HEADER_LEN +
+ msg_len, GFP_KERNEL);
+ if (hcp_skb == NULL) {
+ /* TODO ELa: cannot deliver HCP message. How to
+ * propagate error up?
+ */
+ skb_queue_purge(&hdev->rx_hcp_frags);
+ return;
+ }
+
+ *skb_put(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN) = pipe;
+
+ skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) {
+ msg_len = frag_skb->len - NFC_HCI_HCP_PACKET_HEADER_LEN;
+ memcpy(skb_put(hcp_skb, msg_len),
+ frag_skb->data + NFC_HCI_HCP_PACKET_HEADER_LEN,
+ msg_len);
+ }
+
+ skb_queue_purge(&hdev->rx_hcp_frags);
+ } else {
+ packet->header &= NFC_HCI_FRAGMENT;
+ hcp_skb = skb;
+ }
+
+ /* if this is a response, dispatch immediately to
+ * unblock waiting cmd context. Otherwise, enqueue to dispatch
+ * in separate context where handler can also execute command.
+ */
+ packet = (struct hcp_packet *)hcp_skb->data;
+ type = HCP_MSG_GET_TYPE(packet->message.header);
+ if (type == NFC_HCI_HCP_RESPONSE) {
+ pipe = packet->header;
+ instruction = HCP_MSG_GET_CMD(packet->message.header);
+ skb_pull(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN +
+ NFC_HCI_HCP_MESSAGE_HEADER_LEN);
+ nfc_hci_hcp_message_rx(hdev, pipe, type, instruction, hcp_skb);
+ } else {
+ skb_queue_tail(&hdev->msg_rx_queue, hcp_skb);
+ queue_work(hdev->msg_rx_wq, &hdev->msg_rx_work);
+ }
+}
+EXPORT_SYMBOL(nfc_hci_recv_frame);
+
+MODULE_LICENSE("GPL");
diff --git a/net/nfc/hci/hci.h b/net/nfc/hci/hci.h
new file mode 100644
index 000000000000..45f2fe4fd486
--- /dev/null
+++ b/net/nfc/hci/hci.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __LOCAL_HCI_H
+#define __LOCAL_HCI_H
+
+struct gate_pipe_map {
+ u8 gate;
+ u8 pipe;
+};
+
+struct hcp_message {
+ u8 header; /* type -cmd,evt,rsp- + instruction */
+ u8 data[];
+} __packed;
+
+struct hcp_packet {
+ u8 header; /* cbit+pipe */
+ struct hcp_message message;
+} __packed;
+
+/*
+ * HCI command execution completion callback.
+ * result will be one of the HCI response codes.
+ * skb contains the response data and must be disposed.
+ */
+typedef void (*hci_cmd_cb_t) (struct nfc_hci_dev *hdev, u8 result,
+ struct sk_buff *skb, void *cb_data);
+
+struct hcp_exec_waiter {
+ wait_queue_head_t *wq;
+ bool exec_complete;
+ int exec_result;
+ struct sk_buff *result_skb;
+};
+
+struct hci_msg {
+ struct list_head msg_l;
+ struct sk_buff_head msg_frags;
+ bool wait_response;
+ hci_cmd_cb_t cb;
+ void *cb_context;
+ unsigned long completion_delay;
+};
+
+struct hci_create_pipe_params {
+ u8 src_gate;
+ u8 dest_host;
+ u8 dest_gate;
+} __packed;
+
+struct hci_create_pipe_resp {
+ u8 src_host;
+ u8 src_gate;
+ u8 dest_host;
+ u8 dest_gate;
+ u8 pipe;
+} __packed;
+
+#define NFC_HCI_FRAGMENT 0x7f
+
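+/* A packet header is 1 byte: chaining bit (msb) plus 7-bit pipe id. The
+ * message header packs a 2-bit type and a 6-bit instruction.
+ */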
+#define HCP_HEADER(type, instr) ((((type) & 0x03) << 6) | ((instr) & 0x3f))
+#define HCP_MSG_GET_TYPE(header) ((header & 0xc0) >> 6)
+#define HCP_MSG_GET_CMD(header) (header & 0x3f)
+
+int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
+ u8 type, u8 instruction,
+ const u8 *payload, size_t payload_len,
+ hci_cmd_cb_t cb, void *cb_data,
+ unsigned long completion_delay);
+
+u8 nfc_hci_pipe2gate(struct nfc_hci_dev *hdev, u8 pipe);
+
+void nfc_hci_hcp_message_rx(struct nfc_hci_dev *hdev, u8 pipe, u8 type,
+ u8 instruction, struct sk_buff *skb);
+
+/* HCP headers */
+#define NFC_HCI_HCP_PACKET_HEADER_LEN 1
+#define NFC_HCI_HCP_MESSAGE_HEADER_LEN 1
+#define NFC_HCI_HCP_HEADER_LEN 2
+
+/* HCP types */
+#define NFC_HCI_HCP_COMMAND 0x00
+#define NFC_HCI_HCP_EVENT 0x01
+#define NFC_HCI_HCP_RESPONSE 0x02
+
+/* Generic commands */
+#define NFC_HCI_ANY_SET_PARAMETER 0x01
+#define NFC_HCI_ANY_GET_PARAMETER 0x02
+#define NFC_HCI_ANY_OPEN_PIPE 0x03
+#define NFC_HCI_ANY_CLOSE_PIPE 0x04
+
+/* Reader RF commands */
+#define NFC_HCI_WR_XCHG_DATA 0x10
+
+/* Admin commands */
+#define NFC_HCI_ADM_CREATE_PIPE 0x10
+#define NFC_HCI_ADM_DELETE_PIPE 0x11
+#define NFC_HCI_ADM_NOTIFY_PIPE_CREATED 0x12
+#define NFC_HCI_ADM_NOTIFY_PIPE_DELETED 0x13
+#define NFC_HCI_ADM_CLEAR_ALL_PIPE 0x14
+#define NFC_HCI_ADM_NOTIFY_ALL_PIPE_CLEARED 0x15
+
+/* Generic responses */
+#define NFC_HCI_ANY_OK 0x00
+#define NFC_HCI_ANY_E_NOT_CONNECTED 0x01
+#define NFC_HCI_ANY_E_CMD_PAR_UNKNOWN 0x02
+#define NFC_HCI_ANY_E_NOK 0x03
+#define NFC_HCI_ANY_E_PIPES_FULL 0x04
+#define NFC_HCI_ANY_E_REG_PAR_UNKNOWN 0x05
+#define NFC_HCI_ANY_E_PIPE_NOT_OPENED 0x06
+#define NFC_HCI_ANY_E_CMD_NOT_SUPPORTED 0x07
+#define NFC_HCI_ANY_E_INHIBITED 0x08
+#define NFC_HCI_ANY_E_TIMEOUT 0x09
+#define NFC_HCI_ANY_E_REG_ACCESS_DENIED 0x0a
+#define NFC_HCI_ANY_E_PIPE_ACCESS_DENIED 0x0b
+
+/* Pipes */
+#define NFC_HCI_INVALID_PIPE 0x80
+#define NFC_HCI_LINK_MGMT_PIPE 0x00
+#define NFC_HCI_ADMIN_PIPE 0x01
+
+#endif /* __LOCAL_HCI_H */
diff --git a/net/nfc/hci/hcp.c b/net/nfc/hci/hcp.c
new file mode 100644
index 000000000000..7212cf2c5785
--- /dev/null
+++ b/net/nfc/hci/hcp.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) "hci: %s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <net/nfc/hci.h>
+
+#include "hci.h"
+
+/*
+ * Payload is the HCP message data only. Instruction will be prepended.
+ * Guarantees that cb will be called upon completion, or after the timeout
+ * delay counted from the moment the cmd is sent to the transport.
+ */
+int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
+ u8 type, u8 instruction,
+ const u8 *payload, size_t payload_len,
+ hci_cmd_cb_t cb, void *cb_data,
+ unsigned long completion_delay)
+{
+ struct nfc_dev *ndev = hdev->ndev;
+ struct hci_msg *cmd;
+ const u8 *ptr = payload;
+ int hci_len, err;
+ bool firstfrag = true;
+
+ cmd = kzalloc(sizeof(struct hci_msg), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&cmd->msg_l);
+ skb_queue_head_init(&cmd->msg_frags);
+ cmd->wait_response = (type == NFC_HCI_HCP_COMMAND);
+ cmd->cb = cb;
+ cmd->cb_context = cb_data;
+ cmd->completion_delay = completion_delay;
+
+ hci_len = payload_len + 1;
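+
+ /* Split the message into as many packets as the data link payload
+ * size allows. Each packet carries the 1-byte packet header; only
+ * the first one also carries the message header (the +1 above).
+ */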
+ while (hci_len > 0) {
+ struct sk_buff *skb;
+ int skb_len, data_link_len;
+ struct hcp_packet *packet;
+
+ if (NFC_HCI_HCP_PACKET_HEADER_LEN + hci_len <=
+ hdev->max_data_link_payload)
+ data_link_len = hci_len;
+ else
+ data_link_len = hdev->max_data_link_payload -
+ NFC_HCI_HCP_PACKET_HEADER_LEN;
+
+ skb_len = ndev->tx_headroom + NFC_HCI_HCP_PACKET_HEADER_LEN +
+ data_link_len + ndev->tx_tailroom;
+ hci_len -= data_link_len;
+
+ skb = alloc_skb(skb_len, GFP_KERNEL);
+ if (skb == NULL) {
+ err = -ENOMEM;
+ goto out_skb_err;
+ }
+ skb_reserve(skb, ndev->tx_headroom);
+
+ skb_put(skb, NFC_HCI_HCP_PACKET_HEADER_LEN + data_link_len);
+
+ /* Only the last fragment will have the cb bit set to 1 */
+ packet = (struct hcp_packet *)skb->data;
+ packet->header = pipe;
+ if (firstfrag) {
+ firstfrag = false;
+ packet->message.header = HCP_HEADER(type, instruction);
+ if (ptr) {
+ memcpy(packet->message.data, ptr,
+ data_link_len - 1);
+ ptr += data_link_len - 1;
+ }
+ } else {
+ memcpy(&packet->message, ptr, data_link_len);
+ ptr += data_link_len;
+ }
+
+ /* This is the last fragment, set the cb bit */
+ if (hci_len == 0)
+ packet->header |= ~NFC_HCI_FRAGMENT;
+
+ skb_queue_tail(&cmd->msg_frags, skb);
+ }
+
+ mutex_lock(&hdev->msg_tx_mutex);
+ list_add_tail(&cmd->msg_l, &hdev->msg_tx_queue);
+ mutex_unlock(&hdev->msg_tx_mutex);
+
+ queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work);
+
+ return 0;
+
+out_skb_err:
+ skb_queue_purge(&cmd->msg_frags);
+ kfree(cmd);
+
+ return err;
+}
+
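+/* Reverse lookup in the gate2pipe table; returns 0xff when the pipe is not
+ * bound to any gate.
+ */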
+u8 nfc_hci_pipe2gate(struct nfc_hci_dev *hdev, u8 pipe)
+{
+ int gate;
+
+ for (gate = 0; gate < NFC_HCI_MAX_GATES; gate++)
+ if (hdev->gate2pipe[gate] == pipe)
+ return gate;
+
+ return 0xff;
+}
+
+/*
+ * Receive hcp message for pipe, with type and cmd.
+ * skb contains optional message data only.
+ */
+void nfc_hci_hcp_message_rx(struct nfc_hci_dev *hdev, u8 pipe, u8 type,
+ u8 instruction, struct sk_buff *skb)
+{
+ switch (type) {
+ case NFC_HCI_HCP_RESPONSE:
+ nfc_hci_resp_received(hdev, instruction, skb);
+ break;
+ case NFC_HCI_HCP_COMMAND:
+ nfc_hci_cmd_received(hdev, pipe, instruction, skb);
+ break;
+ case NFC_HCI_HCP_EVENT:
+ nfc_hci_event_received(hdev, pipe, instruction, skb);
+ break;
+ default:
+ pr_err("UNKNOWN MSG Type %d, instruction=%d\n",
+ type, instruction);
+ kfree_skb(skb);
+ break;
+ }
+}
diff --git a/net/nfc/hci/shdlc.c b/net/nfc/hci/shdlc.c
new file mode 100644
index 000000000000..5665dc6d893a
--- /dev/null
+++ b/net/nfc/hci/shdlc.c
@@ -0,0 +1,957 @@
+/*
+ * Copyright (C) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) "shdlc: %s: " fmt, __func__
+
+#include <linux/sched.h>
+#include <linux/export.h>
+#include <linux/wait.h>
+#include <linux/crc-ccitt.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+
+#include <net/nfc/hci.h>
+#include <net/nfc/shdlc.h>
+
+#define SHDLC_LLC_HEAD_ROOM 2
+#define SHDLC_LLC_TAIL_ROOM 2
+
+#define SHDLC_MAX_WINDOW 4
+#define SHDLC_SREJ_SUPPORT false
+
+#define SHDLC_CONTROL_HEAD_MASK 0xe0
+#define SHDLC_CONTROL_HEAD_I 0x80
+#define SHDLC_CONTROL_HEAD_I2 0xa0
+#define SHDLC_CONTROL_HEAD_S 0xc0
+#define SHDLC_CONTROL_HEAD_U 0xe0
+
+#define SHDLC_CONTROL_NS_MASK 0x38
+#define SHDLC_CONTROL_NR_MASK 0x07
+#define SHDLC_CONTROL_TYPE_MASK 0x18
+
+#define SHDLC_CONTROL_M_MASK 0x1f
+
+enum sframe_type {
+ S_FRAME_RR = 0x00,
+ S_FRAME_REJ = 0x01,
+ S_FRAME_RNR = 0x02,
+ S_FRAME_SREJ = 0x03
+};
+
+enum uframe_modifier {
+ U_FRAME_UA = 0x06,
+ U_FRAME_RSET = 0x19
+};
+
+#define SHDLC_CONNECT_VALUE_MS 5
+#define SHDLC_T1_VALUE_MS(w) ((5 * (w)) / 4)
+#define SHDLC_T2_VALUE_MS 300
+
+#define SHDLC_DUMP_SKB(info, skb) \
+do { \
+ pr_debug("%s:\n", info); \
+ print_hex_dump(KERN_DEBUG, "shdlc: ", DUMP_PREFIX_OFFSET, \
+ 16, 1, skb->data, skb->len, 0); \
+} while (0)
+
+/* checks x < y <= z modulo 8 */
+static bool nfc_shdlc_x_lt_y_lteq_z(int x, int y, int z)
+{
+ if (x < z)
+ return (x < y) && (y <= z);
+ else
+ return (y > x) || (y <= z);
+}
+
+/* checks x <= y < z modulo 8 */
+static bool nfc_shdlc_x_lteq_y_lt_z(int x, int y, int z)
+{
+ if (x <= z)
+ return (x <= y) && (y < z);
+ else /* x > z -> z+8 > x */
+ return (y >= x) || (y < z);
+}
+
+static struct sk_buff *nfc_shdlc_alloc_skb(struct nfc_shdlc *shdlc,
+ int payload_len)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(shdlc->client_headroom + SHDLC_LLC_HEAD_ROOM +
+ shdlc->client_tailroom + SHDLC_LLC_TAIL_ROOM +
+ payload_len, GFP_KERNEL);
+ if (skb)
+ skb_reserve(skb, shdlc->client_headroom + SHDLC_LLC_HEAD_ROOM);
+
+ return skb;
+}
+
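+/* Wire framing: a 1-byte length (counting everything that follows it, crc
+ * included), the payload, and the inverted CRC-CCITT of length + payload,
+ * lsb first.
+ */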
+static void nfc_shdlc_add_len_crc(struct sk_buff *skb)
+{
+ u16 crc;
+ int len;
+
+ len = skb->len + 2;
+ *skb_push(skb, 1) = len;
+
+ crc = crc_ccitt(0xffff, skb->data, skb->len);
+ crc = ~crc;
+ *skb_put(skb, 1) = crc & 0xff;
+ *skb_put(skb, 1) = crc >> 8;
+}
+
+/* immediately sends an S frame. */
+static int nfc_shdlc_send_s_frame(struct nfc_shdlc *shdlc,
+ enum sframe_type sframe_type, int nr)
+{
+ int r;
+ struct sk_buff *skb;
+
+ pr_debug("sframe_type=%d nr=%d\n", sframe_type, nr);
+
+ skb = nfc_shdlc_alloc_skb(shdlc, 0);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ *skb_push(skb, 1) = SHDLC_CONTROL_HEAD_S | (sframe_type << 3) | nr;
+
+ nfc_shdlc_add_len_crc(skb);
+
+ r = shdlc->ops->xmit(shdlc, skb);
+
+ kfree_skb(skb);
+
+ return r;
+}
+
+/* immediately sends a U frame. skb may contain an optional payload */
+static int nfc_shdlc_send_u_frame(struct nfc_shdlc *shdlc,
+ struct sk_buff *skb,
+ enum uframe_modifier uframe_modifier)
+{
+ int r;
+
+ pr_debug("uframe_modifier=%d\n", uframe_modifier);
+
+ *skb_push(skb, 1) = SHDLC_CONTROL_HEAD_U | uframe_modifier;
+
+ nfc_shdlc_add_len_crc(skb);
+
+ r = shdlc->ops->xmit(shdlc, skb);
+
+ kfree_skb(skb);
+
+ return r;
+}
+
+/*
+ * Free ack_pending frames until y_nr - 1, and reset t2 according to
+ * the remaining oldest ack_pending frame sent time
+ */
+static void nfc_shdlc_reset_t2(struct nfc_shdlc *shdlc, int y_nr)
+{
+ struct sk_buff *skb;
+ int dnr = shdlc->dnr; /* MUST initially be < y_nr */
+
+ pr_debug("release ack pending up to frame %d excluded\n", y_nr);
+
+ while (dnr != y_nr) {
+ pr_debug("release ack pending frame %d\n", dnr);
+
+ skb = skb_dequeue(&shdlc->ack_pending_q);
+ kfree_skb(skb);
+
+ dnr = (dnr + 1) % 8;
+ }
+
+ if (skb_queue_empty(&shdlc->ack_pending_q)) {
+ if (shdlc->t2_active) {
+ del_timer_sync(&shdlc->t2_timer);
+ shdlc->t2_active = false;
+
+ pr_debug
+ ("All sent frames acked. Stopped T2(retransmit)\n");
+ }
+ } else {
+ skb = skb_peek(&shdlc->ack_pending_q);
+
+ mod_timer(&shdlc->t2_timer, *(unsigned long *)skb->cb +
+ msecs_to_jiffies(SHDLC_T2_VALUE_MS));
+ shdlc->t2_active = true;
+
+ pr_debug
+ ("Start T2(retransmit) for remaining unacked sent frames\n");
+ }
+}
+
+/*
+ * Receive validated frames from lower layer. skb contains HCI payload only.
+ * Handle according to algorithm at spec:10.8.2
+ */
+static void nfc_shdlc_rcv_i_frame(struct nfc_shdlc *shdlc,
+ struct sk_buff *skb, int ns, int nr)
+{
+ int x_ns = ns;
+ int y_nr = nr;
+
+ pr_debug("recvd I-frame %d, remote waiting frame %d\n", ns, nr);
+
+ if (shdlc->state != SHDLC_CONNECTED)
+ goto exit;
+
+ if (x_ns != shdlc->nr) {
+ nfc_shdlc_send_s_frame(shdlc, S_FRAME_REJ, shdlc->nr);
+ goto exit;
+ }
+
+ if (shdlc->t1_active == false) {
+ shdlc->t1_active = true;
+ /* mod_timer() takes an absolute expiry time */
+ mod_timer(&shdlc->t1_timer, jiffies +
+ msecs_to_jiffies(SHDLC_T1_VALUE_MS(shdlc->w)));
+ pr_debug("(re)Start T1(send ack)\n");
+ }
+
+ if (skb->len) {
+ nfc_hci_recv_frame(shdlc->hdev, skb);
+ skb = NULL;
+ }
+
+ shdlc->nr = (shdlc->nr + 1) % 8;
+
+ if (nfc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) {
+ nfc_shdlc_reset_t2(shdlc, y_nr);
+
+ shdlc->dnr = y_nr;
+ }
+
+exit:
+ kfree_skb(skb); /* kfree_skb() is a no-op on NULL */
+}
+
+static void nfc_shdlc_rcv_ack(struct nfc_shdlc *shdlc, int y_nr)
+{
+ pr_debug("remote acked up to frame %d excluded\n", y_nr);
+
+ if (nfc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) {
+ nfc_shdlc_reset_t2(shdlc, y_nr);
+ shdlc->dnr = y_nr;
+ }
+}
+
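+/* Strip the framing added at send time from every unacked frame and put them
+ * back on the send queue, so they are resent with sequence numbers restarting
+ * at dnr.
+ */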
+static void nfc_shdlc_requeue_ack_pending(struct nfc_shdlc *shdlc)
+{
+ struct sk_buff *skb;
+
+ pr_debug("ns reset to %d\n", shdlc->dnr);
+
+ while ((skb = skb_dequeue_tail(&shdlc->ack_pending_q))) {
+ skb_pull(skb, 2); /* remove len+control */
+ skb_trim(skb, skb->len - 2); /* remove crc */
+ skb_queue_head(&shdlc->send_q, skb);
+ }
+ shdlc->ns = shdlc->dnr;
+}
+
+static void nfc_shdlc_rcv_rej(struct nfc_shdlc *shdlc, int y_nr)
+{
+ struct sk_buff *skb;
+
+ pr_debug("remote asks retransmition from frame %d\n", y_nr);
+
+ if (nfc_shdlc_x_lteq_y_lt_z(shdlc->dnr, y_nr, shdlc->ns)) {
+ if (shdlc->t2_active) {
+ del_timer_sync(&shdlc->t2_timer);
+ shdlc->t2_active = false;
+ pr_debug("Stopped T2(retransmit)\n");
+ }
+
+ if (shdlc->dnr != y_nr) {
+ while ((shdlc->dnr = ((shdlc->dnr + 1) % 8)) != y_nr) {
+ skb = skb_dequeue(&shdlc->ack_pending_q);
+ kfree_skb(skb);
+ }
+ }
+
+ nfc_shdlc_requeue_ack_pending(shdlc);
+ }
+}
+
+/* See spec RR:10.8.3 REJ:10.8.4 */
+static void nfc_shdlc_rcv_s_frame(struct nfc_shdlc *shdlc,
+ enum sframe_type s_frame_type, int nr)
+{
+ struct sk_buff *skb;
+
+ if (shdlc->state != SHDLC_CONNECTED)
+ return;
+
+ switch (s_frame_type) {
+ case S_FRAME_RR:
+ nfc_shdlc_rcv_ack(shdlc, nr);
+ if (shdlc->rnr) { /* see SHDLC 10.7.7 */
+ shdlc->rnr = false;
+ if (shdlc->send_q.qlen == 0) {
+ skb = nfc_shdlc_alloc_skb(shdlc, 0);
+ if (skb)
+ skb_queue_tail(&shdlc->send_q, skb);
+ }
+ }
+ break;
+ case S_FRAME_REJ:
+ nfc_shdlc_rcv_rej(shdlc, nr);
+ break;
+ case S_FRAME_RNR:
+ nfc_shdlc_rcv_ack(shdlc, nr);
+ shdlc->rnr = true;
+ break;
+ default:
+ break;
+ }
+}
+
+static void nfc_shdlc_connect_complete(struct nfc_shdlc *shdlc, int r)
+{
+ pr_debug("result=%d\n", r);
+
+ del_timer_sync(&shdlc->connect_timer);
+
+ if (r == 0) {
+ shdlc->ns = 0;
+ shdlc->nr = 0;
+ shdlc->dnr = 0;
+
+ shdlc->state = SHDLC_CONNECTED;
+ } else {
+ shdlc->state = SHDLC_DISCONNECTED;
+
+ /*
+ * TODO: Could it be possible that there are pending
+ * executing commands that are waiting for connect to complete
+ * before they can be carried out? As connect is a blocking
+ * operation, it would require that the userspace process can
+ * send commands on the same device from a second thread before
+ * the device is up. I don't think that is possible, is it?
+ */
+ }
+
+ shdlc->connect_result = r;
+
+ wake_up(shdlc->connect_wq);
+}
+
+static int nfc_shdlc_connect_initiate(struct nfc_shdlc *shdlc)
+{
+ struct sk_buff *skb;
+
+ pr_debug("\n");
+
+ skb = nfc_shdlc_alloc_skb(shdlc, 2);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ *skb_put(skb, 1) = SHDLC_MAX_WINDOW;
+ *skb_put(skb, 1) = SHDLC_SREJ_SUPPORT ? 1 : 0;
+
+ return nfc_shdlc_send_u_frame(shdlc, skb, U_FRAME_RSET);
+}
+
+static int nfc_shdlc_connect_send_ua(struct nfc_shdlc *shdlc)
+{
+ struct sk_buff *skb;
+
+ pr_debug("\n");
+
+ skb = nfc_shdlc_alloc_skb(shdlc, 0);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ return nfc_shdlc_send_u_frame(shdlc, skb, U_FRAME_UA);
+}
+
+static void nfc_shdlc_rcv_u_frame(struct nfc_shdlc *shdlc,
+ struct sk_buff *skb,
+ enum uframe_modifier u_frame_modifier)
+{
+ u8 w = SHDLC_MAX_WINDOW;
+ bool srej_support = SHDLC_SREJ_SUPPORT;
+ int r;
+
+ pr_debug("u_frame_modifier=%d\n", u_frame_modifier);
+
+ switch (u_frame_modifier) {
+ case U_FRAME_RSET:
+ if (shdlc->state == SHDLC_NEGOCIATING) {
+ /* we sent RSET, but chip wants to negotiate */
+ if (skb->len > 0)
+ w = skb->data[0];
+
+ if (skb->len > 1)
+ srej_support = skb->data[1] & 0x01 ? true :
+ false;
+
+ if ((w <= SHDLC_MAX_WINDOW) &&
+ (SHDLC_SREJ_SUPPORT || (srej_support == false))) {
+ shdlc->w = w;
+ shdlc->srej_support = srej_support;
+ r = nfc_shdlc_connect_send_ua(shdlc);
+ nfc_shdlc_connect_complete(shdlc, r);
+ }
+ } else if (shdlc->state > SHDLC_NEGOCIATING) {
+ /*
+ * TODO: Chip wants to reset link
+ * send ua, empty skb lists, reset counters
+ * propagate info to HCI layer
+ */
+ }
+ break;
+ case U_FRAME_UA:
+ if ((shdlc->state == SHDLC_CONNECTING &&
+ shdlc->connect_tries > 0) ||
+ (shdlc->state == SHDLC_NEGOCIATING))
+ nfc_shdlc_connect_complete(shdlc, 0);
+ break;
+ default:
+ break;
+ }
+
+ kfree_skb(skb);
+}
+
+static void nfc_shdlc_handle_rcv_queue(struct nfc_shdlc *shdlc)
+{
+ struct sk_buff *skb;
+ u8 control;
+ int nr;
+ int ns;
+ enum sframe_type s_frame_type;
+ enum uframe_modifier u_frame_modifier;
+
+ if (shdlc->rcv_q.qlen)
+ pr_debug("rcvQlen=%d\n", shdlc->rcv_q.qlen);
+
+ while ((skb = skb_dequeue(&shdlc->rcv_q)) != NULL) {
+ control = skb->data[0];
+ skb_pull(skb, 1);
+ switch (control & SHDLC_CONTROL_HEAD_MASK) {
+ case SHDLC_CONTROL_HEAD_I:
+ case SHDLC_CONTROL_HEAD_I2:
+ ns = (control & SHDLC_CONTROL_NS_MASK) >> 3;
+ nr = control & SHDLC_CONTROL_NR_MASK;
+ nfc_shdlc_rcv_i_frame(shdlc, skb, ns, nr);
+ break;
+ case SHDLC_CONTROL_HEAD_S:
+ s_frame_type = (control & SHDLC_CONTROL_TYPE_MASK) >> 3;
+ nr = control & SHDLC_CONTROL_NR_MASK;
+ nfc_shdlc_rcv_s_frame(shdlc, s_frame_type, nr);
+ kfree_skb(skb);
+ break;
+ case SHDLC_CONTROL_HEAD_U:
+ u_frame_modifier = control & SHDLC_CONTROL_M_MASK;
+ nfc_shdlc_rcv_u_frame(shdlc, skb, u_frame_modifier);
+ break;
+ default:
+ pr_err("UNKNOWN Control=%d\n", control);
+ kfree_skb(skb);
+ break;
+ }
+ }
+}
+
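+/* Number of sent frames not yet acked, with 3-bit sequence arithmetic */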
+static int nfc_shdlc_w_used(int ns, int dnr)
+{
+ int unack_count;
+
+ if (dnr <= ns)
+ unack_count = ns - dnr;
+ else
+ unack_count = 8 - dnr + ns;
+
+ return unack_count;
+}
+
+/* Send frames according to algorithm at spec:10.8.1 */
+static void nfc_shdlc_handle_send_queue(struct nfc_shdlc *shdlc)
+{
+ struct sk_buff *skb;
+ int r;
+ unsigned long time_sent;
+
+ if (shdlc->send_q.qlen)
+ pr_debug
+ ("sendQlen=%d ns=%d dnr=%d rnr=%s w_room=%d unackQlen=%d\n",
+ shdlc->send_q.qlen, shdlc->ns, shdlc->dnr,
+ shdlc->rnr == false ? "false" : "true",
+ shdlc->w - nfc_shdlc_w_used(shdlc->ns, shdlc->dnr),
+ shdlc->ack_pending_q.qlen);
+
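+ /* Send while frames are queued, the send window has room, and the
+ * remote has not signalled receiver-not-ready (RNR).
+ */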
+ while (shdlc->send_q.qlen && shdlc->ack_pending_q.qlen < shdlc->w &&
+ (shdlc->rnr == false)) {
+
+ if (shdlc->t1_active) {
+ del_timer_sync(&shdlc->t1_timer);
+ shdlc->t1_active = false;
+ pr_debug("Stopped T1(send ack)\n");
+ }
+
+ skb = skb_dequeue(&shdlc->send_q);
+
+ *skb_push(skb, 1) = SHDLC_CONTROL_HEAD_I | (shdlc->ns << 3) |
+ shdlc->nr;
+
+ pr_debug("Sending I-Frame %d, waiting to rcv %d\n", shdlc->ns,
+ shdlc->nr);
+ /* SHDLC_DUMP_SKB("shdlc frame written", skb); */
+
+ nfc_shdlc_add_len_crc(skb);
+
+ r = shdlc->ops->xmit(shdlc, skb);
+ if (r < 0) {
+ /*
+ * TODO: Cannot send, shdlc machine is dead, we
+ * must propagate the information up to HCI.
+ */
+ shdlc->hard_fault = r;
+ break;
+ }
+
+ shdlc->ns = (shdlc->ns + 1) % 8;
+
+ time_sent = jiffies;
+ *(unsigned long *)skb->cb = time_sent;
+
+ skb_queue_tail(&shdlc->ack_pending_q, skb);
+
+ if (shdlc->t2_active == false) {
+ shdlc->t2_active = true;
+ mod_timer(&shdlc->t2_timer, time_sent +
+ msecs_to_jiffies(SHDLC_T2_VALUE_MS));
+ pr_debug("Started T2 (retransmit)\n");
+ }
+ }
+}
+
+static void nfc_shdlc_connect_timeout(unsigned long data)
+{
+ struct nfc_shdlc *shdlc = (struct nfc_shdlc *)data;
+
+ pr_debug("\n");
+
+ queue_work(shdlc->sm_wq, &shdlc->sm_work);
+}
+
+static void nfc_shdlc_t1_timeout(unsigned long data)
+{
+ struct nfc_shdlc *shdlc = (struct nfc_shdlc *)data;
+
+ pr_debug("SoftIRQ: need to send ack\n");
+
+ queue_work(shdlc->sm_wq, &shdlc->sm_work);
+}
+
+static void nfc_shdlc_t2_timeout(unsigned long data)
+{
+ struct nfc_shdlc *shdlc = (struct nfc_shdlc *)data;
+
+ pr_debug("SoftIRQ: need to retransmit\n");
+
+ queue_work(shdlc->sm_wq, &shdlc->sm_work);
+}
+
+static void nfc_shdlc_sm_work(struct work_struct *work)
+{
+ struct nfc_shdlc *shdlc = container_of(work, struct nfc_shdlc, sm_work);
+ int r;
+
+ pr_debug("\n");
+
+ mutex_lock(&shdlc->state_mutex);
+
+ switch (shdlc->state) {
+ case SHDLC_DISCONNECTED:
+ skb_queue_purge(&shdlc->rcv_q);
+ skb_queue_purge(&shdlc->send_q);
+ skb_queue_purge(&shdlc->ack_pending_q);
+ break;
+ case SHDLC_CONNECTING:
+ if (shdlc->connect_tries++ < 5)
+ r = nfc_shdlc_connect_initiate(shdlc);
+ else
+ r = -ETIME;
+ if (r < 0)
+ nfc_shdlc_connect_complete(shdlc, r);
+ else {
+ mod_timer(&shdlc->connect_timer, jiffies +
+ msecs_to_jiffies(SHDLC_CONNECT_VALUE_MS));
+
+ shdlc->state = SHDLC_NEGOCIATING;
+ }
+ break;
+ case SHDLC_NEGOCIATING:
+ if (timer_pending(&shdlc->connect_timer) == 0) {
+ shdlc->state = SHDLC_CONNECTING;
+ queue_work(shdlc->sm_wq, &shdlc->sm_work);
+ }
+
+ nfc_shdlc_handle_rcv_queue(shdlc);
+ break;
+ case SHDLC_CONNECTED:
+ nfc_shdlc_handle_rcv_queue(shdlc);
+ nfc_shdlc_handle_send_queue(shdlc);
+
+ if (shdlc->t1_active && timer_pending(&shdlc->t1_timer) == 0) {
+ pr_debug
+ ("Handle T1(send ack) elapsed (T1 now inactive)\n");
+
+ shdlc->t1_active = false;
+ r = nfc_shdlc_send_s_frame(shdlc, S_FRAME_RR,
+ shdlc->nr);
+ if (r < 0)
+ shdlc->hard_fault = r;
+ }
+
+ if (shdlc->t2_active && timer_pending(&shdlc->t2_timer) == 0) {
+ pr_debug
+ ("Handle T2(retransmit) elapsed (T2 inactive)\n");
+
+ shdlc->t2_active = false;
+
+ nfc_shdlc_requeue_ack_pending(shdlc);
+ nfc_shdlc_handle_send_queue(shdlc);
+ }
+
+ if (shdlc->hard_fault) {
+ /*
+ * TODO: Handle hard_fault that occurred during
+ * this invocation of the shdlc worker
+ */
+ }
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&shdlc->state_mutex);
+}
+
+/*
+ * Called from syscall context to establish shdlc link. Sleeps until
+ * link is ready or failure.
+ */
+static int nfc_shdlc_connect(struct nfc_shdlc *shdlc)
+{
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(connect_wq);
+
+ pr_debug("\n");
+
+ mutex_lock(&shdlc->state_mutex);
+
+ shdlc->state = SHDLC_CONNECTING;
+ shdlc->connect_wq = &connect_wq;
+ shdlc->connect_tries = 0;
+ shdlc->connect_result = 1;
+
+ mutex_unlock(&shdlc->state_mutex);
+
+ queue_work(shdlc->sm_wq, &shdlc->sm_work);
+
+ wait_event(connect_wq, shdlc->connect_result != 1);
+
+ return shdlc->connect_result;
+}
+
+static void nfc_shdlc_disconnect(struct nfc_shdlc *shdlc)
+{
+ pr_debug("\n");
+
+ mutex_lock(&shdlc->state_mutex);
+
+ shdlc->state = SHDLC_DISCONNECTED;
+
+ mutex_unlock(&shdlc->state_mutex);
+
+ queue_work(shdlc->sm_wq, &shdlc->sm_work);
+}
+
+/*
+ * Receive an incoming shdlc frame. Frame has already been crc-validated.
+ * skb contains only LLC header and payload.
+ * If skb == NULL, it is a notification that the link below is dead.
+ */
+void nfc_shdlc_recv_frame(struct nfc_shdlc *shdlc, struct sk_buff *skb)
+{
+ if (skb == NULL) {
+ pr_err("NULL Frame -> link is dead\n");
+ shdlc->hard_fault = -EREMOTEIO;
+ } else {
+ SHDLC_DUMP_SKB("incoming frame", skb);
+ skb_queue_tail(&shdlc->rcv_q, skb);
+ }
+
+ queue_work(shdlc->sm_wq, &shdlc->sm_work);
+}
+EXPORT_SYMBOL(nfc_shdlc_recv_frame);
+
+static int nfc_shdlc_open(struct nfc_hci_dev *hdev)
+{
+ struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
+ int r;
+
+ pr_debug("\n");
+
+ if (shdlc->ops->open) {
+ r = shdlc->ops->open(shdlc);
+ if (r < 0)
+ return r;
+ }
+
+ r = nfc_shdlc_connect(shdlc);
+ if (r < 0 && shdlc->ops->close)
+ shdlc->ops->close(shdlc);
+
+ return r;
+}
+
+static void nfc_shdlc_close(struct nfc_hci_dev *hdev)
+{
+ struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
+
+ pr_debug("\n");
+
+ nfc_shdlc_disconnect(shdlc);
+
+ if (shdlc->ops->close)
+ shdlc->ops->close(shdlc);
+}
+
+static int nfc_shdlc_hci_ready(struct nfc_hci_dev *hdev)
+{
+ struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
+ int r = 0;
+
+ pr_debug("\n");
+
+ if (shdlc->ops->hci_ready)
+ r = shdlc->ops->hci_ready(shdlc);
+
+ return r;
+}
+
+static int nfc_shdlc_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
+{
+ struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
+
+ SHDLC_DUMP_SKB("queuing HCP packet to shdlc", skb);
+
+ skb_queue_tail(&shdlc->send_q, skb);
+
+ queue_work(shdlc->sm_wq, &shdlc->sm_work);
+
+ return 0;
+}
+
+static int nfc_shdlc_start_poll(struct nfc_hci_dev *hdev, u32 protocols)
+{
+ struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
+
+ pr_debug("\n");
+
+ if (shdlc->ops->start_poll)
+ return shdlc->ops->start_poll(shdlc, protocols);
+
+ return 0;
+}
+
+static int nfc_shdlc_target_from_gate(struct nfc_hci_dev *hdev, u8 gate,
+ struct nfc_target *target)
+{
+ struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
+
+ if (shdlc->ops->target_from_gate)
+ return shdlc->ops->target_from_gate(shdlc, gate, target);
+
+ return -EPERM;
+}
+
+static int nfc_shdlc_complete_target_discovered(struct nfc_hci_dev *hdev,
+ u8 gate,
+ struct nfc_target *target)
+{
+ struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
+
+ pr_debug("\n");
+
+ if (shdlc->ops->complete_target_discovered)
+ return shdlc->ops->complete_target_discovered(shdlc, gate,
+ target);
+
+ return 0;
+}
+
+static int nfc_shdlc_data_exchange(struct nfc_hci_dev *hdev,
+ struct nfc_target *target,
+ struct sk_buff *skb,
+ struct sk_buff **res_skb)
+{
+ struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
+
+ if (shdlc->ops->data_exchange)
+ return shdlc->ops->data_exchange(shdlc, target, skb, res_skb);
+
+ return -EPERM;
+}
+
+static int nfc_shdlc_check_presence(struct nfc_hci_dev *hdev,
+ struct nfc_target *target)
+{
+ struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
+
+ if (shdlc->ops->check_presence)
+ return shdlc->ops->check_presence(shdlc, target);
+
+ return 0;
+}
+
+static struct nfc_hci_ops shdlc_ops = {
+ .open = nfc_shdlc_open,
+ .close = nfc_shdlc_close,
+ .hci_ready = nfc_shdlc_hci_ready,
+ .xmit = nfc_shdlc_xmit,
+ .start_poll = nfc_shdlc_start_poll,
+ .target_from_gate = nfc_shdlc_target_from_gate,
+ .complete_target_discovered = nfc_shdlc_complete_target_discovered,
+ .data_exchange = nfc_shdlc_data_exchange,
+ .check_presence = nfc_shdlc_check_presence,
+};
+
+struct nfc_shdlc *nfc_shdlc_allocate(struct nfc_shdlc_ops *ops,
+ struct nfc_hci_init_data *init_data,
+ u32 protocols,
+ int tx_headroom, int tx_tailroom,
+ int max_link_payload, const char *devname)
+{
+ struct nfc_shdlc *shdlc;
+ int r;
+ char name[32];
+
+ if (ops->xmit == NULL)
+ return NULL;
+
+ shdlc = kzalloc(sizeof(struct nfc_shdlc), GFP_KERNEL);
+ if (shdlc == NULL)
+ return NULL;
+
+ mutex_init(&shdlc->state_mutex);
+ shdlc->ops = ops;
+ shdlc->state = SHDLC_DISCONNECTED;
+
+ init_timer(&shdlc->connect_timer);
+ shdlc->connect_timer.data = (unsigned long)shdlc;
+ shdlc->connect_timer.function = nfc_shdlc_connect_timeout;
+
+ init_timer(&shdlc->t1_timer);
+ shdlc->t1_timer.data = (unsigned long)shdlc;
+ shdlc->t1_timer.function = nfc_shdlc_t1_timeout;
+
+ init_timer(&shdlc->t2_timer);
+ shdlc->t2_timer.data = (unsigned long)shdlc;
+ shdlc->t2_timer.function = nfc_shdlc_t2_timeout;
+
+ shdlc->w = SHDLC_MAX_WINDOW;
+ shdlc->srej_support = SHDLC_SREJ_SUPPORT;
+
+ skb_queue_head_init(&shdlc->rcv_q);
+ skb_queue_head_init(&shdlc->send_q);
+ skb_queue_head_init(&shdlc->ack_pending_q);
+
+ INIT_WORK(&shdlc->sm_work, nfc_shdlc_sm_work);
+ snprintf(name, sizeof(name), "%s_shdlc_sm_wq", devname);
+ shdlc->sm_wq = alloc_workqueue(name, WQ_NON_REENTRANT | WQ_UNBOUND |
+ WQ_MEM_RECLAIM, 1);
+ if (shdlc->sm_wq == NULL)
+ goto err_allocwq;
+
+ shdlc->client_headroom = tx_headroom;
+ shdlc->client_tailroom = tx_tailroom;
+
+ shdlc->hdev = nfc_hci_allocate_device(&shdlc_ops, init_data, protocols,
+ tx_headroom + SHDLC_LLC_HEAD_ROOM,
+ tx_tailroom + SHDLC_LLC_TAIL_ROOM,
+ max_link_payload);
+ if (shdlc->hdev == NULL)
+ goto err_allocdev;
+
+ nfc_hci_set_clientdata(shdlc->hdev, shdlc);
+
+ r = nfc_hci_register_device(shdlc->hdev);
+ if (r < 0)
+ goto err_regdev;
+
+ return shdlc;
+
+err_regdev:
+ nfc_hci_free_device(shdlc->hdev);
+
+err_allocdev:
+ destroy_workqueue(shdlc->sm_wq);
+
+err_allocwq:
+ kfree(shdlc);
+
+ return NULL;
+}
+EXPORT_SYMBOL(nfc_shdlc_allocate);
+
+void nfc_shdlc_free(struct nfc_shdlc *shdlc)
+{
+ pr_debug("\n");
+
+ /* TODO: Check that this cannot be called while still in use */
+
+ nfc_hci_unregister_device(shdlc->hdev);
+ nfc_hci_free_device(shdlc->hdev);
+
+ destroy_workqueue(shdlc->sm_wq);
+
+ skb_queue_purge(&shdlc->rcv_q);
+ skb_queue_purge(&shdlc->send_q);
+ skb_queue_purge(&shdlc->ack_pending_q);
+
+ kfree(shdlc);
+}
+EXPORT_SYMBOL(nfc_shdlc_free);
+
+void nfc_shdlc_set_clientdata(struct nfc_shdlc *shdlc, void *clientdata)
+{
+ pr_debug("\n");
+
+ shdlc->clientdata = clientdata;
+}
+EXPORT_SYMBOL(nfc_shdlc_set_clientdata);
+
+void *nfc_shdlc_get_clientdata(struct nfc_shdlc *shdlc)
+{
+ return shdlc->clientdata;
+}
+EXPORT_SYMBOL(nfc_shdlc_get_clientdata);
+
+struct nfc_hci_dev *nfc_shdlc_get_hci_dev(struct nfc_shdlc *shdlc)
+{
+ return shdlc->hdev;
+}
+EXPORT_SYMBOL(nfc_shdlc_get_hci_dev);
diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c
index 7b76eb7192f3..bf8ae4f0b90c 100644
--- a/net/nfc/llcp/commands.c
+++ b/net/nfc/llcp/commands.c
@@ -102,7 +102,7 @@ u8 *nfc_llcp_build_tlv(u8 type, u8 *value, u8 value_length, u8 *tlv_length)
length = llcp_tlv_length[type];
if (length == 0 && value_length == 0)
return NULL;
- else
+ else if (length == 0)
length = value_length;
*tlv_length = 2 + length;
@@ -248,7 +248,7 @@ int nfc_llcp_disconnect(struct nfc_llcp_sock *sock)
skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
- skb = llcp_add_header(skb, sock->ssap, sock->dsap, LLCP_PDU_DISC);
+ skb = llcp_add_header(skb, sock->dsap, sock->ssap, LLCP_PDU_DISC);
skb_queue_tail(&local->tx_queue, skb);
@@ -416,7 +416,7 @@ int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason)
skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
- skb = llcp_add_header(skb, ssap, dsap, LLCP_PDU_DM);
+ skb = llcp_add_header(skb, dsap, ssap, LLCP_PDU_DM);
memcpy(skb_put(skb, 1), &reason, 1);
@@ -474,7 +474,7 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
while (remaining_len > 0) {
- frag_len = min_t(u16, local->remote_miu, remaining_len);
+ frag_len = min_t(size_t, local->remote_miu, remaining_len);
pr_debug("Fragment %zd bytes remaining %zd",
frag_len, remaining_len);
@@ -488,7 +488,7 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
memcpy(skb_put(pdu, frag_len), msg_ptr, frag_len);
- skb_queue_head(&sock->tx_queue, pdu);
+ skb_queue_tail(&sock->tx_queue, pdu);
lock_sock(sk);
@@ -497,12 +497,12 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
release_sock(sk);
remaining_len -= frag_len;
- msg_ptr += len;
+ msg_ptr += frag_len;
}
kfree(msg_data);
- return 0;
+ return len;
}
int nfc_llcp_send_rr(struct nfc_llcp_sock *sock)
@@ -522,7 +522,7 @@ int nfc_llcp_send_rr(struct nfc_llcp_sock *sock)
skb_put(skb, LLCP_SEQUENCE_SIZE);
- skb->data[2] = sock->recv_n % 16;
+ skb->data[2] = sock->recv_n;
skb_queue_head(&local->tx_queue, skb);
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
index 17a578f641f1..42994fac26d6 100644
--- a/net/nfc/llcp/llcp.c
+++ b/net/nfc/llcp/llcp.c
@@ -307,6 +307,8 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
u8 *gb_cur, *version_tlv, version, version_length;
u8 *lto_tlv, lto, lto_length;
u8 *wks_tlv, wks_length;
+ u8 *miux_tlv, miux_length;
+ __be16 miux;
u8 gb_len = 0;
version = LLCP_VERSION_11;
@@ -316,7 +318,7 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
/* 1500 ms */
lto = 150;
- lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &lto, 1, &lto_length);
+ lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, &lto, 1, &lto_length);
gb_len += lto_length;
pr_debug("Local wks 0x%lx\n", local->local_wks);
@@ -324,6 +326,11 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
&wks_length);
gb_len += wks_length;
+ miux = cpu_to_be16(LLCP_MAX_MIUX);
+ miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
+ &miux_length);
+ gb_len += miux_length;
+
gb_len += ARRAY_SIZE(llcp_magic);
if (gb_len > NFC_MAX_GT_LEN) {
@@ -345,6 +352,9 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
memcpy(gb_cur, wks_tlv, wks_length);
gb_cur += wks_length;
+ memcpy(gb_cur, miux_tlv, miux_length);
+ gb_cur += miux_length;
+
kfree(version_tlv);
kfree(lto_tlv);
@@ -388,6 +398,9 @@ static void nfc_llcp_tx_work(struct work_struct *work)
skb = skb_dequeue(&local->tx_queue);
if (skb != NULL) {
pr_debug("Sending pending skb\n");
+ print_hex_dump(KERN_DEBUG, "LLCP Tx: ", DUMP_PREFIX_OFFSET,
+ 16, 1, skb->data, skb->len, true);
+
nfc_data_exchange(local->dev, local->target_idx,
skb, nfc_llcp_recv, local);
} else {
@@ -425,7 +438,7 @@ static u8 nfc_llcp_nr(struct sk_buff *pdu)
static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu)
{
- pdu->data[2] = (sock->send_n << 4) | (sock->recv_n % 16);
+ pdu->data[2] = (sock->send_n << 4) | (sock->recv_n);
sock->send_n = (sock->send_n + 1) % 16;
sock->recv_ack_n = (sock->recv_n - 1) % 16;
}
@@ -435,6 +448,8 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
{
struct nfc_llcp_sock *sock, *llcp_sock, *n;
+ pr_debug("ssap dsap %d %d\n", ssap, dsap);
+
if (ssap == 0 && dsap == 0)
return NULL;
@@ -770,6 +785,7 @@ static void nfc_llcp_recv_disc(struct nfc_llcp_local *local,
static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb)
{
struct nfc_llcp_sock *llcp_sock;
+ struct sock *sk;
u8 dsap, ssap;
dsap = nfc_llcp_dsap(skb);
@@ -788,10 +804,14 @@ static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb)
}
llcp_sock->dsap = ssap;
+ sk = &llcp_sock->sk;
nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE],
skb->len - LLCP_HEADER_SIZE);
+ sk->sk_state = LLCP_CONNECTED;
+ sk->sk_state_change(sk);
+
nfc_llcp_sock_put(llcp_sock);
}
@@ -814,6 +834,10 @@ static void nfc_llcp_rx_work(struct work_struct *work)
pr_debug("ptype 0x%x dsap 0x%x ssap 0x%x\n", ptype, dsap, ssap);
+ if (ptype != LLCP_PDU_SYMM)
+ print_hex_dump(KERN_DEBUG, "LLCP Rx: ", DUMP_PREFIX_OFFSET,
+ 16, 1, skb->data, skb->len, true);
+
switch (ptype) {
case LLCP_PDU_SYMM:
pr_debug("SYMM\n");
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index c13e02ebdef9..3f339b19d140 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -27,6 +27,42 @@
#include "../nfc.h"
#include "llcp.h"
+static int sock_wait_state(struct sock *sk, int state, unsigned long timeo)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ int err = 0;
+
+ pr_debug("sk %p", sk);
+
+ add_wait_queue(sk_sleep(sk), &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ while (sk->sk_state != state) {
+ if (!timeo) {
+ err = -EINPROGRESS;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ err = sock_intr_errno(timeo);
+ break;
+ }
+
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ err = sock_error(sk);
+ if (err)
+ break;
+ }
+
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(sk_sleep(sk), &wait);
+ return err;
+}
+
static struct proto llcp_sock_proto = {
.name = "NFC_LLCP",
.owner = THIS_MODULE,
@@ -304,11 +340,24 @@ static unsigned int llcp_sock_poll(struct file *file, struct socket *sock,
mask |= POLLERR;
if (!skb_queue_empty(&sk->sk_receive_queue))
- mask |= POLLIN;
+ mask |= POLLIN | POLLRDNORM;
if (sk->sk_state == LLCP_CLOSED)
mask |= POLLHUP;
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ mask |= POLLRDHUP | POLLIN | POLLRDNORM;
+
+ if (sk->sk_shutdown == SHUTDOWN_MASK)
+ mask |= POLLHUP;
+
+ if (sock_writeable(sk))
+ mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+ else
+ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+
+ pr_debug("mask 0x%x\n", mask);
+
return mask;
}
@@ -462,9 +511,13 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
if (ret)
goto put_dev;
- sk->sk_state = LLCP_CONNECTED;
+ ret = sock_wait_state(sk, LLCP_CONNECTED,
+ sock_sndtimeo(sk, flags & O_NONBLOCK));
+ if (ret)
+ goto put_dev;
release_sock(sk);
+
return 0;
put_dev:
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 9ec065bb9ee1..d560e6f13072 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -436,16 +436,16 @@ static void nci_stop_poll(struct nfc_dev *nfc_dev)
msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
}
-static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
- __u32 protocol)
+static int nci_activate_target(struct nfc_dev *nfc_dev,
+ struct nfc_target *target, __u32 protocol)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
struct nci_rf_discover_select_param param;
- struct nfc_target *target = NULL;
+ struct nfc_target *nci_target = NULL;
int i;
int rc = 0;
- pr_debug("target_idx %d, protocol 0x%x\n", target_idx, protocol);
+ pr_debug("target_idx %d, protocol 0x%x\n", target->idx, protocol);
if ((atomic_read(&ndev->state) != NCI_W4_HOST_SELECT) &&
(atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) {
@@ -459,25 +459,25 @@ static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
}
for (i = 0; i < ndev->n_targets; i++) {
- if (ndev->targets[i].idx == target_idx) {
- target = &ndev->targets[i];
+ if (ndev->targets[i].idx == target->idx) {
+ nci_target = &ndev->targets[i];
break;
}
}
- if (!target) {
+ if (!nci_target) {
pr_err("unable to find the selected target\n");
return -EINVAL;
}
- if (!(target->supported_protocols & (1 << protocol))) {
+ if (!(nci_target->supported_protocols & (1 << protocol))) {
pr_err("target does not support the requested protocol 0x%x\n",
protocol);
return -EINVAL;
}
if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) {
- param.rf_discovery_id = target->idx;
+ param.rf_discovery_id = nci_target->logical_idx;
if (protocol == NFC_PROTO_JEWEL)
param.rf_protocol = NCI_RF_PROTOCOL_T1T;
@@ -501,11 +501,12 @@ static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
return rc;
}
-static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
+static void nci_deactivate_target(struct nfc_dev *nfc_dev,
+ struct nfc_target *target)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
- pr_debug("target_idx %d\n", target_idx);
+ pr_debug("target_idx %d\n", target->idx);
if (!ndev->target_active_prot) {
pr_err("unable to deactivate target, no active target\n");
@@ -520,14 +521,14 @@ static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
}
}
-static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx,
+static int nci_data_exchange(struct nfc_dev *nfc_dev, struct nfc_target *target,
struct sk_buff *skb,
data_exchange_cb_t cb, void *cb_context)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
int rc;
- pr_debug("target_idx %d, len %d\n", target_idx, skb->len);
+ pr_debug("target_idx %d, len %d\n", target->idx, skb->len);
if (!ndev->target_active_prot) {
pr_err("unable to exchange data, no active target\n");
diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
index a0bc326308a5..76c48c5324f8 100644
--- a/net/nfc/nci/data.c
+++ b/net/nfc/nci/data.c
@@ -49,7 +49,7 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
if (cb) {
ndev->data_exchange_cb = NULL;
- ndev->data_exchange_cb_context = 0;
+ ndev->data_exchange_cb_context = NULL;
/* forward skb to nfc core */
cb(cb_context, skb, err);
@@ -200,10 +200,10 @@ static void nci_add_rx_data_frag(struct nci_dev *ndev,
pr_err("error adding room for accumulated rx data\n");
kfree_skb(skb);
- skb = 0;
+ skb = NULL;
kfree_skb(ndev->rx_data_reassembly);
- ndev->rx_data_reassembly = 0;
+ ndev->rx_data_reassembly = NULL;
err = -ENOMEM;
goto exit;
@@ -216,7 +216,7 @@ static void nci_add_rx_data_frag(struct nci_dev *ndev,
/* third, free old reassembly */
kfree_skb(ndev->rx_data_reassembly);
- ndev->rx_data_reassembly = 0;
+ ndev->rx_data_reassembly = NULL;
}
if (pbf == NCI_PBF_CONT) {
diff --git a/net/nfc/nci/lib.c b/net/nfc/nci/lib.c
index 6a63e5eb483d..6b7fd26c68d9 100644
--- a/net/nfc/nci/lib.c
+++ b/net/nfc/nci/lib.c
@@ -31,6 +31,7 @@
#include <linux/errno.h>
#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
/* NCI status codes to Unix errno mapping */
int nci_to_errno(__u8 code)
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index 2e3dee42196d..cb2646179e5f 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -227,7 +227,7 @@ static void nci_add_new_target(struct nci_dev *ndev,
for (i = 0; i < ndev->n_targets; i++) {
target = &ndev->targets[i];
- if (target->idx == ntf->rf_discovery_id) {
+ if (target->logical_idx == ntf->rf_discovery_id) {
/* This target already exists, add the new protocol */
nci_add_new_protocol(ndev, target, ntf->rf_protocol,
ntf->rf_tech_and_mode,
@@ -248,10 +248,10 @@ static void nci_add_new_target(struct nci_dev *ndev,
ntf->rf_tech_and_mode,
&ntf->rf_tech_specific_params);
if (!rc) {
- target->idx = ntf->rf_discovery_id;
+ target->logical_idx = ntf->rf_discovery_id;
ndev->n_targets++;
- pr_debug("target_idx %d, n_targets %d\n", target->idx,
+ pr_debug("logical idx %d, n_targets %d\n", target->logical_idx,
ndev->n_targets);
}
}
@@ -372,10 +372,11 @@ static void nci_target_auto_activated(struct nci_dev *ndev,
if (rc)
return;
- target->idx = ntf->rf_discovery_id;
+ target->logical_idx = ntf->rf_discovery_id;
ndev->n_targets++;
- pr_debug("target_idx %d, n_targets %d\n", target->idx, ndev->n_targets);
+ pr_debug("logical idx %d, n_targets %d\n",
+ target->logical_idx, ndev->n_targets);
nfc_targets_found(ndev->nfc_dev, ndev->targets, ndev->n_targets);
}
@@ -496,7 +497,7 @@ static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
/* drop partial rx data packet */
if (ndev->rx_data_reassembly) {
kfree_skb(ndev->rx_data_reassembly);
- ndev->rx_data_reassembly = 0;
+ ndev->rx_data_reassembly = NULL;
}
/* complete the data exchange transaction, if exists */
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 6404052d6c07..581d419083aa 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -33,7 +33,7 @@ static struct genl_multicast_group nfc_genl_event_mcgrp = {
.name = NFC_GENL_MCAST_EVENT_NAME,
};
-struct genl_family nfc_genl_family = {
+static struct genl_family nfc_genl_family = {
.id = GENL_ID_GENERATE,
.hdrsize = 0,
.name = NFC_GENL_NAME,
@@ -63,19 +63,23 @@ static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
genl_dump_check_consistent(cb, hdr, &nfc_genl_family);
- NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target->idx);
- NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, target->supported_protocols);
- NLA_PUT_U16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res);
- NLA_PUT_U8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res);
- if (target->nfcid1_len > 0)
- NLA_PUT(msg, NFC_ATTR_TARGET_NFCID1, target->nfcid1_len,
- target->nfcid1);
- if (target->sensb_res_len > 0)
- NLA_PUT(msg, NFC_ATTR_TARGET_SENSB_RES, target->sensb_res_len,
- target->sensb_res);
- if (target->sensf_res_len > 0)
- NLA_PUT(msg, NFC_ATTR_TARGET_SENSF_RES, target->sensf_res_len,
- target->sensf_res);
+ if (nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target->idx) ||
+ nla_put_u32(msg, NFC_ATTR_PROTOCOLS, target->supported_protocols) ||
+ nla_put_u16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res) ||
+ nla_put_u8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res))
+ goto nla_put_failure;
+ if (target->nfcid1_len > 0 &&
+ nla_put(msg, NFC_ATTR_TARGET_NFCID1, target->nfcid1_len,
+ target->nfcid1))
+ goto nla_put_failure;
+ if (target->sensb_res_len > 0 &&
+ nla_put(msg, NFC_ATTR_TARGET_SENSB_RES, target->sensb_res_len,
+ target->sensb_res))
+ goto nla_put_failure;
+ if (target->sensf_res_len > 0 &&
+ nla_put(msg, NFC_ATTR_TARGET_SENSF_RES, target->sensf_res_len,
+ target->sensf_res))
+ goto nla_put_failure;
return genlmsg_end(msg, hdr);
@@ -124,7 +128,7 @@ static int nfc_genl_dump_targets(struct sk_buff *skb,
cb->args[1] = (long) dev;
}
- spin_lock_bh(&dev->targets_lock);
+ device_lock(&dev->dev);
cb->seq = dev->targets_generation;
@@ -137,7 +141,7 @@ static int nfc_genl_dump_targets(struct sk_buff *skb,
i++;
}
- spin_unlock_bh(&dev->targets_lock);
+ device_unlock(&dev->dev);
cb->args[0] = i;
@@ -170,7 +174,8 @@ int nfc_genl_targets_found(struct nfc_dev *dev)
if (!hdr)
goto free_msg;
- NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
+ if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
+ goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -183,6 +188,37 @@ free_msg:
return -EMSGSIZE;
}
+int nfc_genl_target_lost(struct nfc_dev *dev, u32 target_idx)
+{
+ struct sk_buff *msg;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
+ NFC_EVENT_TARGET_LOST);
+ if (!hdr)
+ goto free_msg;
+
+ if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) ||
+ nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target_idx))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+
+ genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL);
+
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+free_msg:
+ nlmsg_free(msg);
+ return -EMSGSIZE;
+}
+
int nfc_genl_device_added(struct nfc_dev *dev)
{
struct sk_buff *msg;
@@ -197,10 +233,11 @@ int nfc_genl_device_added(struct nfc_dev *dev)
if (!hdr)
goto free_msg;
- NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev));
- NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
- NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols);
- NLA_PUT_U8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up);
+ if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) ||
+ nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) ||
+ nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) ||
+ nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up))
+ goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -229,7 +266,8 @@ int nfc_genl_device_removed(struct nfc_dev *dev)
if (!hdr)
goto free_msg;
- NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
+ if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
+ goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -259,10 +297,11 @@ static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev,
if (cb)
genl_dump_check_consistent(cb, hdr, &nfc_genl_family);
- NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev));
- NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
- NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols);
- NLA_PUT_U8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up);
+ if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) ||
+ nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) ||
+ nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) ||
+ nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up))
+ goto nla_put_failure;
return genlmsg_end(msg, hdr);
@@ -339,11 +378,14 @@ int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx,
if (!hdr)
goto free_msg;
- NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
- if (rf_mode == NFC_RF_INITIATOR)
- NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target_idx);
- NLA_PUT_U8(msg, NFC_ATTR_COMM_MODE, comm_mode);
- NLA_PUT_U8(msg, NFC_ATTR_RF_MODE, rf_mode);
+ if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
+ goto nla_put_failure;
+ if (rf_mode == NFC_RF_INITIATOR &&
+ nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target_idx))
+ goto nla_put_failure;
+ if (nla_put_u8(msg, NFC_ATTR_COMM_MODE, comm_mode) ||
+ nla_put_u8(msg, NFC_ATTR_RF_MODE, rf_mode))
+ goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -376,7 +418,8 @@ int nfc_genl_dep_link_down_event(struct nfc_dev *dev)
if (!hdr)
goto free_msg;
- NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
+ if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
+ goto nla_put_failure;
genlmsg_end(msg, hdr);
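[Note: every netlink.c hunk above is an instance of one mechanical conversion. The old NLA_PUT*() macros hid a `goto nla_put_failure;` inside their expansion, so they silently required that label in the caller. The nla_put_*() functions return a nonzero error instead, and the caller branches explicitly. A minimal sketch of the idiom, with hypothetical attribute names ATTR_FOO/ATTR_BAR:

	/* Before: the macro jumped to nla_put_failure behind the caller's
	 * back.  After: failures are explicit, and several puts can be
	 * chained with ||, so the first one that does not fit in the
	 * message takes the error path.
	 */
	if (nla_put_u32(msg, ATTR_FOO, foo) ||
	    nla_put_u32(msg, ATTR_BAR, bar))
		goto nla_put_failure;
]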
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index ec8794c1099c..3dd4232ae664 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -84,7 +84,7 @@ static inline int nfc_llcp_set_remote_gb(struct nfc_dev *dev,
return 0;
}
-static inline u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, u8 *gb_len)
+static inline u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *gb_len)
{
*gb_len = 0;
return NULL;
@@ -119,6 +119,7 @@ void nfc_genl_data_init(struct nfc_genl_data *genl_data);
void nfc_genl_data_exit(struct nfc_genl_data *genl_data);
int nfc_genl_targets_found(struct nfc_dev *dev);
+int nfc_genl_target_lost(struct nfc_dev *dev, u32 target_idx);
int nfc_genl_device_added(struct nfc_dev *dev);
int nfc_genl_device_removed(struct nfc_dev *dev);
@@ -127,7 +128,7 @@ int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx,
u8 comm_mode, u8 rf_mode);
int nfc_genl_dep_link_down_event(struct nfc_dev *dev);
-struct nfc_dev *nfc_get_device(unsigned idx);
+struct nfc_dev *nfc_get_device(unsigned int idx);
static inline void nfc_put_device(struct nfc_dev *dev)
{
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index 5a839ceb2e82..ec1134c9e07f 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -92,6 +92,12 @@ static int rawsock_connect(struct socket *sock, struct sockaddr *_addr,
goto error;
}
+ if (addr->target_idx > dev->target_next_idx - 1 ||
+ addr->target_idx < dev->target_next_idx - dev->n_targets) {
+ rc = -EINVAL;
+ goto error;
+ }
+
rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol);
if (rc)
goto put_dev;
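[Note: the added range check treats target indices as a sliding window. Indices are handed out monotonically, so at any moment the valid ones are the last n_targets values before target_next_idx; anything outside that window names a target that was lost or never existed and is rejected before nfc_activate_target() is attempted. Restated as a sketch (same fields as above, `valid` is a hypothetical local):

	bool valid;

	valid = addr->target_idx >= dev->target_next_idx - dev->n_targets &&
		addr->target_idx <= dev->target_next_idx - 1;
]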
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index e44e631ea952..2c74daa5aca5 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -321,7 +321,7 @@ static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb,
return -ENOMEM;
nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb));
- if (!skb)
+ if (!nskb)
return -ENOMEM;
nskb->vlan_tci = 0;
@@ -421,6 +421,19 @@ static int validate_sample(const struct nlattr *attr,
return validate_actions(actions, key, depth + 1);
}
+static int validate_tp_port(const struct sw_flow_key *flow_key)
+{
+ if (flow_key->eth.type == htons(ETH_P_IP)) {
+ if (flow_key->ipv4.tp.src && flow_key->ipv4.tp.dst)
+ return 0;
+ } else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
+ if (flow_key->ipv6.tp.src && flow_key->ipv6.tp.dst)
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
static int validate_set(const struct nlattr *a,
const struct sw_flow_key *flow_key)
{
@@ -462,18 +475,13 @@ static int validate_set(const struct nlattr *a,
if (flow_key->ip.proto != IPPROTO_TCP)
return -EINVAL;
- if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
- return -EINVAL;
-
- break;
+ return validate_tp_port(flow_key);
case OVS_KEY_ATTR_UDP:
if (flow_key->ip.proto != IPPROTO_UDP)
return -EINVAL;
- if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
- return -EINVAL;
- break;
+ return validate_tp_port(flow_key);
default:
return -EINVAL;
@@ -778,15 +786,18 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
tcp_flags = flow->tcp_flags;
spin_unlock_bh(&flow->lock);
- if (used)
- NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used));
+ if (used &&
+ nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
+ goto nla_put_failure;
- if (stats.n_packets)
- NLA_PUT(skb, OVS_FLOW_ATTR_STATS,
- sizeof(struct ovs_flow_stats), &stats);
+ if (stats.n_packets &&
+ nla_put(skb, OVS_FLOW_ATTR_STATS,
+ sizeof(struct ovs_flow_stats), &stats))
+ goto nla_put_failure;
- if (tcp_flags)
- NLA_PUT_U8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags);
+ if (tcp_flags &&
+ nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags))
+ goto nla_put_failure;
/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
* this is the first flow to be dumped into 'skb'. This is unusual for
@@ -1168,7 +1179,8 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
goto nla_put_failure;
get_dp_stats(dp, &dp_stats);
- NLA_PUT(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats);
+ if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats))
+ goto nla_put_failure;
return genlmsg_end(skb, ovs_header);
@@ -1468,14 +1480,16 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
ovs_header->dp_ifindex = get_dpifindex(vport->dp);
- NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
- NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type);
- NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport));
- NLA_PUT_U32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid);
+ if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
+ nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
+ nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
+ nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid))
+ goto nla_put_failure;
ovs_vport_get_stats(vport, &vport_stats);
- NLA_PUT(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
- &vport_stats);
+ if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
+ &vport_stats))
+ goto nla_put_failure;
err = ovs_vport_get_options(vport, skb);
if (err == -EMSGSIZE)
@@ -1641,10 +1655,9 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
OVS_VPORT_CMD_NEW);
if (IS_ERR(reply)) {
- err = PTR_ERR(reply);
netlink_set_err(init_net.genl_sock, 0,
- ovs_dp_vport_multicast_group.id, err);
- return 0;
+ ovs_dp_vport_multicast_group.id, PTR_ERR(reply));
+ goto exit_unlock;
}
genl_notify(reply, genl_info_net(info), info->snd_pid,
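[Note: three distinct fixes sit in this datapath.c diff. The `!skb` to `!nskb` change closes a real bug; validate_tp_port() factors out the transport-port sanity check that was previously duplicated, and IPv4-only, in the TCP and UDP cases of validate_set(); and the ovs_vport_cmd_set() error path used to `return 0` directly, skipping the function's unlock label, so funnelling it through exit_unlock presumably keeps lock acquisition and release balanced. A sketch of the hazard behind the pointer fix:

	/* The helper may free the skb it was given and return a new one
	 * (or NULL), so only the returned pointer may be tested or used
	 * afterwards -- testing the stale variable could never be NULL.
	 */
	nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb));
	if (!nskb)
		return -ENOMEM;
]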
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 1252c3081ef1..6d4d8097cf96 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -183,7 +183,8 @@ void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
u8 tcp_flags = 0;
if (flow->key.eth.type == htons(ETH_P_IP) &&
- flow->key.ip.proto == IPPROTO_TCP) {
+ flow->key.ip.proto == IPPROTO_TCP &&
+ likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
u8 *tcp = (u8 *)tcp_hdr(skb);
tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
}
@@ -1174,11 +1175,13 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
struct ovs_key_ethernet *eth_key;
struct nlattr *nla, *encap;
- if (swkey->phy.priority)
- NLA_PUT_U32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority);
+ if (swkey->phy.priority &&
+ nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority))
+ goto nla_put_failure;
- if (swkey->phy.in_port != USHRT_MAX)
- NLA_PUT_U32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port);
+ if (swkey->phy.in_port != USHRT_MAX &&
+ nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port))
+ goto nla_put_failure;
nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
if (!nla)
@@ -1188,8 +1191,9 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN);
if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
- NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q));
- NLA_PUT_BE16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci);
+ if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)) ||
+ nla_put_be16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci))
+ goto nla_put_failure;
encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
if (!swkey->eth.tci)
goto unencap;
@@ -1200,7 +1204,8 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
if (swkey->eth.type == htons(ETH_P_802_2))
goto unencap;
- NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type);
+ if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type))
+ goto nla_put_failure;
if (swkey->eth.type == htons(ETH_P_IP)) {
struct ovs_key_ipv4 *ipv4_key;
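[Note: the added length test in ovs_flow_used() guards the raw read of the TCP flags byte. tcp_hdr(skb) is only meaningful if the skb actually contains a complete TCP header past the transport offset; a crafted short packet would otherwise be read out of bounds. The likely() annotation just tells the compiler the well-formed case is common. The guard in isolation (`flags` is a hypothetical local):

	u8 flags = 0;

	if (skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))
		flags = *((u8 *)tcp_hdr(skb) + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
]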
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index c1068aed03d1..3fd6c0d88e12 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -140,9 +140,9 @@ int ovs_netdev_get_ifindex(const struct vport *vport)
return netdev_vport->dev->ifindex;
}
-static unsigned packet_length(const struct sk_buff *skb)
+static unsigned int packet_length(const struct sk_buff *skb)
{
- unsigned length = skb->len - ETH_HLEN;
+ unsigned int length = skb->len - ETH_HLEN;
if (skb->protocol == htons(ETH_P_8021Q))
length -= VLAN_HLEN;
@@ -157,9 +157,9 @@ static int netdev_send(struct vport *vport, struct sk_buff *skb)
int len;
if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
- if (net_ratelimit())
- pr_warn("%s: dropped over-mtu packet: %d > %d\n",
- ovs_dp_name(vport->dp), packet_length(skb), mtu);
+ net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
+ ovs_dp_name(vport->dp),
+ packet_length(skb), mtu);
goto error;
}
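[Note: net_warn_ratelimited() and friends wrap the open-coded `if (net_ratelimit()) pr_*(...)` pair that keeps flood-prone log sites quiet. A sketch of the shape of the helper as of this era -- the exact definition lives in include/linux/net.h and may route through an intermediate macro:

	#define net_warn_ratelimited(fmt, ...)			\
		do {						\
			if (net_ratelimit())			\
				pr_warn(fmt, ##__VA_ARGS__);	\
		} while (0)
]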
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 4f2c0df79563..0f661745df0f 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1654,7 +1654,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
skb->data = skb_head;
skb->len = skb_len;
}
- kfree_skb(skb);
+ consume_skb(skb);
skb = nskb;
}
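[Note: kfree_skb() and consume_skb() release the skb identically; the split exists so the kfree_skb tracepoint, and drop-monitoring tools built on it, fire only for genuine drops. Here the original skb has been successfully handed over, so its release is consumption, not a drop. The general convention, as a sketch with a hypothetical `err`:

	if (unlikely(err))
		kfree_skb(skb);		/* error path: counts as a drop */
	else
		consume_skb(skb);	/* normal end of life: not a drop */
]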
@@ -1764,7 +1764,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
po->tp_reserve;
} else {
- unsigned maclen = skb_network_offset(skb);
+ unsigned int maclen = skb_network_offset(skb);
netoff = TPACKET_ALIGN(po->tp_hdrlen +
(maclen < 16 ? 16 : maclen)) +
po->tp_reserve;
@@ -3224,10 +3224,10 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
int len;
- int val;
+ int val, lv = sizeof(val);
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
- void *data;
+ void *data = &val;
struct tpacket_stats st;
union tpacket_stats_u st_u;
@@ -3242,21 +3242,17 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
switch (optname) {
case PACKET_STATISTICS:
- if (po->tp_version == TPACKET_V3) {
- len = sizeof(struct tpacket_stats_v3);
- } else {
- if (len > sizeof(struct tpacket_stats))
- len = sizeof(struct tpacket_stats);
- }
spin_lock_bh(&sk->sk_receive_queue.lock);
if (po->tp_version == TPACKET_V3) {
+ lv = sizeof(struct tpacket_stats_v3);
memcpy(&st_u.stats3, &po->stats,
- sizeof(struct tpacket_stats));
+ sizeof(struct tpacket_stats));
st_u.stats3.tp_freeze_q_cnt =
- po->stats_u.stats3.tp_freeze_q_cnt;
+ po->stats_u.stats3.tp_freeze_q_cnt;
st_u.stats3.tp_packets += po->stats.tp_drops;
data = &st_u.stats3;
} else {
+ lv = sizeof(struct tpacket_stats);
st = po->stats;
st.tp_packets += st.tp_drops;
data = &st;
@@ -3265,31 +3261,16 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
spin_unlock_bh(&sk->sk_receive_queue.lock);
break;
case PACKET_AUXDATA:
- if (len > sizeof(int))
- len = sizeof(int);
val = po->auxdata;
-
- data = &val;
break;
case PACKET_ORIGDEV:
- if (len > sizeof(int))
- len = sizeof(int);
val = po->origdev;
-
- data = &val;
break;
case PACKET_VNET_HDR:
- if (len > sizeof(int))
- len = sizeof(int);
val = po->has_vnet_hdr;
-
- data = &val;
break;
case PACKET_VERSION:
- if (len > sizeof(int))
- len = sizeof(int);
val = po->tp_version;
- data = &val;
break;
case PACKET_HDRLEN:
if (len > sizeof(int))
@@ -3309,39 +3290,28 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
default:
return -EINVAL;
}
- data = &val;
break;
case PACKET_RESERVE:
- if (len > sizeof(unsigned int))
- len = sizeof(unsigned int);
val = po->tp_reserve;
- data = &val;
break;
case PACKET_LOSS:
- if (len > sizeof(unsigned int))
- len = sizeof(unsigned int);
val = po->tp_loss;
- data = &val;
break;
case PACKET_TIMESTAMP:
- if (len > sizeof(int))
- len = sizeof(int);
val = po->tp_tstamp;
- data = &val;
break;
case PACKET_FANOUT:
- if (len > sizeof(int))
- len = sizeof(int);
val = (po->fanout ?
((u32)po->fanout->id |
((u32)po->fanout->type << 16)) :
0);
- data = &val;
break;
default:
return -ENOPROTOOPT;
}
+ if (len > lv)
+ len = lv;
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, data, len))
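[Note: the packet_getsockopt() rewrite replaces a clamp-and-point stanza in every case with two defaults -- `data = &val` and `lv = sizeof(val)` -- that a case overrides only when it returns something larger, plus a single clamp before the copy-out. A condensed sketch of the resulting shape (most case bodies elided; `st` stands for the tpacket_stats filled earlier in the function):

	int val, lv = sizeof(val);	/* defaults cover the plain-int options */
	void *data = &val;

	switch (optname) {
	case PACKET_STATISTICS:		/* a larger payload overrides both */
		lv = sizeof(struct tpacket_stats);
		data = &st;
		break;
	case PACKET_AUXDATA:		/* a plain-int case keeps the defaults */
		val = po->auxdata;
		break;
	}

	if (len > lv)			/* one clamp instead of one per case */
		len = lv;
]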
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index d65f699fbf34..779ce4ff92ec 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -129,7 +129,7 @@ static const struct net_proto_family phonet_proto_family = {
/* Phonet device header operations */
static int pn_header_create(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
- const void *saddr, unsigned len)
+ const void *saddr, unsigned int len)
{
u8 *media = skb_push(skb, 1);
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 9726fe684ab8..9dd4f926f7d1 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -273,7 +273,7 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
hdr = pnp_hdr(skb);
if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n",
- (unsigned)hdr->data[0]);
+ (unsigned int)hdr->data[0]);
return -EOPNOTSUPP;
}
@@ -305,7 +305,7 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
default:
LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP indication: %u\n",
- (unsigned)hdr->data[1]);
+ (unsigned int)hdr->data[1]);
return -EOPNOTSUPP;
}
if (wake)
@@ -478,9 +478,9 @@ static void pipe_destruct(struct sock *sk)
skb_queue_purge(&pn->ctrlreq_queue);
}
-static u8 pipe_negotiate_fc(const u8 *fcs, unsigned n)
+static u8 pipe_negotiate_fc(const u8 *fcs, unsigned int n)
{
- unsigned i;
+ unsigned int i;
u8 final_fc = PN_NO_FLOW_CONTROL;
for (i = 0; i < n; i++) {
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index 9b9a85ecc4c7..36f75a9e2c3d 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -44,7 +44,7 @@ struct phonet_net {
struct phonet_routes routes;
};
-int phonet_net_id __read_mostly;
+static int phonet_net_id __read_mostly;
static struct phonet_net *phonet_pernet(struct net *net)
{
@@ -268,7 +268,7 @@ static int phonet_device_autoconf(struct net_device *dev)
static void phonet_route_autodel(struct net_device *dev)
{
struct phonet_net *pnn = phonet_pernet(dev_net(dev));
- unsigned i;
+ unsigned int i;
DECLARE_BITMAP(deleted, 64);
/* Remove left-over Phonet routes */
@@ -331,23 +331,6 @@ static int __net_init phonet_init_net(struct net *net)
static void __net_exit phonet_exit_net(struct net *net)
{
- struct phonet_net *pnn = phonet_pernet(net);
- struct net_device *dev;
- unsigned i;
-
- rtnl_lock();
- for_each_netdev(net, dev)
- phonet_device_destroy(dev);
-
- for (i = 0; i < 64; i++) {
- dev = pnn->routes.table[i];
- if (dev) {
- rtm_phonet_notify(RTM_DELROUTE, dev, i);
- dev_put(dev);
- }
- }
- rtnl_unlock();
-
proc_net_remove(net, "phonet");
}
@@ -361,7 +344,7 @@ static struct pernet_operations phonet_net_ops = {
/* Initialize Phonet devices list */
int __init phonet_device_init(void)
{
- int err = register_pernet_device(&phonet_net_ops);
+ int err = register_pernet_subsys(&phonet_net_ops);
if (err)
return err;
@@ -377,7 +360,7 @@ void phonet_device_exit(void)
{
rtnl_unregister_all(PF_PHONET);
unregister_netdevice_notifier(&phonet_device_notifier);
- unregister_pernet_device(&phonet_net_ops);
+ unregister_pernet_subsys(&phonet_net_ops);
proc_net_remove(&init_net, "pnresource");
}
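[Note: two related cleanups here. phonet_net_id loses its needless external linkage, and the pernet registration moves from register_pernet_device() to register_pernet_subsys(). The subsys variant orders its exit callback after the per-device teardown in a dying namespace, which is presumably what lets phonet_exit_net() drop its manual for_each_netdev() loop and route-table scan: the device-notifier path has already done that work by the time the pernet exit runs.]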
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index d61f6761777d..cfdf135fcd69 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -116,7 +116,8 @@ static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
ifm->ifa_flags = IFA_F_PERMANENT;
ifm->ifa_scope = RT_SCOPE_LINK;
ifm->ifa_index = dev->ifindex;
- NLA_PUT_U8(skb, IFA_LOCAL, addr);
+ if (nla_put_u8(skb, IFA_LOCAL, addr))
+ goto nla_put_failure;
return nlmsg_end(skb, nlh);
nla_put_failure:
@@ -183,8 +184,9 @@ static int fill_route(struct sk_buff *skb, struct net_device *dev, u8 dst,
rtm->rtm_scope = RT_SCOPE_UNIVERSE;
rtm->rtm_type = RTN_UNICAST;
rtm->rtm_flags = 0;
- NLA_PUT_U8(skb, RTA_DST, dst);
- NLA_PUT_U32(skb, RTA_OIF, dev->ifindex);
+ if (nla_put_u8(skb, RTA_DST, dst) ||
+ nla_put_u32(skb, RTA_OIF, dev->ifindex))
+ goto nla_put_failure;
return nlmsg_end(skb, nlh);
nla_put_failure:
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 4c7eff30dfa9..89cfa9ce4939 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -58,7 +58,7 @@ static struct {
void __init pn_sock_init(void)
{
- unsigned i;
+ unsigned int i;
for (i = 0; i < PN_HASHSIZE; i++)
INIT_HLIST_HEAD(pnsocks.hlist + i);
@@ -116,7 +116,7 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
{
struct hlist_head *hlist = pnsocks.hlist;
- unsigned h;
+ unsigned int h;
rcu_read_lock();
for (h = 0; h < PN_HASHSIZE; h++) {
@@ -545,7 +545,7 @@ static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
struct hlist_head *hlist = pnsocks.hlist;
struct hlist_node *node;
struct sock *sknode;
- unsigned h;
+ unsigned int h;
for (h = 0; h < PN_HASHSIZE; h++) {
sk_for_each_rcu(sknode, node, hlist) {
@@ -710,7 +710,7 @@ int pn_sock_unbind_res(struct sock *sk, u8 res)
void pn_sock_unbind_all_res(struct sock *sk)
{
- unsigned res, match = 0;
+ unsigned int res, match = 0;
mutex_lock(&resource_mutex);
for (res = 0; res < 256; res++) {
@@ -732,7 +732,7 @@ void pn_sock_unbind_all_res(struct sock *sk)
static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
{
struct net *net = seq_file_net(seq);
- unsigned i;
+ unsigned int i;
if (!net_eq(net, &init_net))
return NULL;
@@ -750,7 +750,7 @@ static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk)
{
struct net *net = seq_file_net(seq);
- unsigned i;
+ unsigned int i;
BUG_ON(!net_eq(net, &init_net));
diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c
index cea1c7dbdae2..696348fd31a1 100644
--- a/net/phonet/sysctl.c
+++ b/net/phonet/sysctl.c
@@ -27,6 +27,10 @@
#include <linux/errno.h>
#include <linux/init.h>
+#include <net/sock.h>
+#include <linux/phonet.h>
+#include <net/phonet/phonet.h>
+
#define DYNAMIC_PORT_MIN 0x40
#define DYNAMIC_PORT_MAX 0x7f
@@ -46,7 +50,8 @@ static void set_local_port_range(int range[2])
void phonet_get_local_port_range(int *min, int *max)
{
- unsigned seq;
+ unsigned int seq;
+
do {
seq = read_seqbegin(&local_port_range_lock);
if (min)
@@ -93,19 +98,13 @@ static struct ctl_table phonet_table[] = {
{ }
};
-static struct ctl_path phonet_ctl_path[] = {
- { .procname = "net", },
- { .procname = "phonet", },
- { },
-};
-
int __init phonet_sysctl_init(void)
{
- phonet_table_hrd = register_sysctl_paths(phonet_ctl_path, phonet_table);
+ phonet_table_hrd = register_net_sysctl(&init_net, "net/phonet", phonet_table);
return phonet_table_hrd == NULL ? -ENOMEM : 0;
}
void phonet_sysctl_exit(void)
{
- unregister_sysctl_table(phonet_table_hrd);
+ unregister_net_sysctl_table(phonet_table_hrd);
}
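[Note: this is the first of several identical conversions below (rds, rds/ib, rds/iw, rose). The boilerplate ctl_path array disappears; register_net_sysctl() takes the path as a plain string together with a struct net, so namespaced and global tables now share one registration API, paired with unregister_net_sysctl_table(). The resulting pattern, sketched with a hypothetical table name:

	static struct ctl_table_header *hdr;

	/* registration: path as a string, against init_net here */
	hdr = register_net_sysctl(&init_net, "net/example", example_table);
	if (!hdr)
		return -ENOMEM;

	/* teardown pairs with the _net_ variant */
	unregister_net_sysctl_table(hdr);
]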
diff --git a/net/rds/ib.h b/net/rds/ib.h
index edfaaaf164eb..8d2b3d5a7c21 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -186,8 +186,7 @@ struct rds_ib_device {
struct work_struct free_work;
};
-#define pcidev_to_node(pcidev) pcibus_to_node(pcidev->bus)
-#define ibdev_to_node(ibdev) pcidev_to_node(to_pci_dev(ibdev->dma_device))
+#define ibdev_to_node(ibdev) dev_to_node(ibdev->dma_device)
#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)
/* bits for i_ack_flags */
diff --git a/net/rds/ib_sysctl.c b/net/rds/ib_sysctl.c
index 1253b006efdb..7e643bafb4af 100644
--- a/net/rds/ib_sysctl.c
+++ b/net/rds/ib_sysctl.c
@@ -106,22 +106,15 @@ static ctl_table rds_ib_sysctl_table[] = {
{ }
};
-static struct ctl_path rds_ib_sysctl_path[] = {
- { .procname = "net", },
- { .procname = "rds", },
- { .procname = "ib", },
- { }
-};
-
void rds_ib_sysctl_exit(void)
{
if (rds_ib_sysctl_hdr)
- unregister_sysctl_table(rds_ib_sysctl_hdr);
+ unregister_net_sysctl_table(rds_ib_sysctl_hdr);
}
int rds_ib_sysctl_init(void)
{
- rds_ib_sysctl_hdr = register_sysctl_paths(rds_ib_sysctl_path, rds_ib_sysctl_table);
+ rds_ib_sysctl_hdr = register_net_sysctl(&init_net, "net/rds/ib", rds_ib_sysctl_table);
if (!rds_ib_sysctl_hdr)
return -ENOMEM;
return 0;
diff --git a/net/rds/iw_sysctl.c b/net/rds/iw_sysctl.c
index e2e47176e729..5d5ebd576f3f 100644
--- a/net/rds/iw_sysctl.c
+++ b/net/rds/iw_sysctl.c
@@ -109,22 +109,15 @@ static ctl_table rds_iw_sysctl_table[] = {
{ }
};
-static struct ctl_path rds_iw_sysctl_path[] = {
- { .procname = "net", },
- { .procname = "rds", },
- { .procname = "iw", },
- { }
-};
-
void rds_iw_sysctl_exit(void)
{
if (rds_iw_sysctl_hdr)
- unregister_sysctl_table(rds_iw_sysctl_hdr);
+ unregister_net_sysctl_table(rds_iw_sysctl_hdr);
}
int rds_iw_sysctl_init(void)
{
- rds_iw_sysctl_hdr = register_sysctl_paths(rds_iw_sysctl_path, rds_iw_sysctl_table);
+ rds_iw_sysctl_hdr = register_net_sysctl(&init_net, "net/rds/iw", rds_iw_sysctl_table);
if (!rds_iw_sysctl_hdr)
return -ENOMEM;
return 0;
diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c
index 25ad0c77a26c..907214b4c4d0 100644
--- a/net/rds/sysctl.c
+++ b/net/rds/sysctl.c
@@ -92,17 +92,10 @@ static ctl_table rds_sysctl_rds_table[] = {
{ }
};
-static struct ctl_path rds_sysctl_path[] = {
- { .procname = "net", },
- { .procname = "rds", },
- { }
-};
-
-
void rds_sysctl_exit(void)
{
if (rds_sysctl_reg_table)
- unregister_sysctl_table(rds_sysctl_reg_table);
+ unregister_net_sysctl_table(rds_sysctl_reg_table);
}
int rds_sysctl_init(void)
@@ -110,7 +103,7 @@ int rds_sysctl_init(void)
rds_sysctl_reconnect_min = msecs_to_jiffies(1);
rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min;
- rds_sysctl_reg_table = register_sysctl_paths(rds_sysctl_path, rds_sysctl_rds_table);
+ rds_sysctl_reg_table = register_net_sysctl(&init_net, "net/rds", rds_sysctl_rds_table);
if (!rds_sysctl_reg_table)
return -ENOMEM;
return 0;
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 8b5cc4aa8868..72981375f47c 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -145,7 +145,7 @@ int rds_tcp_listen_init(void)
if (ret < 0)
goto out;
- sock->sk->sk_reuse = 1;
+ sock->sk->sk_reuse = SK_CAN_REUSE;
rds_tcp_nonagle(sock);
write_lock_bh(&sock->sk->sk_callback_lock);
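[Note: SK_CAN_REUSE replaces a bare 1 written into sk->sk_reuse. The named constants -- SK_NO_REUSE, SK_CAN_REUSE, SK_FORCE_REUSE, as introduced around this time -- let kernel-internal sockets state intent instead of a magic number, with the same stored value:

	sock->sk->sk_reuse = SK_CAN_REUSE;	/* was: = 1 */
]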
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c
index 906cc05bba63..28dbdb911b85 100644
--- a/net/rose/rose_dev.c
+++ b/net/rose/rose_dev.c
@@ -37,7 +37,7 @@
static int rose_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
- const void *daddr, const void *saddr, unsigned len)
+ const void *daddr, const void *saddr, unsigned int len)
{
unsigned char *buff = skb_push(skb, ROSE_MIN_LEN + 2);
diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c
index 47f1fdb346b0..7ca57741b2fb 100644
--- a/net/rose/rose_subr.c
+++ b/net/rose/rose_subr.c
@@ -399,7 +399,7 @@ int rose_parse_facilities(unsigned char *p, unsigned packet_len,
facilities_len = *p++;
- if (facilities_len == 0 || (unsigned)facilities_len > packet_len)
+ if (facilities_len == 0 || (unsigned int)facilities_len > packet_len)
return 0;
while (facilities_len >= 3 && *p == 0x00) {
diff --git a/net/rose/sysctl_net_rose.c b/net/rose/sysctl_net_rose.c
index df6d9dac2186..94ca9c2ccd69 100644
--- a/net/rose/sysctl_net_rose.c
+++ b/net/rose/sysctl_net_rose.c
@@ -118,18 +118,12 @@ static ctl_table rose_table[] = {
{ }
};
-static struct ctl_path rose_path[] = {
- { .procname = "net", },
- { .procname = "rose", },
- { }
-};
-
void __init rose_register_sysctl(void)
{
- rose_table_header = register_sysctl_paths(rose_path, rose_table);
+ rose_table_header = register_net_sysctl(&init_net, "net/rose", rose_table);
}
void rose_unregister_sysctl(void)
{
- unregister_sysctl_table(rose_table_header);
+ unregister_net_sysctl_table(rose_table_header);
}
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 74c064c0dfdd..05996d0dd828 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -26,7 +26,7 @@ MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_RXRPC);
-unsigned rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
+unsigned int rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, "RxRPC debugging mask");
@@ -513,7 +513,7 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
- unsigned min_sec_level;
+ unsigned int min_sec_level;
int ret;
_enter(",%d,%d,,%d", level, optname, optlen);
@@ -555,13 +555,13 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
case RXRPC_MIN_SECURITY_LEVEL:
ret = -EINVAL;
- if (optlen != sizeof(unsigned))
+ if (optlen != sizeof(unsigned int))
goto error;
ret = -EISCONN;
if (rx->sk.sk_state != RXRPC_UNCONNECTED)
goto error;
ret = get_user(min_sec_level,
- (unsigned __user *) optval);
+ (unsigned int __user *) optval);
if (ret < 0)
goto error;
ret = -EINVAL;
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index c3126e864f3c..e4d9cbcff402 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -19,7 +19,7 @@
#include <net/af_rxrpc.h>
#include "ar-internal.h"
-static unsigned rxrpc_ack_defer = 1;
+static unsigned int rxrpc_ack_defer = 1;
static const char *const rxrpc_acks[] = {
"---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL",
@@ -548,11 +548,11 @@ static void rxrpc_zap_tx_window(struct rxrpc_call *call)
* process the extra information that may be appended to an ACK packet
*/
static void rxrpc_extract_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
- unsigned latest, int nAcks)
+ unsigned int latest, int nAcks)
{
struct rxrpc_ackinfo ackinfo;
struct rxrpc_peer *peer;
- unsigned mtu;
+ unsigned int mtu;
if (skb_copy_bits(skb, nAcks + 3, &ackinfo, sizeof(ackinfo)) < 0) {
_leave(" [no ackinfo]");
diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
index bf656c230ba9..a3bbb360a3f9 100644
--- a/net/rxrpc/ar-call.c
+++ b/net/rxrpc/ar-call.c
@@ -38,8 +38,8 @@ const char *const rxrpc_call_states[] = {
struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);
-static unsigned rxrpc_call_max_lifetime = 60;
-static unsigned rxrpc_dead_call_timeout = 2;
+static unsigned int rxrpc_call_max_lifetime = 60;
+static unsigned int rxrpc_dead_call_timeout = 2;
static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
index 1a2b0633fece..529572f18d1f 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/ar-input.c
@@ -76,7 +76,7 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
* --ANK */
// ret = -ENOBUFS;
// if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-// (unsigned) sk->sk_rcvbuf)
+// (unsigned int) sk->sk_rcvbuf)
// goto out;
ret = sk_filter(sk, skb);
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 8e22bd345e71..a693aca2ae2e 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -83,7 +83,7 @@ struct rxrpc_skb_priv {
struct rxrpc_call *call; /* call with which associated */
unsigned long resend_at; /* time in jiffies at which to resend */
union {
- unsigned offset; /* offset into buffer of next read */
+ unsigned int offset; /* offset into buffer of next read */
int remain; /* amount of space remaining for next write */
u32 error; /* network error code */
bool need_resend; /* T if needs resending */
@@ -176,9 +176,9 @@ struct rxrpc_peer {
struct list_head error_targets; /* targets for net error distribution */
spinlock_t lock; /* access lock */
atomic_t usage;
- unsigned if_mtu; /* interface MTU for this peer */
- unsigned mtu; /* network MTU for this peer */
- unsigned maxdata; /* data size (MTU - hdrsize) */
+ unsigned int if_mtu; /* interface MTU for this peer */
+ unsigned int mtu; /* network MTU for this peer */
+ unsigned int maxdata; /* data size (MTU - hdrsize) */
unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */
int debug_id; /* debug ID for printks */
int net_error; /* network error distributed */
@@ -187,8 +187,8 @@ struct rxrpc_peer {
/* calculated RTT cache */
#define RXRPC_RTT_CACHE_SIZE 32
suseconds_t rtt; /* current RTT estimate (in uS) */
- unsigned rtt_point; /* next entry at which to insert */
- unsigned rtt_usage; /* amount of cache actually used */
+ unsigned int rtt_point; /* next entry at which to insert */
+ unsigned int rtt_usage; /* amount of cache actually used */
suseconds_t rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */
};
@@ -271,7 +271,7 @@ struct rxrpc_connection {
} state;
int error; /* error code for local abort */
int debug_id; /* debug ID for printks */
- unsigned call_counter; /* call ID counter */
+ unsigned int call_counter; /* call ID counter */
atomic_t serial; /* packet serial number counter */
atomic_t hi_serial; /* highest serial number received */
u8 avail_calls; /* number of calls available */
@@ -592,7 +592,7 @@ extern struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *,
/*
* debug tracing
*/
-extern unsigned rxrpc_debug;
+extern unsigned int rxrpc_debug;
#define dbgprintk(FMT,...) \
printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index ae3a035f5390..8b1f9f49960f 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -82,7 +82,7 @@ static int rxrpc_vet_description_s(const char *desc)
* - the caller guarantees we have at least 4 words
*/
static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
- unsigned toklen)
+ unsigned int toklen)
{
struct rxrpc_key_token *token, **pptoken;
size_t plen;
@@ -210,10 +210,10 @@ static void rxrpc_rxk5_free(struct rxk5_key *rxk5)
*/
static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
const __be32 **_xdr,
- unsigned *_toklen)
+ unsigned int *_toklen)
{
const __be32 *xdr = *_xdr;
- unsigned toklen = *_toklen, n_parts, loop, tmp;
+ unsigned int toklen = *_toklen, n_parts, loop, tmp;
/* there must be at least one name, and at least #names+1 length
* words */
@@ -286,10 +286,10 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
size_t max_data_size,
const __be32 **_xdr,
- unsigned *_toklen)
+ unsigned int *_toklen)
{
const __be32 *xdr = *_xdr;
- unsigned toklen = *_toklen, len;
+ unsigned int toklen = *_toklen, len;
/* there must be at least one tag and one length word */
if (toklen <= 8)
@@ -330,11 +330,11 @@ static int rxrpc_krb5_decode_tagged_array(struct krb5_tagged_data **_td,
u8 max_n_elem,
size_t max_elem_size,
const __be32 **_xdr,
- unsigned *_toklen)
+ unsigned int *_toklen)
{
struct krb5_tagged_data *td;
const __be32 *xdr = *_xdr;
- unsigned toklen = *_toklen, n_elem, loop;
+ unsigned int toklen = *_toklen, n_elem, loop;
int ret;
/* there must be at least one count */
@@ -380,10 +380,10 @@ static int rxrpc_krb5_decode_tagged_array(struct krb5_tagged_data **_td,
* extract a krb5 ticket
*/
static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
- const __be32 **_xdr, unsigned *_toklen)
+ const __be32 **_xdr, unsigned int *_toklen)
{
const __be32 *xdr = *_xdr;
- unsigned toklen = *_toklen, len;
+ unsigned int toklen = *_toklen, len;
/* there must be at least one length word */
if (toklen <= 4)
@@ -419,7 +419,7 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
* - the caller guarantees we have at least 4 words
*/
static int rxrpc_instantiate_xdr_rxk5(struct key *key, const __be32 *xdr,
- unsigned toklen)
+ unsigned int toklen)
{
struct rxrpc_key_token *token, **pptoken;
struct rxk5_key *rxk5;
@@ -549,7 +549,7 @@ static int rxrpc_instantiate_xdr(struct key *key, const void *data, size_t datal
{
const __be32 *xdr = data, *token;
const char *cp;
- unsigned len, tmp, loop, ntoken, toklen, sec_ix;
+ unsigned int len, tmp, loop, ntoken, toklen, sec_ix;
int ret;
_enter(",{%x,%x,%x,%x},%zu",
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 7635107726ce..f226709ebd8f 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -31,7 +31,7 @@
#define REALM_SZ 40 /* size of principal's auth domain */
#define SNAME_SZ 40 /* size of service name */
-unsigned rxrpc_debug;
+unsigned int rxrpc_debug;
module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, "rxkad debugging mask");
@@ -207,7 +207,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
struct rxrpc_crypt iv;
struct scatterlist sg[16];
struct sk_buff *trailer;
- unsigned len;
+ unsigned int len;
u16 check;
int nsg;
@@ -826,7 +826,7 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
struct rxrpc_crypt iv, key;
struct scatterlist sg[1];
struct in_addr addr;
- unsigned life;
+ unsigned int life;
time_t issue, now;
bool little_endian;
int ret;
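[Note: every rxrpc hunk above, like the phonet and rose ones earlier, is the same checkpatch-driven cleanup: bare `unsigned` spelled out as `unsigned int`. The two types are identical in C; the sweep only normalizes style, e.g.:

	-	unsigned toklen = *_toklen, len;
	+	unsigned int toklen = *_toklen, len;
]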
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 75b58f81d53d..e7a8976bf25c 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -250,6 +250,28 @@ config NET_SCH_QFQ
If unsure, say N.
+config NET_SCH_CODEL
+ tristate "Controlled Delay AQM (CODEL)"
+ help
+ Say Y here if you want to use the Controlled Delay (CODEL)
+ packet scheduling algorithm.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sch_codel.
+
+ If unsure, say N.
+
+config NET_SCH_FQ_CODEL
+ tristate "Fair Queue Controlled Delay AQM (FQ_CODEL)"
+ help
+ Say Y here if you want to use the FQ Controlled Delay (FQ_CODEL)
+ packet scheduling algorithm.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sch_fq_codel.
+
+ If unsure, say N.
+
config NET_SCH_INGRESS
tristate "Ingress Qdisc"
depends on NET_CLS_ACT
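[Note: the two new Kconfig entries bring in CoDel and its fq_codel variant. CoDel (Nichols/Jacobson) is an AQM that keys on how long packets have waited in the queue rather than on queue length, dropping once the minimum sojourn time has stayed above a target for a full interval; fq_codel runs a CoDel instance per flow behind a stochastic classifier. A very rough sketch of the control idea -- hypothetical names throughout, not the kernel implementation:

	/* drop when queue delay has exceeded `target' for at least one
	 * `interval'; successive drops shorten the interval roughly as
	 * interval / sqrt(drop_count).
	 */
	if (sojourn > target && time_after(now, first_above + interval))
		codel_drop(skb);
]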
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 8cdf4e2b51d3..5940a1992f0d 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -37,6 +37,8 @@ obj-$(CONFIG_NET_SCH_PLUG) += sch_plug.o
obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o
obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o
obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o
+obj-$(CONFIG_NET_SCH_CODEL) += sch_codel.o
+obj-$(CONFIG_NET_SCH_FQ_CODEL) += sch_fq_codel.o
obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 93fdf131bd75..5cfb160df063 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -127,7 +127,8 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
nest = nla_nest_start(skb, a->order);
if (nest == NULL)
goto nla_put_failure;
- NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind);
+ if (nla_put_string(skb, TCA_KIND, a->ops->kind))
+ goto nla_put_failure;
for (i = 0; i < (hinfo->hmask + 1); i++) {
p = hinfo->htab[tcf_hash(i, hinfo->hmask)];
@@ -139,7 +140,8 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
p = s_p;
}
}
- NLA_PUT_U32(skb, TCA_FCNT, n_i);
+ if (nla_put_u32(skb, TCA_FCNT, n_i))
+ goto nla_put_failure;
nla_nest_end(skb, nest);
return n_i;
@@ -437,7 +439,8 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
if (a->ops == NULL || a->ops->dump == NULL)
return err;
- NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind);
+ if (nla_put_string(skb, TCA_KIND, a->ops->kind))
+ goto nla_put_failure;
if (tcf_action_copy_stats(skb, a, 0))
goto nla_put_failure;
nest = nla_nest_start(skb, TCA_OPTIONS);
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 453a73431ac4..2c8ad7c86e43 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -397,7 +397,7 @@ static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh,
while (len > 1) {
switch (xh[off]) {
- case IPV6_TLV_PAD0:
+ case IPV6_TLV_PAD1:
optlen = 1;
break;
case IPV6_TLV_JUMBO:
@@ -550,11 +550,13 @@ static int tcf_csum_dump(struct sk_buff *skb,
};
struct tcf_t t;
- NLA_PUT(skb, TCA_CSUM_PARMS, sizeof(opt), &opt);
+ if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
+ goto nla_put_failure;
t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
- NLA_PUT(skb, TCA_CSUM_TM, sizeof(t), &t);
+ if (nla_put(skb, TCA_CSUM_TM, sizeof(t), &t))
+ goto nla_put_failure;
return skb->len;
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index b77f5a06a658..f10fb8256442 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -162,7 +162,8 @@ static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
};
struct tcf_t t;
- NLA_PUT(skb, TCA_GACT_PARMS, sizeof(opt), &opt);
+ if (nla_put(skb, TCA_GACT_PARMS, sizeof(opt), &opt))
+ goto nla_put_failure;
#ifdef CONFIG_GACT_PROB
if (gact->tcfg_ptype) {
struct tc_gact_p p_opt = {
@@ -171,13 +172,15 @@ static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
.ptype = gact->tcfg_ptype,
};
- NLA_PUT(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt);
+ if (nla_put(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt))
+ goto nla_put_failure;
}
#endif
t.install = jiffies_to_clock_t(jiffies - gact->tcf_tm.install);
t.lastuse = jiffies_to_clock_t(jiffies - gact->tcf_tm.lastuse);
t.expires = jiffies_to_clock_t(gact->tcf_tm.expires);
- NLA_PUT(skb, TCA_GACT_TM, sizeof(t), &t);
+ if (nla_put(skb, TCA_GACT_TM, sizeof(t), &t))
+ goto nla_put_failure;
return skb->len;
nla_put_failure:
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 60f8f616e8fa..60e281ad0f07 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -1,5 +1,5 @@
/*
- * net/sched/ipt.c iptables target interface
+ * net/sched/ipt.c iptables target interface
*
*TODO: Add other tables. For now we only support the ipv4 table targets
*
@@ -235,9 +235,8 @@ static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
result = TC_ACT_PIPE;
break;
default:
- if (net_ratelimit())
- pr_notice("tc filter: Bogus netfilter code"
- " %d assume ACCEPT\n", ret);
+ net_notice_ratelimited("tc filter: Bogus netfilter code %d assume ACCEPT\n",
+ ret);
result = TC_POLICE_OK;
break;
}
@@ -267,15 +266,17 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
c.refcnt = ipt->tcf_refcnt - ref;
strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);
- NLA_PUT(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t);
- NLA_PUT_U32(skb, TCA_IPT_INDEX, ipt->tcf_index);
- NLA_PUT_U32(skb, TCA_IPT_HOOK, ipt->tcfi_hook);
- NLA_PUT(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c);
- NLA_PUT_STRING(skb, TCA_IPT_TABLE, ipt->tcfi_tname);
+ if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) ||
+ nla_put_u32(skb, TCA_IPT_INDEX, ipt->tcf_index) ||
+ nla_put_u32(skb, TCA_IPT_HOOK, ipt->tcfi_hook) ||
+ nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) ||
+ nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname))
+ goto nla_put_failure;
tm.install = jiffies_to_clock_t(jiffies - ipt->tcf_tm.install);
tm.lastuse = jiffies_to_clock_t(jiffies - ipt->tcf_tm.lastuse);
tm.expires = jiffies_to_clock_t(ipt->tcf_tm.expires);
- NLA_PUT(skb, TCA_IPT_TM, sizeof (tm), &tm);
+ if (nla_put(skb, TCA_IPT_TM, sizeof (tm), &tm))
+ goto nla_put_failure;
kfree(t);
return skb->len;
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index e051398fdf6b..fe81cc18e9e0 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -174,9 +174,8 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
}
if (!(dev->flags & IFF_UP)) {
- if (net_ratelimit())
- pr_notice("tc mirred to Houston: device %s is down\n",
- dev->name);
+ net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
+ dev->name);
goto out;
}
@@ -227,11 +226,13 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, i
};
struct tcf_t t;
- NLA_PUT(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt);
+ if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
+ goto nla_put_failure;
t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install);
t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse);
t.expires = jiffies_to_clock_t(m->tcf_tm.expires);
- NLA_PUT(skb, TCA_MIRRED_TM, sizeof(t), &t);
+ if (nla_put(skb, TCA_MIRRED_TM, sizeof(t), &t))
+ goto nla_put_failure;
return skb->len;
nla_put_failure:
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 001d1b354869..b5d029eb44f2 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -284,11 +284,13 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
};
struct tcf_t t;
- NLA_PUT(skb, TCA_NAT_PARMS, sizeof(opt), &opt);
+ if (nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt))
+ goto nla_put_failure;
t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
- NLA_PUT(skb, TCA_NAT_TM, sizeof(t), &t);
+ if (nla_put(skb, TCA_NAT_TM, sizeof(t), &t))
+ goto nla_put_failure;
return skb->len;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 10d3aed86560..26aa2f6ce257 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -215,11 +215,13 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
opt->refcnt = p->tcf_refcnt - ref;
opt->bindcnt = p->tcf_bindcnt - bind;
- NLA_PUT(skb, TCA_PEDIT_PARMS, s, opt);
+ if (nla_put(skb, TCA_PEDIT_PARMS, s, opt))
+ goto nla_put_failure;
t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
- NLA_PUT(skb, TCA_PEDIT_TM, sizeof(t), &t);
+ if (nla_put(skb, TCA_PEDIT_TM, sizeof(t), &t))
+ goto nla_put_failure;
kfree(opt);
return skb->len;
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 6fb3f5af0f85..a9de23297d47 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -356,11 +356,14 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
opt.rate = police->tcfp_R_tab->rate;
if (police->tcfp_P_tab)
opt.peakrate = police->tcfp_P_tab->rate;
- NLA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
- if (police->tcfp_result)
- NLA_PUT_U32(skb, TCA_POLICE_RESULT, police->tcfp_result);
- if (police->tcfp_ewma_rate)
- NLA_PUT_U32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate);
+ if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
+ goto nla_put_failure;
+ if (police->tcfp_result &&
+ nla_put_u32(skb, TCA_POLICE_RESULT, police->tcfp_result))
+ goto nla_put_failure;
+ if (police->tcfp_ewma_rate &&
+ nla_put_u32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate))
+ goto nla_put_failure;
return skb->len;
nla_put_failure:
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 73e0a3ab4d55..3922f2a2821b 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -172,12 +172,14 @@ static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
};
struct tcf_t t;
- NLA_PUT(skb, TCA_DEF_PARMS, sizeof(opt), &opt);
- NLA_PUT_STRING(skb, TCA_DEF_DATA, d->tcfd_defdata);
+ if (nla_put(skb, TCA_DEF_PARMS, sizeof(opt), &opt) ||
+ nla_put_string(skb, TCA_DEF_DATA, d->tcfd_defdata))
+ goto nla_put_failure;
t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
- NLA_PUT(skb, TCA_DEF_TM, sizeof(t), &t);
+ if (nla_put(skb, TCA_DEF_TM, sizeof(t), &t))
+ goto nla_put_failure;
return skb->len;
nla_put_failure:
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 35dbbe91027e..476e0fac6712 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -166,20 +166,25 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
};
struct tcf_t t;
- NLA_PUT(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt);
- if (d->flags & SKBEDIT_F_PRIORITY)
- NLA_PUT(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority),
- &d->priority);
- if (d->flags & SKBEDIT_F_QUEUE_MAPPING)
- NLA_PUT(skb, TCA_SKBEDIT_QUEUE_MAPPING,
- sizeof(d->queue_mapping), &d->queue_mapping);
- if (d->flags & SKBEDIT_F_MARK)
- NLA_PUT(skb, TCA_SKBEDIT_MARK, sizeof(d->mark),
- &d->mark);
+ if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
+ goto nla_put_failure;
+ if ((d->flags & SKBEDIT_F_PRIORITY) &&
+ nla_put(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority),
+ &d->priority))
+ goto nla_put_failure;
+ if ((d->flags & SKBEDIT_F_QUEUE_MAPPING) &&
+ nla_put(skb, TCA_SKBEDIT_QUEUE_MAPPING,
+ sizeof(d->queue_mapping), &d->queue_mapping))
+ goto nla_put_failure;
+ if ((d->flags & SKBEDIT_F_MARK) &&
+ nla_put(skb, TCA_SKBEDIT_MARK, sizeof(d->mark),
+ &d->mark))
+ goto nla_put_failure;
t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
- NLA_PUT(skb, TCA_SKBEDIT_TM, sizeof(t), &t);
+ if (nla_put(skb, TCA_SKBEDIT_TM, sizeof(t), &t))
+ goto nla_put_failure;
return skb->len;
nla_put_failure:
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index a69d44f1dac5..f452f696b4b3 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -357,7 +357,8 @@ static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp,
tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex;
tcm->tcm_parent = tp->classid;
tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
- NLA_PUT_STRING(skb, TCA_KIND, tp->ops->kind);
+ if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
+ goto nla_put_failure;
tcm->tcm_handle = fh;
if (RTM_DELTFILTER != event) {
tcm->tcm_handle = 0;
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index ea1f70b5a5f4..590960a22a77 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -257,8 +257,9 @@ static int basic_dump(struct tcf_proto *tp, unsigned long fh,
if (nest == NULL)
goto nla_put_failure;
- if (f->res.classid)
- NLA_PUT_U32(skb, TCA_BASIC_CLASSID, f->res.classid);
+ if (f->res.classid &&
+ nla_put_u32(skb, TCA_BASIC_CLASSID, f->res.classid))
+ goto nla_put_failure;
if (tcf_exts_dump(skb, &f->exts, &basic_ext_map) < 0 ||
tcf_em_tree_dump(skb, &f->ematches, TCA_BASIC_EMATCHES) < 0)
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 1afaa284fcd7..7743ea8d1d38 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -22,22 +22,6 @@
#include <net/sock.h>
#include <net/cls_cgroup.h>
-static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp);
-static void cgrp_destroy(struct cgroup *cgrp);
-static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp);
-
-struct cgroup_subsys net_cls_subsys = {
- .name = "net_cls",
- .create = cgrp_create,
- .destroy = cgrp_destroy,
- .populate = cgrp_populate,
-#ifdef CONFIG_NET_CLS_CGROUP
- .subsys_id = net_cls_subsys_id,
-#endif
- .module = THIS_MODULE,
-};
-
-
static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp)
{
return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id),
@@ -86,12 +70,19 @@ static struct cftype ss_files[] = {
.read_u64 = read_classid,
.write_u64 = write_classid,
},
+ { } /* terminate */
};
-static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
-{
- return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
-}
+struct cgroup_subsys net_cls_subsys = {
+ .name = "net_cls",
+ .create = cgrp_create,
+ .destroy = cgrp_destroy,
+#ifdef CONFIG_NET_CLS_CGROUP
+ .subsys_id = net_cls_subsys_id,
+#endif
+ .base_cftypes = ss_files,
+ .module = THIS_MODULE,
+};
struct cls_cgroup_head {
u32 handle;
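[Note: the cls_cgroup reshuffle tracks a cgroup core API change. Instead of a populate() callback calling cgroup_add_files(), the subsystem now points .base_cftypes at a cftype array that must be terminated by an empty entry -- hence the added `{ }` -- and the core registers the files itself. Moving the subsys struct below the functions it references also makes the forward declarations unnecessary.]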
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 1d8bd0dbcd1f..ccd08c8dc6a7 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -572,25 +572,32 @@ static int flow_dump(struct tcf_proto *tp, unsigned long fh,
if (nest == NULL)
goto nla_put_failure;
- NLA_PUT_U32(skb, TCA_FLOW_KEYS, f->keymask);
- NLA_PUT_U32(skb, TCA_FLOW_MODE, f->mode);
+ if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) ||
+ nla_put_u32(skb, TCA_FLOW_MODE, f->mode))
+ goto nla_put_failure;
if (f->mask != ~0 || f->xor != 0) {
- NLA_PUT_U32(skb, TCA_FLOW_MASK, f->mask);
- NLA_PUT_U32(skb, TCA_FLOW_XOR, f->xor);
+ if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) ||
+ nla_put_u32(skb, TCA_FLOW_XOR, f->xor))
+ goto nla_put_failure;
}
- if (f->rshift)
- NLA_PUT_U32(skb, TCA_FLOW_RSHIFT, f->rshift);
- if (f->addend)
- NLA_PUT_U32(skb, TCA_FLOW_ADDEND, f->addend);
+ if (f->rshift &&
+ nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift))
+ goto nla_put_failure;
+ if (f->addend &&
+ nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend))
+ goto nla_put_failure;
- if (f->divisor)
- NLA_PUT_U32(skb, TCA_FLOW_DIVISOR, f->divisor);
- if (f->baseclass)
- NLA_PUT_U32(skb, TCA_FLOW_BASECLASS, f->baseclass);
+ if (f->divisor &&
+ nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor))
+ goto nla_put_failure;
+ if (f->baseclass &&
+ nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass))
+ goto nla_put_failure;
- if (f->perturb_period)
- NLA_PUT_U32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ);
+ if (f->perturb_period &&
+ nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ))
+ goto nla_put_failure;
if (tcf_exts_dump(skb, &f->exts, &flow_ext_map) < 0)
goto nla_put_failure;
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 389af152ec45..8384a4797240 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -346,14 +346,17 @@ static int fw_dump(struct tcf_proto *tp, unsigned long fh,
if (nest == NULL)
goto nla_put_failure;
- if (f->res.classid)
- NLA_PUT_U32(skb, TCA_FW_CLASSID, f->res.classid);
+ if (f->res.classid &&
+ nla_put_u32(skb, TCA_FW_CLASSID, f->res.classid))
+ goto nla_put_failure;
#ifdef CONFIG_NET_CLS_IND
- if (strlen(f->indev))
- NLA_PUT_STRING(skb, TCA_FW_INDEV, f->indev);
+ if (strlen(f->indev) &&
+ nla_put_string(skb, TCA_FW_INDEV, f->indev))
+ goto nla_put_failure;
#endif /* CONFIG_NET_CLS_IND */
- if (head->mask != 0xFFFFFFFF)
- NLA_PUT_U32(skb, TCA_FW_MASK, head->mask);
+ if (head->mask != 0xFFFFFFFF &&
+ nla_put_u32(skb, TCA_FW_MASK, head->mask))
+ goto nla_put_failure;
if (tcf_exts_dump(skb, &f->exts, &fw_ext_map) < 0)
goto nla_put_failure;
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 13ab66e9df58..36fec4227401 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -571,17 +571,21 @@ static int route4_dump(struct tcf_proto *tp, unsigned long fh,
if (!(f->handle & 0x8000)) {
id = f->id & 0xFF;
- NLA_PUT_U32(skb, TCA_ROUTE4_TO, id);
+ if (nla_put_u32(skb, TCA_ROUTE4_TO, id))
+ goto nla_put_failure;
}
if (f->handle & 0x80000000) {
- if ((f->handle >> 16) != 0xFFFF)
- NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif);
+ if ((f->handle >> 16) != 0xFFFF &&
+ nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif))
+ goto nla_put_failure;
} else {
id = f->id >> 16;
- NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id);
+ if (nla_put_u32(skb, TCA_ROUTE4_FROM, id))
+ goto nla_put_failure;
}
- if (f->res.classid)
- NLA_PUT_U32(skb, TCA_ROUTE4_CLASSID, f->res.classid);
+ if (f->res.classid &&
+ nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid))
+ goto nla_put_failure;
if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
goto nla_put_failure;
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index b01427924f81..18ab93ec8d7e 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -615,18 +615,22 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
if (nest == NULL)
goto nla_put_failure;
- NLA_PUT(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst);
+ if (nla_put(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst))
+ goto nla_put_failure;
pinfo.dpi = s->dpi;
pinfo.spi = f->spi;
pinfo.protocol = s->protocol;
pinfo.tunnelid = s->tunnelid;
pinfo.tunnelhdr = f->tunnelhdr;
pinfo.pad = 0;
- NLA_PUT(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo);
- if (f->res.classid)
- NLA_PUT_U32(skb, TCA_RSVP_CLASSID, f->res.classid);
- if (((f->handle >> 8) & 0xFF) != 16)
- NLA_PUT(skb, TCA_RSVP_SRC, sizeof(f->src), f->src);
+ if (nla_put(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo))
+ goto nla_put_failure;
+ if (f->res.classid &&
+ nla_put_u32(skb, TCA_RSVP_CLASSID, f->res.classid))
+ goto nla_put_failure;
+ if (((f->handle >> 8) & 0xFF) != 16 &&
+ nla_put(skb, TCA_RSVP_SRC, sizeof(f->src), f->src))
+ goto nla_put_failure;
if (tcf_exts_dump(skb, &f->exts, &rsvp_ext_map) < 0)
goto nla_put_failure;
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index dbe199234c63..fe29420d0b0e 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -438,10 +438,11 @@ static int tcindex_dump(struct tcf_proto *tp, unsigned long fh,
if (!fh) {
t->tcm_handle = ~0; /* whatever ... */
- NLA_PUT_U32(skb, TCA_TCINDEX_HASH, p->hash);
- NLA_PUT_U16(skb, TCA_TCINDEX_MASK, p->mask);
- NLA_PUT_U32(skb, TCA_TCINDEX_SHIFT, p->shift);
- NLA_PUT_U32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through);
+ if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
+ nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
+ nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
+ nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
+ goto nla_put_failure;
nla_nest_end(skb, nest);
} else {
if (p->perfect) {
@@ -460,8 +461,9 @@ static int tcindex_dump(struct tcf_proto *tp, unsigned long fh,
}
}
pr_debug("handle = %d\n", t->tcm_handle);
- if (r->res.class)
- NLA_PUT_U32(skb, TCA_TCINDEX_CLASSID, r->res.classid);
+ if (r->res.class &&
+ nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
+ goto nla_put_failure;
if (tcf_exts_dump(skb, &r->exts, &tcindex_ext_map) < 0)
goto nla_put_failure;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 939b627b4795..d45373fb00b9 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -234,8 +234,7 @@ out:
return -1;
deadloop:
- if (net_ratelimit())
- pr_warning("cls_u32: dead loop\n");
+ net_warn_ratelimited("cls_u32: dead loop\n");
return -1;
}
@@ -733,36 +732,44 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
struct tc_u_hnode *ht = (struct tc_u_hnode *)fh;
u32 divisor = ht->divisor + 1;
- NLA_PUT_U32(skb, TCA_U32_DIVISOR, divisor);
+ if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
+ goto nla_put_failure;
} else {
- NLA_PUT(skb, TCA_U32_SEL,
- sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
- &n->sel);
+ if (nla_put(skb, TCA_U32_SEL,
+ sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
+ &n->sel))
+ goto nla_put_failure;
if (n->ht_up) {
u32 htid = n->handle & 0xFFFFF000;
- NLA_PUT_U32(skb, TCA_U32_HASH, htid);
+ if (nla_put_u32(skb, TCA_U32_HASH, htid))
+ goto nla_put_failure;
}
- if (n->res.classid)
- NLA_PUT_U32(skb, TCA_U32_CLASSID, n->res.classid);
- if (n->ht_down)
- NLA_PUT_U32(skb, TCA_U32_LINK, n->ht_down->handle);
+ if (n->res.classid &&
+ nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
+ goto nla_put_failure;
+ if (n->ht_down &&
+ nla_put_u32(skb, TCA_U32_LINK, n->ht_down->handle))
+ goto nla_put_failure;
#ifdef CONFIG_CLS_U32_MARK
- if (n->mark.val || n->mark.mask)
- NLA_PUT(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark);
+ if ((n->mark.val || n->mark.mask) &&
+ nla_put(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark))
+ goto nla_put_failure;
#endif
if (tcf_exts_dump(skb, &n->exts, &u32_ext_map) < 0)
goto nla_put_failure;
#ifdef CONFIG_NET_CLS_IND
- if (strlen(n->indev))
- NLA_PUT_STRING(skb, TCA_U32_INDEV, n->indev);
+ if (strlen(n->indev) &&
+ nla_put_string(skb, TCA_U32_INDEV, n->indev))
+ goto nla_put_failure;
#endif
#ifdef CONFIG_CLS_U32_PERF
- NLA_PUT(skb, TCA_U32_PCNT,
- sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
- n->pf);
+ if (nla_put(skb, TCA_U32_PCNT,
+ sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
+ n->pf))
+ goto nla_put_failure;
#endif
}
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 1363bf14e61b..4790c696cbce 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -585,8 +585,9 @@ static void meta_var_apply_extras(struct meta_value *v,
static int meta_var_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
{
- if (v->val && v->len)
- NLA_PUT(skb, tlv, v->len, (void *) v->val);
+ if (v->val && v->len &&
+ nla_put(skb, tlv, v->len, (void *) v->val))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -636,10 +637,13 @@ static void meta_int_apply_extras(struct meta_value *v,
static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
{
- if (v->len == sizeof(unsigned long))
- NLA_PUT(skb, tlv, sizeof(unsigned long), &v->val);
- else if (v->len == sizeof(u32))
- NLA_PUT_U32(skb, tlv, v->val);
+ if (v->len == sizeof(unsigned long)) {
+ if (nla_put(skb, tlv, sizeof(unsigned long), &v->val))
+ goto nla_put_failure;
+ } else if (v->len == sizeof(u32)) {
+ if (nla_put_u32(skb, tlv, v->val))
+ goto nla_put_failure;
+ }
return 0;
@@ -831,7 +835,8 @@ static int em_meta_dump(struct sk_buff *skb, struct tcf_ematch *em)
memcpy(&hdr.left, &meta->lvalue.hdr, sizeof(hdr.left));
memcpy(&hdr.right, &meta->rvalue.hdr, sizeof(hdr.right));
- NLA_PUT(skb, TCA_EM_META_HDR, sizeof(hdr), &hdr);
+ if (nla_put(skb, TCA_EM_META_HDR, sizeof(hdr), &hdr))
+ goto nla_put_failure;
ops = meta_type_ops(&meta->lvalue);
if (ops->dump(skb, &meta->lvalue, TCA_EM_META_LVALUE) < 0 ||
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index 88d93eb92507..3a633debb6df 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -441,7 +441,8 @@ int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv)
if (top_start == NULL)
goto nla_put_failure;
- NLA_PUT(skb, TCA_EMATCH_TREE_HDR, sizeof(tree->hdr), &tree->hdr);
+ if (nla_put(skb, TCA_EMATCH_TREE_HDR, sizeof(tree->hdr), &tree->hdr))
+ goto nla_put_failure;
list_start = nla_nest_start(skb, TCA_EMATCH_TREE_LIST);
if (list_start == NULL)
@@ -457,7 +458,8 @@ int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv)
.flags = em->flags
};
- NLA_PUT(skb, i + 1, sizeof(em_hdr), &em_hdr);
+ if (nla_put(skb, i + 1, sizeof(em_hdr), &em_hdr))
+ goto nla_put_failure;
if (em->ops && em->ops->dump) {
if (em->ops->dump(skb, em) < 0)
@@ -535,9 +537,7 @@ pop_stack:
return res;
stack_overflow:
- if (net_ratelimit())
- pr_warning("tc ematch: local stack overflow,"
- " increase NET_EMATCH_STACK\n");
+ net_warn_ratelimited("tc ematch: local stack overflow, increase NET_EMATCH_STACK\n");
return -1;
}
EXPORT_SYMBOL(__tcf_em_tree_match);
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 3d8981fde301..085ce53d570a 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -426,7 +426,8 @@ static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
nest = nla_nest_start(skb, TCA_STAB);
if (nest == NULL)
goto nla_put_failure;
- NLA_PUT(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts);
+ if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
+ goto nla_put_failure;
nla_nest_end(skb, nest);
return skb->len;
@@ -1201,7 +1202,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
tcm->tcm_parent = clid;
tcm->tcm_handle = q->handle;
tcm->tcm_info = atomic_read(&q->refcnt);
- NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
+ if (nla_put_string(skb, TCA_KIND, q->ops->id))
+ goto nla_put_failure;
if (q->ops->dump && q->ops->dump(q, skb) < 0)
goto nla_put_failure;
q->qstats.qlen = q->q.qlen;
@@ -1505,7 +1507,8 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
tcm->tcm_parent = q->handle;
tcm->tcm_handle = q->handle;
tcm->tcm_info = 0;
- NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
+ if (nla_put_string(skb, TCA_KIND, q->ops->id))
+ goto nla_put_failure;
if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
goto nla_put_failure;
@@ -1688,12 +1691,10 @@ reclassify:
tp = otp;
if (verd++ >= MAX_REC_LOOP) {
- if (net_ratelimit())
- pr_notice("%s: packet reclassify loop"
- " rule prio %u protocol %02x\n",
- tp->q->ops->id,
- tp->prio & 0xffff,
- ntohs(tp->protocol));
+ net_notice_ratelimited("%s: packet reclassify loop rule prio %u protocol %02x\n",
+ tp->q->ops->id,
+ tp->prio & 0xffff,
+ ntohs(tp->protocol));
return TC_ACT_SHOT;
}
skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index e25e49061a0d..8522a4793374 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -423,8 +423,6 @@ drop: __maybe_unused
}
return ret;
}
- qdisc_bstats_update(sch, skb);
- bstats_update(&flow->bstats, skb);
/*
* Okay, this may seem weird. We pretend we've dropped the packet if
* it goes via ATM. The reason for this is that the outer qdisc
@@ -472,6 +470,8 @@ static void sch_atm_dequeue(unsigned long data)
if (unlikely(!skb))
break;
+ qdisc_bstats_update(sch, skb);
+ bstats_update(&flow->bstats, skb);
pr_debug("atm_tc_dequeue: sending on class %p\n", flow);
/* remove any LL header somebody else has attached */
skb_pull(skb, skb_network_offset(skb));
@@ -601,7 +601,8 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
if (nest == NULL)
goto nla_put_failure;
- NLA_PUT(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr);
+ if (nla_put(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr))
+ goto nla_put_failure;
if (flow->vcc) {
struct sockaddr_atmpvc pvc;
int state;
@@ -610,15 +611,19 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
pvc.sap_addr.vpi = flow->vcc->vpi;
pvc.sap_addr.vci = flow->vcc->vci;
- NLA_PUT(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc);
+ if (nla_put(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc))
+ goto nla_put_failure;
state = ATM_VF2VS(flow->vcc->flags);
- NLA_PUT_U32(skb, TCA_ATM_STATE, state);
+ if (nla_put_u32(skb, TCA_ATM_STATE, state))
+ goto nla_put_failure;
+ }
+ if (flow->excess) {
+ if (nla_put_u32(skb, TCA_ATM_EXCESS, flow->classid))
+ goto nla_put_failure;
+ } else {
+ if (nla_put_u32(skb, TCA_ATM_EXCESS, 0))
+ goto nla_put_failure;
}
- if (flow->excess)
- NLA_PUT_U32(skb, TCA_ATM_EXCESS, flow->classid);
- else
- NLA_PUT_U32(skb, TCA_ATM_EXCESS, 0);
-
nla_nest_end(skb, nest);
return skb->len;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 24d94c097b35..6aabd77d1cfd 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1425,7 +1425,8 @@ static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
- NLA_PUT(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate);
+ if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
+ goto nla_put_failure;
return skb->len;
nla_put_failure:
@@ -1450,7 +1451,8 @@ static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
opt.minidle = (u32)(-cl->minidle);
opt.offtime = cl->offtime;
opt.change = ~0;
- NLA_PUT(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt);
+ if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
+ goto nla_put_failure;
return skb->len;
nla_put_failure:
@@ -1468,7 +1470,8 @@ static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
opt.priority = cl->priority + 1;
opt.cpriority = cl->cpriority + 1;
opt.weight = cl->weight;
- NLA_PUT(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt);
+ if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
+ goto nla_put_failure;
return skb->len;
nla_put_failure:
@@ -1485,7 +1488,8 @@ static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
opt.priority2 = cl->priority2 + 1;
opt.pad = 0;
opt.penalty = cl->penalty;
- NLA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt);
+ if (nla_put(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt))
+ goto nla_put_failure;
return skb->len;
nla_put_failure:
@@ -1502,7 +1506,8 @@ static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
opt.split = cl->split ? cl->split->common.classid : 0;
opt.defmap = cl->defmap;
opt.defchange = ~0;
- NLA_PUT(skb, TCA_CBQ_FOPT, sizeof(opt), &opt);
+ if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
+ goto nla_put_failure;
}
return skb->len;
@@ -1521,7 +1526,8 @@ static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
opt.police = cl->police;
opt.__res1 = 0;
opt.__res2 = 0;
- NLA_PUT(skb, TCA_CBQ_POLICE, sizeof(opt), &opt);
+ if (nla_put(skb, TCA_CBQ_POLICE, sizeof(opt), &opt))
+ goto nla_put_failure;
}
return skb->len;
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 7e267d7b9c75..cc37dd52ecf9 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -332,15 +332,13 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
q->stats.pdrop++;
- sch->qstats.drops++;
- kfree_skb(skb);
- return NET_XMIT_DROP;
+ return qdisc_drop(skb, sch);
- congestion_drop:
+congestion_drop:
qdisc_drop(skb, sch);
return NET_XMIT_CN;
- other_drop:
+other_drop:
if (ret & __NET_XMIT_BYPASS)
sch->qstats.drops++;
kfree_skb(skb);
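
The drop-path consolidations in this series rely on qdisc_drop() doing exactly what the open-coded sequences did. A trimmed sketch of the inline from include/net/sch_generic.h:

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	sch->qstats.drops++;
	return NET_XMIT_DROP;
}

Note the congestion_drop path above still overrides the return value with NET_XMIT_CN; only the free/count/return boilerplate is shared.
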
@@ -515,8 +513,9 @@ static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
if (opts == NULL)
goto nla_put_failure;
- NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt);
- NLA_PUT_U32(skb, TCA_CHOKE_MAX_P, q->parms.max_P);
+ if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) ||
+ nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P))
+ goto nla_put_failure;
return nla_nest_end(skb, opts);
nla_put_failure:
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
new file mode 100644
index 000000000000..2f9ab17db85a
--- /dev/null
+++ b/net/sched/sch_codel.c
@@ -0,0 +1,276 @@
+/*
+ * Codel - The Controlled-Delay Active Queue Management algorithm
+ *
+ * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
+ * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
+ *
+ * Implemented on Linux by:
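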
+ * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
+ * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the authors may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/prefetch.h>
+#include <net/pkt_sched.h>
+#include <net/codel.h>
+
+
+#define DEFAULT_CODEL_LIMIT 1000
+
+struct codel_sched_data {
+ struct codel_params params;
+ struct codel_vars vars;
+ struct codel_stats stats;
+ u32 drop_overlimit;
+};
+
+/* This is the specific function called from codel_dequeue()
+ * to dequeue a packet from the queue. Note: backlog is handled in
+ * codel, so we don't need to reduce it here.
+ */
+static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
+{
+ struct sk_buff *skb = __skb_dequeue(&sch->q);
+
+ prefetch(&skb->end); /* we'll need skb_shinfo() */
+ return skb;
+}
+
+static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+
+ skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
+
+	/* We can't call qdisc_tree_decrease_qlen() if our qlen is 0,
+	 * or HTB crashes. Defer it for the next round.
+ */
+ if (q->stats.drop_count && sch->q.qlen) {
+ qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
+ q->stats.drop_count = 0;
+ }
+ if (skb)
+ qdisc_bstats_update(sch, skb);
+ return skb;
+}
+
+static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+ struct codel_sched_data *q;
+
+ if (likely(qdisc_qlen(sch) < sch->limit)) {
+ codel_set_enqueue_time(skb);
+ return qdisc_enqueue_tail(skb, sch);
+ }
+ q = qdisc_priv(sch);
+ q->drop_overlimit++;
+ return qdisc_drop(skb, sch);
+}
+
+static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
+ [TCA_CODEL_TARGET] = { .type = NLA_U32 },
+ [TCA_CODEL_LIMIT] = { .type = NLA_U32 },
+ [TCA_CODEL_INTERVAL] = { .type = NLA_U32 },
+ [TCA_CODEL_ECN] = { .type = NLA_U32 },
+};
+
+static int codel_change(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+ struct nlattr *tb[TCA_CODEL_MAX + 1];
+ unsigned int qlen;
+ int err;
+
+ if (!opt)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_CODEL_MAX, opt, codel_policy);
+ if (err < 0)
+ return err;
+
+ sch_tree_lock(sch);
+
+ if (tb[TCA_CODEL_TARGET]) {
+ u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);
+
+ q->params.target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT;
+ }
+
+ if (tb[TCA_CODEL_INTERVAL]) {
+ u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);
+
+ q->params.interval = ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT;
+ }
+
+ if (tb[TCA_CODEL_LIMIT])
+ sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]);
+
+ if (tb[TCA_CODEL_ECN])
+ q->params.ecn = !!nla_get_u32(tb[TCA_CODEL_ECN]);
+
+ qlen = sch->q.qlen;
+ while (sch->q.qlen > sch->limit) {
+ struct sk_buff *skb = __skb_dequeue(&sch->q);
+
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_drop(skb, sch);
+ }
+ qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+
+ sch_tree_unlock(sch);
+ return 0;
+}
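
The shifts in codel_change() convert the microseconds spoken over netlink into CoDel's internal tick of 2^CODEL_SHIFT nanoseconds, and codel_time_to_us() in the dump path converts back. A standalone round-trip demo, assuming CODEL_SHIFT is 10 as in net/codel.h (so one tick is 1024 ns):

#include <stdint.h>
#include <stdio.h>

#define CODEL_SHIFT 10			/* one codel tick = 2^10 ns */
#define NSEC_PER_USEC 1000ULL

int main(void)
{
	uint32_t target_us = 5000;	/* 5 ms */
	uint64_t ticks = ((uint64_t)target_us * NSEC_PER_USEC) >> CODEL_SHIFT;
	uint64_t back_us = (ticks << CODEL_SHIFT) / NSEC_PER_USEC;

	/* prints: 5000 us -> 4882 ticks -> 4999 us */
	printf("%u us -> %llu ticks -> %llu us\n", target_us,
	       (unsigned long long)ticks, (unsigned long long)back_us);
	return 0;
}
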
+
+static int codel_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+
+ sch->limit = DEFAULT_CODEL_LIMIT;
+
+ codel_params_init(&q->params);
+ codel_vars_init(&q->vars);
+ codel_stats_init(&q->stats);
+
+ if (opt) {
+ int err = codel_change(sch, opt);
+
+ if (err)
+ return err;
+ }
+
+ if (sch->limit >= 1)
+ sch->flags |= TCQ_F_CAN_BYPASS;
+ else
+ sch->flags &= ~TCQ_F_CAN_BYPASS;
+
+ return 0;
+}
+
+static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+ struct nlattr *opts;
+
+ opts = nla_nest_start(skb, TCA_OPTIONS);
+ if (opts == NULL)
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_CODEL_TARGET,
+ codel_time_to_us(q->params.target)) ||
+ nla_put_u32(skb, TCA_CODEL_LIMIT,
+ sch->limit) ||
+ nla_put_u32(skb, TCA_CODEL_INTERVAL,
+ codel_time_to_us(q->params.interval)) ||
+ nla_put_u32(skb, TCA_CODEL_ECN,
+ q->params.ecn))
+ goto nla_put_failure;
+
+ return nla_nest_end(skb, opts);
+
+nla_put_failure:
+ nla_nest_cancel(skb, opts);
+ return -1;
+}
+
+static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+ const struct codel_sched_data *q = qdisc_priv(sch);
+ struct tc_codel_xstats st = {
+ .maxpacket = q->stats.maxpacket,
+ .count = q->vars.count,
+ .lastcount = q->vars.lastcount,
+ .drop_overlimit = q->drop_overlimit,
+ .ldelay = codel_time_to_us(q->vars.ldelay),
+ .dropping = q->vars.dropping,
+ .ecn_mark = q->stats.ecn_mark,
+ };
+
+ if (q->vars.dropping) {
+ codel_tdiff_t delta = q->vars.drop_next - codel_get_time();
+
+ if (delta >= 0)
+ st.drop_next = codel_time_to_us(delta);
+ else
+ st.drop_next = -codel_time_to_us(-delta);
+ }
+
+ return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+static void codel_reset(struct Qdisc *sch)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+
+ qdisc_reset_queue(sch);
+ codel_vars_init(&q->vars);
+}
+
+static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
+ .id = "codel",
+ .priv_size = sizeof(struct codel_sched_data),
+
+ .enqueue = codel_qdisc_enqueue,
+ .dequeue = codel_qdisc_dequeue,
+ .peek = qdisc_peek_dequeued,
+ .init = codel_init,
+ .reset = codel_reset,
+ .change = codel_change,
+ .dump = codel_dump,
+ .dump_stats = codel_dump_stats,
+ .owner = THIS_MODULE,
+};
+
+static int __init codel_module_init(void)
+{
+ return register_qdisc(&codel_qdisc_ops);
+}
+
+static void __exit codel_module_exit(void)
+{
+ unregister_qdisc(&codel_qdisc_ops);
+}
+
+module_init(codel_module_init)
+module_exit(codel_module_exit)
+
+MODULE_DESCRIPTION("Controlled Delay queue discipline");
+MODULE_AUTHOR("Dave Taht");
+MODULE_AUTHOR("Eric Dumazet");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 6b7fe4a84f13..9ce0b4fe23ff 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -260,7 +260,8 @@ static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
- NLA_PUT_U32(skb, TCA_DRR_QUANTUM, cl->quantum);
+ if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
+ goto nla_put_failure;
return nla_nest_end(skb, nest);
nla_put_failure:
@@ -375,8 +376,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
cl->deficit = cl->quantum;
}
- bstats_update(&cl->bstats, skb);
-
sch->q.qlen++;
return err;
}
@@ -402,6 +401,8 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
skb = qdisc_dequeue_peeked(cl->qdisc);
if (cl->qdisc->q.qlen == 0)
list_del(&cl->alist);
+
+ bstats_update(&cl->bstats, skb);
qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
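
The same accounting fix appears in the sch_atm, sch_hfsc and sch_htb hunks of this series: updating byte/packet stats at enqueue time counted packets as transmitted even if they were later dropped while queued. A minimal sketch of the corrected shape, with the update on the dequeue side only (example_dequeue is a made-up name; the helpers are the real ones):

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb = qdisc_dequeue_head(sch);

	if (skb)
		qdisc_bstats_update(sch, skb);	/* only real departures */
	return skb;
}
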
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 2c790204d042..3886365cc207 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -265,8 +265,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return NET_XMIT_SUCCESS;
drop:
- kfree_skb(skb);
- sch->qstats.drops++;
+ qdisc_drop(skb, sch);
return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}
@@ -429,8 +428,9 @@ static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
opts = nla_nest_start(skb, TCA_OPTIONS);
if (opts == NULL)
goto nla_put_failure;
- NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]);
- NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]);
+ if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]) ||
+ nla_put_u8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]))
+ goto nla_put_failure;
return nla_nest_end(skb, opts);
@@ -447,13 +447,16 @@ static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
opts = nla_nest_start(skb, TCA_OPTIONS);
if (opts == NULL)
goto nla_put_failure;
- NLA_PUT_U16(skb, TCA_DSMARK_INDICES, p->indices);
+ if (nla_put_u16(skb, TCA_DSMARK_INDICES, p->indices))
+ goto nla_put_failure;
- if (p->default_index != NO_DEFAULT_INDEX)
- NLA_PUT_U16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index);
+ if (p->default_index != NO_DEFAULT_INDEX &&
+ nla_put_u16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index))
+ goto nla_put_failure;
- if (p->set_tc_index)
- NLA_PUT_FLAG(skb, TCA_DSMARK_SET_TC_INDEX);
+ if (p->set_tc_index &&
+ nla_put_flag(skb, TCA_DSMARK_SET_TC_INDEX))
+ goto nla_put_failure;
return nla_nest_end(skb, opts);
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 66effe2da8e0..e15a9eb29087 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -85,7 +85,8 @@ static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct tc_fifo_qopt opt = { .limit = sch->limit };
- NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+ if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+ goto nla_put_failure;
return skb->len;
nla_put_failure:
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
new file mode 100644
index 000000000000..9fc1c62ec80e
--- /dev/null
+++ b/net/sched/sch_fq_codel.c
@@ -0,0 +1,626 @@
+/*
+ * Fair Queue CoDel discipline
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/jhash.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <net/flow_keys.h>
+#include <net/codel.h>
+
+/* Fair Queue CoDel.
+ *
+ * Principles:
+ * Packets are classified (by an internal or external classifier) into flows.
+ * This is a stochastic model (as we use a hash, several flows
+ * might be hashed to the same slot).
+ * Each flow has a CoDel managed queue.
+ * Flows are linked onto two (Round Robin) lists,
+ * so that new flows have priority on old ones.
+ *
+ * For a given flow, packets are not reordered (CoDel uses a FIFO);
+ * only head drops occur.
+ * ECN capability is on by default.
+ * Low memory footprint (64 bytes per flow)
+ */
+
+struct fq_codel_flow {
+ struct sk_buff *head;
+ struct sk_buff *tail;
+ struct list_head flowchain;
+ int deficit;
+ u32 dropped; /* number of drops (or ECN marks) on this flow */
+ struct codel_vars cvars;
+}; /* please try to keep this structure <= 64 bytes */
+
+struct fq_codel_sched_data {
+ struct tcf_proto *filter_list; /* optional external classifier */
+ struct fq_codel_flow *flows; /* Flows table [flows_cnt] */
+ u32 *backlogs; /* backlog table [flows_cnt] */
+ u32 flows_cnt; /* number of flows */
+ u32 perturbation; /* hash perturbation */
+ u32 quantum; /* psched_mtu(qdisc_dev(sch)); */
+ struct codel_params cparams;
+ struct codel_stats cstats;
+ u32 drop_overlimit;
+ u32 new_flow_count;
+
+ struct list_head new_flows; /* list of new flows */
+ struct list_head old_flows; /* list of old flows */
+};
+
+static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
+ const struct sk_buff *skb)
+{
+ struct flow_keys keys;
+ unsigned int hash;
+
+ skb_flow_dissect(skb, &keys);
+ hash = jhash_3words((__force u32)keys.dst,
+ (__force u32)keys.src ^ keys.ip_proto,
+ (__force u32)keys.ports, q->perturbation);
+ return ((u64)hash * q->flows_cnt) >> 32;
+}
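
fq_codel_hash() reduces the 32-bit jhash to a bucket index without a modulo: multiplying by flows_cnt and keeping the top 32 bits maps the hash uniformly onto [0, flows_cnt). A standalone demo of the trick:

#include <stdint.h>
#include <stdio.h>

/* (hash * n) >> 32 spreads a uniform 32-bit hash over [0, n); the
 * bias versus hash % n is negligible when n is far below 2^32.
 */
static uint32_t scale_hash(uint32_t hash, uint32_t n)
{
	return ((uint64_t)hash * n) >> 32;
}

int main(void)
{
	printf("%u\n", scale_hash(0x9e3779b9u, 1024));	/* prints 632 */
	return 0;
}
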
+
+static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
+ int *qerr)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ struct tcf_result res;
+ int result;
+
+ if (TC_H_MAJ(skb->priority) == sch->handle &&
+ TC_H_MIN(skb->priority) > 0 &&
+ TC_H_MIN(skb->priority) <= q->flows_cnt)
+ return TC_H_MIN(skb->priority);
+
+ if (!q->filter_list)
+ return fq_codel_hash(q, skb) + 1;
+
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+ result = tc_classify(skb, q->filter_list, &res);
+ if (result >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+ switch (result) {
+ case TC_ACT_STOLEN:
+ case TC_ACT_QUEUED:
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+ case TC_ACT_SHOT:
+ return 0;
+ }
+#endif
+ if (TC_H_MIN(res.classid) <= q->flows_cnt)
+ return TC_H_MIN(res.classid);
+ }
+ return 0;
+}
+
+/* helper functions: might be changed when/if skb uses a standard list_head */
+
+/* remove one skb from head of slot queue */
+static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
+{
+ struct sk_buff *skb = flow->head;
+
+ flow->head = skb->next;
+ skb->next = NULL;
+ return skb;
+}
+
+/* add skb to flow queue (tail add) */
+static inline void flow_queue_add(struct fq_codel_flow *flow,
+ struct sk_buff *skb)
+{
+ if (flow->head == NULL)
+ flow->head = skb;
+ else
+ flow->tail->next = skb;
+ flow->tail = skb;
+ skb->next = NULL;
+}
+
+static unsigned int fq_codel_drop(struct Qdisc *sch)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+ unsigned int maxbacklog = 0, idx = 0, i, len;
+ struct fq_codel_flow *flow;
+
+	/* Queue is full! Find the fattest flow and drop a packet from it.
+	 * This might sound expensive, but with 1024 flows, we scan
+	 * 4KB of memory, and we don't need to handle a complex tree
+	 * in the fast path (packet enqueue/dequeue) with many cache misses.
+ */
+ for (i = 0; i < q->flows_cnt; i++) {
+ if (q->backlogs[i] > maxbacklog) {
+ maxbacklog = q->backlogs[i];
+ idx = i;
+ }
+ }
+ flow = &q->flows[idx];
+ skb = dequeue_head(flow);
+ len = qdisc_pkt_len(skb);
+ q->backlogs[idx] -= len;
+ kfree_skb(skb);
+ sch->q.qlen--;
+ sch->qstats.drops++;
+ sch->qstats.backlog -= len;
+ flow->dropped++;
+ return idx;
+}
+
+static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ unsigned int idx;
+ struct fq_codel_flow *flow;
+ int uninitialized_var(ret);
+
+ idx = fq_codel_classify(skb, sch, &ret);
+ if (idx == 0) {
+ if (ret & __NET_XMIT_BYPASS)
+ sch->qstats.drops++;
+ kfree_skb(skb);
+ return ret;
+ }
+ idx--;
+
+ codel_set_enqueue_time(skb);
+ flow = &q->flows[idx];
+ flow_queue_add(flow, skb);
+ q->backlogs[idx] += qdisc_pkt_len(skb);
+ sch->qstats.backlog += qdisc_pkt_len(skb);
+
+ if (list_empty(&flow->flowchain)) {
+ list_add_tail(&flow->flowchain, &q->new_flows);
+ codel_vars_init(&flow->cvars);
+ q->new_flow_count++;
+ flow->deficit = q->quantum;
+ flow->dropped = 0;
+ }
+ if (++sch->q.qlen < sch->limit)
+ return NET_XMIT_SUCCESS;
+
+ q->drop_overlimit++;
+ /* Return Congestion Notification only if we dropped a packet
+ * from this flow.
+ */
+ if (fq_codel_drop(sch) == idx)
+ return NET_XMIT_CN;
+
+	/* As we dropped a packet, better let the upper stack know about it */
+ qdisc_tree_decrease_qlen(sch, 1);
+ return NET_XMIT_SUCCESS;
+}
+
+/* This is the specific function called from codel_dequeue()
+ * to dequeue a packet from the queue. Note: backlog is handled in
+ * codel, so we don't need to reduce it here.
+ */
+static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ struct fq_codel_flow *flow;
+ struct sk_buff *skb = NULL;
+
+ flow = container_of(vars, struct fq_codel_flow, cvars);
+ if (flow->head) {
+ skb = dequeue_head(flow);
+ q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
+ sch->q.qlen--;
+ }
+ return skb;
+}
+
+static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+ struct fq_codel_flow *flow;
+ struct list_head *head;
+ u32 prev_drop_count, prev_ecn_mark;
+
+begin:
+ head = &q->new_flows;
+ if (list_empty(head)) {
+ head = &q->old_flows;
+ if (list_empty(head))
+ return NULL;
+ }
+ flow = list_first_entry(head, struct fq_codel_flow, flowchain);
+
+ if (flow->deficit <= 0) {
+ flow->deficit += q->quantum;
+ list_move_tail(&flow->flowchain, &q->old_flows);
+ goto begin;
+ }
+
+ prev_drop_count = q->cstats.drop_count;
+ prev_ecn_mark = q->cstats.ecn_mark;
+
+ skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
+ dequeue);
+
+ flow->dropped += q->cstats.drop_count - prev_drop_count;
+ flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;
+
+ if (!skb) {
+ /* force a pass through old_flows to prevent starvation */
+ if ((head == &q->new_flows) && !list_empty(&q->old_flows))
+ list_move_tail(&flow->flowchain, &q->old_flows);
+ else
+ list_del_init(&flow->flowchain);
+ goto begin;
+ }
+ qdisc_bstats_update(sch, skb);
+ flow->deficit -= qdisc_pkt_len(skb);
+	/* We can't call qdisc_tree_decrease_qlen() if our qlen is 0,
+	 * or HTB crashes. Defer it for the next round.
+ */
+ if (q->cstats.drop_count && sch->q.qlen) {
+ qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
+ q->cstats.drop_count = 0;
+ }
+ return skb;
+}
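
fq_codel_dequeue() is deficit round robin over the two lists: a flow may dequeue while its deficit is positive, and an exhausted flow is recharged by quantum and rotated to the tail of old_flows. A standalone sketch of one flow's credit arithmetic (packet sizes made up; in the qdisc, quantum defaults to the device MTU via psched_mtu()):

#include <stdio.h>

int main(void)
{
	int deficit = 0, quantum = 1514;
	int pkts[] = { 1514, 1514, 300 };
	int i = 0, n = sizeof(pkts) / sizeof(pkts[0]);

	while (i < n) {
		if (deficit <= 0) {
			deficit += quantum;	/* recharge, yield the turn */
			printf("recharge -> %d\n", deficit);
			continue;
		}
		deficit -= pkts[i];
		printf("sent %4d, deficit now %5d\n", pkts[i], deficit);
		i++;
	}
	return 0;
}
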
+
+static void fq_codel_reset(struct Qdisc *sch)
+{
+ struct sk_buff *skb;
+
+ while ((skb = fq_codel_dequeue(sch)) != NULL)
+ kfree_skb(skb);
+}
+
+static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
+ [TCA_FQ_CODEL_TARGET] = { .type = NLA_U32 },
+ [TCA_FQ_CODEL_LIMIT] = { .type = NLA_U32 },
+ [TCA_FQ_CODEL_INTERVAL] = { .type = NLA_U32 },
+ [TCA_FQ_CODEL_ECN] = { .type = NLA_U32 },
+ [TCA_FQ_CODEL_FLOWS] = { .type = NLA_U32 },
+ [TCA_FQ_CODEL_QUANTUM] = { .type = NLA_U32 },
+};
+
+static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
+ int err;
+
+ if (!opt)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy);
+ if (err < 0)
+ return err;
+ if (tb[TCA_FQ_CODEL_FLOWS]) {
+ if (q->flows)
+ return -EINVAL;
+ q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
+ if (!q->flows_cnt ||
+ q->flows_cnt > 65536)
+ return -EINVAL;
+ }
+ sch_tree_lock(sch);
+
+ if (tb[TCA_FQ_CODEL_TARGET]) {
+ u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);
+
+ q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
+ }
+
+ if (tb[TCA_FQ_CODEL_INTERVAL]) {
+ u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);
+
+ q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
+ }
+
+ if (tb[TCA_FQ_CODEL_LIMIT])
+ sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);
+
+ if (tb[TCA_FQ_CODEL_ECN])
+ q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);
+
+ if (tb[TCA_FQ_CODEL_QUANTUM])
+ q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
+
+ while (sch->q.qlen > sch->limit) {
+ struct sk_buff *skb = fq_codel_dequeue(sch);
+
+ kfree_skb(skb);
+ q->cstats.drop_count++;
+ }
+ qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
+ q->cstats.drop_count = 0;
+
+ sch_tree_unlock(sch);
+ return 0;
+}
+
+static void *fq_codel_zalloc(size_t sz)
+{
+ void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
+
+ if (!ptr)
+ ptr = vzalloc(sz);
+ return ptr;
+}
+
+static void fq_codel_free(void *addr)
+{
+ if (addr) {
+ if (is_vmalloc_addr(addr))
+ vfree(addr);
+ else
+ kfree(addr);
+ }
+}
+
+static void fq_codel_destroy(struct Qdisc *sch)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+
+ tcf_destroy_chain(&q->filter_list);
+ fq_codel_free(q->backlogs);
+ fq_codel_free(q->flows);
+}
+
+static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ int i;
+
+ sch->limit = 10*1024;
+ q->flows_cnt = 1024;
+ q->quantum = psched_mtu(qdisc_dev(sch));
+ q->perturbation = net_random();
+ INIT_LIST_HEAD(&q->new_flows);
+ INIT_LIST_HEAD(&q->old_flows);
+ codel_params_init(&q->cparams);
+ codel_stats_init(&q->cstats);
+ q->cparams.ecn = true;
+
+ if (opt) {
+ int err = fq_codel_change(sch, opt);
+ if (err)
+ return err;
+ }
+
+ if (!q->flows) {
+ q->flows = fq_codel_zalloc(q->flows_cnt *
+ sizeof(struct fq_codel_flow));
+ if (!q->flows)
+ return -ENOMEM;
+ q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
+ if (!q->backlogs) {
+ fq_codel_free(q->flows);
+ return -ENOMEM;
+ }
+ for (i = 0; i < q->flows_cnt; i++) {
+ struct fq_codel_flow *flow = q->flows + i;
+
+ INIT_LIST_HEAD(&flow->flowchain);
+ }
+ }
+ if (sch->limit >= 1)
+ sch->flags |= TCQ_F_CAN_BYPASS;
+ else
+ sch->flags &= ~TCQ_F_CAN_BYPASS;
+ return 0;
+}
+
+static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ struct nlattr *opts;
+
+ opts = nla_nest_start(skb, TCA_OPTIONS);
+ if (opts == NULL)
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
+ codel_time_to_us(q->cparams.target)) ||
+ nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
+ sch->limit) ||
+ nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
+ codel_time_to_us(q->cparams.interval)) ||
+ nla_put_u32(skb, TCA_FQ_CODEL_ECN,
+ q->cparams.ecn) ||
+ nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
+ q->quantum) ||
+ nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
+ q->flows_cnt))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, opts);
+ return skb->len;
+
+nla_put_failure:
+ return -1;
+}
+
+static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ struct tc_fq_codel_xstats st = {
+ .type = TCA_FQ_CODEL_XSTATS_QDISC,
+ };
+ struct list_head *pos;
+
+ st.qdisc_stats.maxpacket = q->cstats.maxpacket;
+ st.qdisc_stats.drop_overlimit = q->drop_overlimit;
+ st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
+ st.qdisc_stats.new_flow_count = q->new_flow_count;
+
+ list_for_each(pos, &q->new_flows)
+ st.qdisc_stats.new_flows_len++;
+
+ list_for_each(pos, &q->old_flows)
+ st.qdisc_stats.old_flows_len++;
+
+ return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
+{
+ return NULL;
+}
+
+static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid)
+{
+ return 0;
+}
+
+static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
+ u32 classid)
+{
+ /* we cannot bypass queue discipline anymore */
+ sch->flags &= ~TCQ_F_CAN_BYPASS;
+ return 0;
+}
+
+static void fq_codel_put(struct Qdisc *q, unsigned long cl)
+{
+}
+
+static struct tcf_proto **fq_codel_find_tcf(struct Qdisc *sch, unsigned long cl)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+
+ if (cl)
+ return NULL;
+ return &q->filter_list;
+}
+
+static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
+ struct sk_buff *skb, struct tcmsg *tcm)
+{
+ tcm->tcm_handle |= TC_H_MIN(cl);
+ return 0;
+}
+
+static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+ struct gnet_dump *d)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ u32 idx = cl - 1;
+ struct gnet_stats_queue qs = { 0 };
+ struct tc_fq_codel_xstats xstats;
+
+ if (idx < q->flows_cnt) {
+ const struct fq_codel_flow *flow = &q->flows[idx];
+ const struct sk_buff *skb = flow->head;
+
+ memset(&xstats, 0, sizeof(xstats));
+ xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
+ xstats.class_stats.deficit = flow->deficit;
+ xstats.class_stats.ldelay =
+ codel_time_to_us(flow->cvars.ldelay);
+ xstats.class_stats.count = flow->cvars.count;
+ xstats.class_stats.lastcount = flow->cvars.lastcount;
+ xstats.class_stats.dropping = flow->cvars.dropping;
+ if (flow->cvars.dropping) {
+ codel_tdiff_t delta = flow->cvars.drop_next -
+ codel_get_time();
+
+ xstats.class_stats.drop_next = (delta >= 0) ?
+ codel_time_to_us(delta) :
+ -codel_time_to_us(-delta);
+ }
+ while (skb) {
+ qs.qlen++;
+ skb = skb->next;
+ }
+ qs.backlog = q->backlogs[idx];
+ qs.drops = flow->dropped;
+ }
+ if (gnet_stats_copy_queue(d, &qs) < 0)
+ return -1;
+ if (idx < q->flows_cnt)
+ return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
+ return 0;
+}
+
+static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ unsigned int i;
+
+ if (arg->stop)
+ return;
+
+ for (i = 0; i < q->flows_cnt; i++) {
+ if (list_empty(&q->flows[i].flowchain) ||
+ arg->count < arg->skip) {
+ arg->count++;
+ continue;
+ }
+ if (arg->fn(sch, i + 1, arg) < 0) {
+ arg->stop = 1;
+ break;
+ }
+ arg->count++;
+ }
+}
+
+static const struct Qdisc_class_ops fq_codel_class_ops = {
+ .leaf = fq_codel_leaf,
+ .get = fq_codel_get,
+ .put = fq_codel_put,
+ .tcf_chain = fq_codel_find_tcf,
+ .bind_tcf = fq_codel_bind,
+ .unbind_tcf = fq_codel_put,
+ .dump = fq_codel_dump_class,
+ .dump_stats = fq_codel_dump_class_stats,
+ .walk = fq_codel_walk,
+};
+
+static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
+ .cl_ops = &fq_codel_class_ops,
+ .id = "fq_codel",
+ .priv_size = sizeof(struct fq_codel_sched_data),
+ .enqueue = fq_codel_enqueue,
+ .dequeue = fq_codel_dequeue,
+ .peek = qdisc_peek_dequeued,
+ .drop = fq_codel_drop,
+ .init = fq_codel_init,
+ .reset = fq_codel_reset,
+ .destroy = fq_codel_destroy,
+ .change = fq_codel_change,
+ .dump = fq_codel_dump,
+ .dump_stats = fq_codel_dump_stats,
+ .owner = THIS_MODULE,
+};
+
+static int __init fq_codel_module_init(void)
+{
+ return register_qdisc(&fq_codel_qdisc_ops);
+}
+
+static void __exit fq_codel_module_exit(void)
+{
+ unregister_qdisc(&fq_codel_qdisc_ops);
+}
+
+module_init(fq_codel_module_init)
+module_exit(fq_codel_module_exit)
+MODULE_AUTHOR("Eric Dumazet");
+MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 67fc573e013a..511323e89cec 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -86,9 +86,8 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
* deadloop is detected. Return OK to try the next skb.
*/
kfree_skb(skb);
- if (net_ratelimit())
- pr_warning("Dead loop on netdevice %s, fix it urgently!\n",
- dev_queue->dev->name);
+ net_warn_ratelimited("Dead loop on netdevice %s, fix it urgently!\n",
+ dev_queue->dev->name);
ret = qdisc_qlen(q);
} else {
/*
@@ -136,9 +135,9 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
ret = handle_dev_cpu_collision(skb, txq, q);
} else {
/* Driver returned NETDEV_TX_BUSY - requeue skb */
- if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
- pr_warning("BUG %s code %d qlen %d\n",
- dev->name, ret, q->q.qlen);
+ if (unlikely(ret != NETDEV_TX_BUSY))
+ net_warn_ratelimited("BUG %s code %d qlen %d\n",
+ dev->name, ret, q->q.qlen);
ret = dev_requeue_skb(skb, q);
}
@@ -512,7 +511,8 @@ static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
- NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+ if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+ goto nla_put_failure;
return skb->len;
nla_put_failure:
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 0b15236be7b6..e901583e4ea5 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -255,10 +255,8 @@ static struct sk_buff *gred_dequeue(struct Qdisc *sch)
u16 dp = tc_index_to_dp(skb);
if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
- if (net_ratelimit())
- pr_warning("GRED: Unable to relocate VQ 0x%x "
- "after dequeue, screwing up "
- "backlog.\n", tc_index_to_dp(skb));
+ net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
+ tc_index_to_dp(skb));
} else {
q->backlog -= qdisc_pkt_len(skb);
@@ -287,10 +285,8 @@ static unsigned int gred_drop(struct Qdisc *sch)
u16 dp = tc_index_to_dp(skb);
if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
- if (net_ratelimit())
- pr_warning("GRED: Unable to relocate VQ 0x%x "
- "while dropping, screwing up "
- "backlog.\n", tc_index_to_dp(skb));
+ net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x while dropping, screwing up backlog\n",
+ tc_index_to_dp(skb));
} else {
q->backlog -= len;
q->stats.other++;
@@ -521,14 +517,16 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
opts = nla_nest_start(skb, TCA_OPTIONS);
if (opts == NULL)
goto nla_put_failure;
- NLA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
+ if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
+ goto nla_put_failure;
for (i = 0; i < MAX_DPs; i++) {
struct gred_sched_data *q = table->tab[i];
max_p[i] = q ? q->parms.max_P : 0;
}
- NLA_PUT(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p);
+ if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
+ goto nla_put_failure;
parms = nla_nest_start(skb, TCA_GRED_PARMS);
if (parms == NULL)
@@ -565,11 +563,8 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
opt.packets = q->packetsin;
opt.bytesin = q->bytesin;
- if (gred_wred_mode(table)) {
- q->vars.qidlestart =
- table->tab[table->def]->vars.qidlestart;
- q->vars.qavg = table->tab[table->def]->vars.qavg;
- }
+ if (gred_wred_mode(table))
+ gred_load_wred_set(table, q);
opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg);
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 9bdca2e011e9..6c2ec4510540 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1305,7 +1305,8 @@ hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
tsc.m1 = sm2m(sc->sm1);
tsc.d = dx2d(sc->dx);
tsc.m2 = sm2m(sc->sm2);
- NLA_PUT(skb, attr, sizeof(tsc), &tsc);
+ if (nla_put(skb, attr, sizeof(tsc), &tsc))
+ goto nla_put_failure;
return skb->len;
@@ -1573,7 +1574,8 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
}
qopt.defcls = q->defcls;
- NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
+ if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
+ goto nla_put_failure;
return skb->len;
nla_put_failure:
@@ -1607,7 +1609,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (cl->qdisc->q.qlen == 1)
set_active(cl, qdisc_pkt_len(skb));
- bstats_update(&cl->bstats, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
@@ -1655,6 +1656,7 @@ hfsc_dequeue(struct Qdisc *sch)
return NULL;
}
+ bstats_update(&cl->bstats, skb);
update_vf(cl, qdisc_pkt_len(skb), cur_time);
if (realtime)
cl->cl_cumul += qdisc_pkt_len(skb);
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 29b942ce9e82..9d75b7761313 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -558,9 +558,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
__skb_queue_tail(&q->direct_queue, skb);
q->direct_pkts++;
} else {
- kfree_skb(skb);
- sch->qstats.drops++;
- return NET_XMIT_DROP;
+ return qdisc_drop(skb, sch);
}
#ifdef CONFIG_NET_CLS_ACT
} else if (!cl) {
@@ -576,7 +574,6 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
return ret;
} else {
- bstats_update(&cl->bstats, skb);
htb_activate(q, cl);
}
@@ -837,6 +834,7 @@ next:
} while (cl != start);
if (likely(skb != NULL)) {
+ bstats_update(&cl->bstats, skb);
cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
if (cl->un.leaf.deficit[level] < 0) {
cl->un.leaf.deficit[level] += cl->quantum;
@@ -1051,7 +1049,8 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
- NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
+ if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt))
+ goto nla_put_failure;
nla_nest_end(skb, nest);
spin_unlock_bh(root_lock);
@@ -1090,7 +1089,8 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
opt.quantum = cl->quantum;
opt.prio = cl->prio;
opt.level = cl->level;
- NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
+ if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
+ goto nla_put_failure;
nla_nest_end(skb, nest);
spin_unlock_bh(root_lock);
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 28de43092330..d1831ca966d4 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -247,7 +247,8 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
opt.offset[i] = dev->tc_to_txq[i].offset;
}
- NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+ if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+ goto nla_put_failure;
return skb->len;
nla_put_failure:
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 49131d7a7446..2a2b096d9a66 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -284,7 +284,8 @@ static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
opt.bands = q->bands;
opt.max_bands = q->max_bands;
- NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+ if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+ goto nla_put_failure;
return skb->len;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 5da548fa7ae9..a2a95aabf9c2 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -26,6 +26,7 @@
#include <net/netlink.h>
#include <net/pkt_sched.h>
+#include <net/inet_ecn.h>
#define VERSION "1.3"
@@ -78,6 +79,7 @@ struct netem_sched_data {
psched_tdiff_t jitter;
u32 loss;
+ u32 ecn;
u32 limit;
u32 counter;
u32 gap;
@@ -374,9 +376,12 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
++count;
/* Drop packet? */
- if (loss_event(q))
- --count;
-
+ if (loss_event(q)) {
+ if (q->ecn && INET_ECN_set_ce(skb))
+ sch->qstats.drops++; /* mark packet */
+ else
+ --count;
+ }
if (count == 0) {
sch->qstats.drops++;
kfree_skb(skb);
@@ -408,10 +413,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
(skb->ip_summed == CHECKSUM_PARTIAL &&
- skb_checksum_help(skb))) {
- sch->qstats.drops++;
- return NET_XMIT_DROP;
- }
+ skb_checksum_help(skb)))
+ return qdisc_drop(skb, sch);
skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
}
@@ -706,6 +709,7 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
[TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) },
[TCA_NETEM_RATE] = { .len = sizeof(struct tc_netem_rate) },
[TCA_NETEM_LOSS] = { .type = NLA_NESTED },
+ [TCA_NETEM_ECN] = { .type = NLA_U32 },
};
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
@@ -776,6 +780,9 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
if (tb[TCA_NETEM_RATE])
get_rate(sch, tb[TCA_NETEM_RATE]);
+ if (tb[TCA_NETEM_ECN])
+ q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);
+
q->loss_model = CLG_RANDOM;
if (tb[TCA_NETEM_LOSS])
ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);
@@ -834,7 +841,8 @@ static int dump_loss_model(const struct netem_sched_data *q,
.p23 = q->clg.a5,
};
- NLA_PUT(skb, NETEM_LOSS_GI, sizeof(gi), &gi);
+ if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
+ goto nla_put_failure;
break;
}
case CLG_GILB_ELL: {
@@ -845,7 +853,8 @@ static int dump_loss_model(const struct netem_sched_data *q,
.k1 = q->clg.a4,
};
- NLA_PUT(skb, NETEM_LOSS_GE, sizeof(ge), &ge);
+ if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
+ goto nla_put_failure;
break;
}
}
@@ -874,26 +883,34 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
qopt.loss = q->loss;
qopt.gap = q->gap;
qopt.duplicate = q->duplicate;
- NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
+ if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
+ goto nla_put_failure;
cor.delay_corr = q->delay_cor.rho;
cor.loss_corr = q->loss_cor.rho;
cor.dup_corr = q->dup_cor.rho;
- NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);
+ if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
+ goto nla_put_failure;
reorder.probability = q->reorder;
reorder.correlation = q->reorder_cor.rho;
- NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);
+ if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
+ goto nla_put_failure;
corrupt.probability = q->corrupt;
corrupt.correlation = q->corrupt_cor.rho;
- NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);
+ if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
+ goto nla_put_failure;
rate.rate = q->rate;
rate.packet_overhead = q->packet_overhead;
rate.cell_size = q->cell_size;
rate.cell_overhead = q->cell_overhead;
- NLA_PUT(skb, TCA_NETEM_RATE, sizeof(rate), &rate);
+ if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
+ goto nla_put_failure;
+
+ if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
+ goto nla_put_failure;
if (dump_loss_model(q, skb) != 0)
goto nla_put_failure;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index b5d56a22b1d2..79359b69ad8d 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -247,7 +247,8 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
opt.bands = q->bands;
memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);
- NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+ if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+ goto nla_put_failure;
return skb->len;
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index e68cb440756a..9af01f3df18c 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -429,8 +429,9 @@ static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
- NLA_PUT_U32(skb, TCA_QFQ_WEIGHT, ONE_FP/cl->inv_w);
- NLA_PUT_U32(skb, TCA_QFQ_LMAX, cl->lmax);
+ if (nla_put_u32(skb, TCA_QFQ_WEIGHT, ONE_FP/cl->inv_w) ||
+ nla_put_u32(skb, TCA_QFQ_LMAX, cl->lmax))
+ goto nla_put_failure;
return nla_nest_end(skb, nest);
nla_put_failure:
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index a5cc3012cf42..633e32defdcc 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -272,8 +272,9 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
opts = nla_nest_start(skb, TCA_OPTIONS);
if (opts == NULL)
goto nla_put_failure;
- NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
- NLA_PUT_U32(skb, TCA_RED_MAX_P, q->parms.max_P);
+ if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
+ nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P))
+ goto nla_put_failure;
return nla_nest_end(skb, opts);
nla_put_failure:
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index d7eea99333e9..74305c883bd3 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -570,7 +570,8 @@ static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
sch->qstats.backlog = q->qdisc->qstats.backlog;
opts = nla_nest_start(skb, TCA_OPTIONS);
- NLA_PUT(skb, TCA_SFB_PARMS, sizeof(opt), &opt);
+ if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
+ goto nla_put_failure;
return nla_nest_end(skb, opts);
nla_put_failure:
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 02a21abea65e..d3a1bc26dbfc 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -812,7 +812,8 @@ static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
memcpy(&opt.stats, &q->stats, sizeof(opt.stats));
opt.flags = q->flags;
- NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+ if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+ goto nla_put_failure;
return skb->len;
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index b8e156319d7b..4b056c15e90c 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -359,7 +359,8 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
memset(&opt.peakrate, 0, sizeof(opt.peakrate));
opt.mtu = q->mtu;
opt.buffer = q->buffer;
- NLA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt);
+ if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
+ goto nla_put_failure;
nla_nest_end(skb, nest);
return skb->len;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 45326599fda3..ca0c29695d51 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -88,9 +88,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return NET_XMIT_SUCCESS;
}
- kfree_skb(skb);
- sch->qstats.drops++;
- return NET_XMIT_DROP;
+ return qdisc_drop(skb, sch);
}
static struct sk_buff *
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index acd2edbc073e..5bc9ab161b37 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1408,7 +1408,7 @@ static inline int sctp_peer_needs_update(struct sctp_association *asoc)
}
/* Increase asoc's rwnd by len and send any window update SACK if needed. */
-void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned len)
+void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
{
struct sctp_chunk *sack;
struct timer_list *timer;
@@ -1465,7 +1465,7 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned len)
}
/* Decrease asoc's rwnd by len. */
-void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len)
+void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
{
int rx_count;
int over = 0;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 80f71af71384..80564fe03024 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -342,7 +342,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
sctp_bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
- if (sk_add_backlog(sk, skb))
+ if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
sctp_chunk_free(chunk);
else
backloged = 1;
@@ -376,7 +376,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
struct sctp_ep_common *rcvr = chunk->rcvr;
int ret;
- ret = sk_add_backlog(sk, skb);
+ ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf);
if (!ret) {
/* Hold the assoc/ep while hanging on the backlog queue.
* This way, we know structures we need will not disappear
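
sk_add_backlog() grew a third parameter in this series: callers now state the limit the backlog is charged against (sk->sk_rcvbuf here) rather than the helper assuming one. A trimmed sketch of the test it applies, mirroring the sk_rcvqueues_full() helper in include/net/sock.h:

static inline bool sk_rcvqueues_full(const struct sock *sk,
				     const struct sk_buff *skb,
				     unsigned int limit)
{
	unsigned int qsize = sk->sk_backlog.len +
			     atomic_read(&sk->sk_rmem_alloc);

	return qsize > limit;
}
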
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 817174eb5f41..f1b7d4bb591e 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -377,9 +377,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
*/
skb_set_owner_w(nskb, sk);
- /* The 'obsolete' field of dst is set to 2 when a dst is freed. */
- if (!dst || (dst->obsolete > 1)) {
- dst_release(dst);
+ if (!sctp_transport_dst_check(tp)) {
sctp_transport_route(tp, NULL, sctp_sk(sk));
if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) {
sctp_assoc_sync_pmtu(asoc);
@@ -663,8 +661,8 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
*/
if (!sctp_sk(asoc->base.sk)->nodelay && sctp_packet_empty(packet) &&
inflight && sctp_state(asoc, ESTABLISHED)) {
- unsigned max = transport->pathmtu - packet->overhead;
- unsigned len = chunk->skb->len + q->out_qlen;
+ unsigned int max = transport->pathmtu - packet->overhead;
+ unsigned int len = chunk->skb->len + q->out_qlen;
/* Check whether this chunk and all the rest of pending
* data will fit or delay in hopes of bundling a full
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index cfeb1d4a1ee6..a0fa19f5650c 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -1147,7 +1147,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
__u32 sack_ctsn, ctsn, tsn;
__u32 highest_tsn, highest_new_tsn;
__u32 sack_a_rwnd;
- unsigned outstanding;
+ unsigned int outstanding;
struct sctp_transport *primary = asoc->peer.primary_path;
int count_of_newacks = 0;
int gap_ack_blocks;
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 1ff51c9d18d5..c96d1a81cf42 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -524,7 +524,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
/* Worker routine to handle INIT command failure. */
static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands,
struct sctp_association *asoc,
- unsigned error)
+ unsigned int error)
{
struct sctp_ulpevent *event;
@@ -550,7 +550,7 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
sctp_event_t event_type,
sctp_subtype_t subtype,
struct sctp_chunk *chunk,
- unsigned error)
+ unsigned int error)
{
struct sctp_ulpevent *event;
@@ -1161,9 +1161,8 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
break;
case SCTP_DISPOSITION_VIOLATION:
- if (net_ratelimit())
- pr_err("protocol violation state %d chunkid %d\n",
- state, subtype.chunk);
+ net_err_ratelimited("protocol violation state %d chunkid %d\n",
+ state, subtype.chunk);
break;
case SCTP_DISPOSITION_NOT_IMPL:
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 891f5db8cc31..9fca10357350 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1129,17 +1129,15 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
/* This should never happen, but lets log it if so. */
if (unlikely(!link)) {
if (from_addr.sa.sa_family == AF_INET6) {
- if (net_ratelimit())
- pr_warn("%s association %p could not find address %pI6\n",
- __func__,
- asoc,
- &from_addr.v6.sin6_addr);
+ net_warn_ratelimited("%s association %p could not find address %pI6\n",
+ __func__,
+ asoc,
+ &from_addr.v6.sin6_addr);
} else {
- if (net_ratelimit())
- pr_warn("%s association %p could not find address %pI4\n",
- __func__,
- asoc,
- &from_addr.v4.sin_addr.s_addr);
+ net_warn_ratelimited("%s association %p could not find address %pI4\n",
+ __func__,
+ asoc,
+ &from_addr.v4.sin_addr.s_addr);
}
return SCTP_DISPOSITION_DISCARD;
}
@@ -2410,7 +2408,7 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
- unsigned len;
+ unsigned int len;
__be16 error = SCTP_ERROR_NO_ERROR;
/* See if we have an error cause code in the chunk. */
@@ -2446,7 +2444,7 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
- unsigned len;
+ unsigned int len;
__be16 error = SCTP_ERROR_NO_ERROR;
if (!sctp_vtag_verify_either(chunk, asoc))
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 92ba71dfe080..b3b8a8d813eb 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5840,10 +5840,8 @@ SCTP_STATIC int sctp_listen_start(struct sock *sk, int backlog)
if (!sctp_sk(sk)->hmac && sctp_hmac_alg) {
tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm)) {
- if (net_ratelimit()) {
- pr_info("failed to load transform for %s: %ld\n",
- sctp_hmac_alg, PTR_ERR(tfm));
- }
+ net_info_ratelimited("failed to load transform for %s: %ld\n",
+ sctp_hmac_alg, PTR_ERR(tfm));
return -ENOSYS;
}
sctp_sk(sk)->hmac = tfm;
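
This sctp_listen_start() hunk is one of many conversions in this series from the open-coded "if (net_ratelimit()) pr_*(...)" idiom to the net_*_ratelimited() helpers, which fold the check and the printout into one call. A standalone sketch of the idea (hypothetical names, not the kernel implementation):

#include <stdarg.h>
#include <stdio.h>
#include <time.h>

/* Crude windowed rate limiter: allow at most BURST messages per
 * INTERVAL seconds, mimicking what net_info_ratelimited() wraps. */
#define INTERVAL 5
#define BURST 10

static int ratelimit_ok(void)
{
	static time_t window_start;
	static int used;
	time_t now = time(NULL);

	if (now - window_start >= INTERVAL) {
		window_start = now;
		used = 0;
	}
	return used++ < BURST;
}

static void info_ratelimited(const char *fmt, ...)
{
	va_list args;

	if (!ratelimit_ok())
		return;
	va_start(args, fmt);
	vfprintf(stderr, fmt, args);
	va_end(args);
}

int main(void)
{
	for (int i = 0; i < 15; i++)
		info_ratelimited("failed to load transform: %d\n", i);
	return 0;
}

Callers collapse to a single statement, which is exactly the shape the diff gives the sctp code above.
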
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 60ffbd067ff7..e5fe639c89e7 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -275,22 +275,16 @@ static ctl_table sctp_table[] = {
{ /* sentinel */ }
};
-static struct ctl_path sctp_path[] = {
- { .procname = "net", },
- { .procname = "sctp", },
- { }
-};
-
static struct ctl_table_header * sctp_sysctl_header;
/* Sysctl registration. */
void sctp_sysctl_register(void)
{
- sctp_sysctl_header = register_sysctl_paths(sctp_path, sctp_table);
+ sctp_sysctl_header = register_net_sysctl(&init_net, "net/sctp", sctp_table);
}
/* Sysctl deregistration. */
void sctp_sysctl_unregister(void)
{
- unregister_sysctl_table(sctp_sysctl_header);
+ unregister_net_sysctl_table(sctp_sysctl_header);
}
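
register_net_sysctl() replaces the ctl_path array with a plain slash-separated path string, so per-protocol code no longer builds boilerplate path tables. A rough userspace model of path-string registration (purely illustrative; the real implementation appears near the end of this series in net/sysctl_net.c):

#include <stdio.h>
#include <string.h>

/* Model: walk a "net/sctp"-style path, creating one directory level per
 * component, the way the single-call API does internally. */
static void register_path(const char *path)
{
	char buf[64];
	char *tok;

	snprintf(buf, sizeof(buf), "%s", path);
	for (tok = strtok(buf, "/"); tok; tok = strtok(NULL, "/"))
		printf("mkdir level: %s\n", tok);
}

int main(void)
{
	register_path("net/sctp");	/* prints "net" then "sctp" */
	return 0;
}
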
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 3889330b7b04..b026ba0c6992 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -226,23 +226,6 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}
-/* this is a complete rip-off from __sk_dst_check
- * the cookie is always 0 since this is how it's used in the
- * pmtu code
- */
-static struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
-{
- struct dst_entry *dst = t->dst;
-
- if (dst && dst->obsolete && dst->ops->check(dst, 0) == NULL) {
- dst_release(t->dst);
- t->dst = NULL;
- return NULL;
- }
-
- return dst;
-}
-
void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
{
struct dst_entry *dst;
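
The helper removed here is presumably relocated to a shared header, since the output.c hunk earlier in this patch now calls sctp_transport_dst_check() directly. The pattern it encapsulates, checking a cached route and dropping it when stale so the caller re-resolves, in a self-contained sketch with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct entry {
	int obsolete;
};

/* Stand-in for dst->ops->check(): NULL means "re-lookup needed". */
static struct entry *check(struct entry *e)
{
	return e->obsolete ? NULL : e;
}

struct transport {
	struct entry *cached;
};

static struct entry *transport_check(struct transport *t)
{
	if (t->cached && !check(t->cached)) {
		free(t->cached);
		t->cached = NULL;	/* force the caller to re-route */
	}
	return t->cached;
}

int main(void)
{
	struct transport t = { .cached = calloc(1, sizeof(struct entry)) };

	t.cached->obsolete = 1;
	printf("%s\n", transport_check(&t) ? "valid" : "stale, re-route");
	return 0;
}
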
diff --git a/net/socket.c b/net/socket.c
index 851edcd6b098..6e0ccc09b313 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -479,7 +479,7 @@ static struct socket *sock_alloc(void)
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
- percpu_add(sockets_in_use, 1);
+ this_cpu_add(sockets_in_use, 1);
return sock;
}
@@ -522,7 +522,7 @@ void sock_release(struct socket *sock)
if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
printk(KERN_ERR "sock_release: fasync list not empty!\n");
- percpu_sub(sockets_in_use, 1);
+ this_cpu_sub(sockets_in_use, 1);
if (!sock->file) {
iput(SOCK_INODE(sock));
return;
@@ -1234,8 +1234,7 @@ int __sock_create(struct net *net, int family, int type, int protocol,
*/
sock = sock_alloc();
if (!sock) {
- if (net_ratelimit())
- printk(KERN_WARNING "socket: no more sockets\n");
+ net_warn_ratelimited("socket: no more sockets\n");
return -ENFILE; /* Not exactly a match, but it's the
closest posix thing */
}
@@ -1479,7 +1478,7 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (sock) {
somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
- if ((unsigned)backlog > somaxconn)
+ if ((unsigned int)backlog > somaxconn)
backlog = somaxconn;
err = security_socket_listen(sock, backlog);
@@ -1691,7 +1690,7 @@ SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
*/
SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
- unsigned, flags, struct sockaddr __user *, addr,
+ unsigned int, flags, struct sockaddr __user *, addr,
int, addr_len)
{
struct socket *sock;
@@ -1738,7 +1737,7 @@ out:
*/
SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len,
- unsigned, flags)
+ unsigned int, flags)
{
return sys_sendto(fd, buff, len, flags, NULL, 0);
}
@@ -1750,7 +1749,7 @@ SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len,
*/
SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
- unsigned, flags, struct sockaddr __user *, addr,
+ unsigned int, flags, struct sockaddr __user *, addr,
int __user *, addr_len)
{
struct socket *sock;
@@ -1795,7 +1794,7 @@ out:
*/
asmlinkage long sys_recv(int fd, void __user *ubuf, size_t size,
- unsigned flags)
+ unsigned int flags)
{
return sys_recvfrom(fd, ubuf, size, flags, NULL, NULL);
}
@@ -1897,7 +1896,7 @@ struct used_address {
};
static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
- struct msghdr *msg_sys, unsigned flags,
+ struct msghdr *msg_sys, unsigned int flags,
struct used_address *used_address)
{
struct compat_msghdr __user *msg_compat =
@@ -1908,7 +1907,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
__attribute__ ((aligned(sizeof(__kernel_size_t))));
/* 20 is size of ipv6_pktinfo */
unsigned char *ctl_buf = ctl;
- int err, ctl_len, iov_size, total_len;
+ int err, ctl_len, total_len;
err = -EFAULT;
if (MSG_CMSG_COMPAT & flags) {
@@ -1917,16 +1916,13 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
} else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
return -EFAULT;
- /* do not move before msg_sys is valid */
- err = -EMSGSIZE;
- if (msg_sys->msg_iovlen > UIO_MAXIOV)
- goto out;
-
- /* Check whether to allocate the iovec area */
- err = -ENOMEM;
- iov_size = msg_sys->msg_iovlen * sizeof(struct iovec);
if (msg_sys->msg_iovlen > UIO_FASTIOV) {
- iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
+ err = -EMSGSIZE;
+ if (msg_sys->msg_iovlen > UIO_MAXIOV)
+ goto out;
+ err = -ENOMEM;
+ iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec),
+ GFP_KERNEL);
if (!iov)
goto out;
}
@@ -2005,7 +2001,7 @@ out_freectl:
sock_kfree_s(sock->sk, ctl_buf, ctl_len);
out_freeiov:
if (iov != iovstack)
- sock_kfree_s(sock->sk, iov, iov_size);
+ kfree(iov);
out:
return err;
}
@@ -2014,7 +2010,7 @@ out:
* BSD sendmsg interface
*/
-SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
+SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags)
{
int fput_needed, err;
struct msghdr msg_sys;
@@ -2096,14 +2092,14 @@ SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg,
}
static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
- struct msghdr *msg_sys, unsigned flags, int nosec)
+ struct msghdr *msg_sys, unsigned int flags, int nosec)
{
struct compat_msghdr __user *msg_compat =
(struct compat_msghdr __user *)msg;
struct iovec iovstack[UIO_FASTIOV];
struct iovec *iov = iovstack;
unsigned long cmsg_ptr;
- int err, iov_size, total_len, len;
+ int err, total_len, len;
/* kernel mode address */
struct sockaddr_storage addr;
@@ -2118,15 +2114,13 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
} else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
return -EFAULT;
- err = -EMSGSIZE;
- if (msg_sys->msg_iovlen > UIO_MAXIOV)
- goto out;
-
- /* Check whether to allocate the iovec area */
- err = -ENOMEM;
- iov_size = msg_sys->msg_iovlen * sizeof(struct iovec);
if (msg_sys->msg_iovlen > UIO_FASTIOV) {
- iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
+ err = -EMSGSIZE;
+ if (msg_sys->msg_iovlen > UIO_MAXIOV)
+ goto out;
+ err = -ENOMEM;
+ iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec),
+ GFP_KERNEL);
if (!iov)
goto out;
}
@@ -2180,7 +2174,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
out_freeiov:
if (iov != iovstack)
- sock_kfree_s(sock->sk, iov, iov_size);
+ kfree(iov);
out:
return err;
}
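
Both __sys_sendmsg() and __sys_recvmsg() keep a small-vector fast path: up to UIO_FASTIOV entries live in a stack array, and only larger vectors are bounds-checked and heap-allocated (now with plain kmalloc()/kfree() rather than sock_kmalloc()/sock_kfree_s(), and with the EMSGSIZE/ENOMEM checks moved inside the slow path). A userspace rendering of that pattern, with toy names:

#include <stdio.h>
#include <stdlib.h>

#define FASTIOV 8	/* stand-in for UIO_FASTIOV */
#define MAXIOV 1024	/* stand-in for UIO_MAXIOV */

struct iovec_toy { void *base; size_t len; };

static int process(size_t iovlen)
{
	struct iovec_toy stackvec[FASTIOV];
	struct iovec_toy *iov = stackvec;
	int err = 0;

	if (iovlen > FASTIOV) {
		if (iovlen > MAXIOV)
			return -1;	/* -EMSGSIZE analogue */
		iov = malloc(iovlen * sizeof(*iov));
		if (!iov)
			return -2;	/* -ENOMEM analogue */
	}

	/* ... fill and use iov ... */
	printf("using %s storage for %zu entries\n",
	       iov == stackvec ? "stack" : "heap", iovlen);

	if (iov != stackvec)
		free(iov);
	return err;
}

int main(void)
{
	process(4);	/* small vector: stack */
	process(64);	/* large vector: heap */
	return 0;
}
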
@@ -2524,6 +2518,12 @@ EXPORT_SYMBOL(sock_unregister);
static int __init sock_init(void)
{
int err;
+ /*
+ * Initialize the network sysctl infrastructure.
+ */
+ err = net_sysctl_init();
+ if (err)
+ goto out;
/*
* Initialize sock SLAB cache.
@@ -3223,7 +3223,7 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
return -ENOIOCTLCMD;
}
-static long compat_sock_ioctl(struct file *file, unsigned cmd,
+static long compat_sock_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct socket *sock = file->private_data;
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
index 75762f346975..6ed6f201b022 100644
--- a/net/sunrpc/auth_generic.c
+++ b/net/sunrpc/auth_generic.c
@@ -160,8 +160,8 @@ generic_match(struct auth_cred *acred, struct rpc_cred *cred, int flags)
if (gcred->acred.group_info->ngroups != acred->group_info->ngroups)
goto out_nomatch;
for (i = 0; i < gcred->acred.group_info->ngroups; i++) {
- if (GROUP_AT(gcred->acred.group_info, i) !=
- GROUP_AT(acred->group_info, i))
+ if (!gid_eq(GROUP_AT(gcred->acred.group_info, i),
+ GROUP_AT(acred->group_info, i)))
goto out_nomatch;
}
out_match:
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 8eff8c32d1b9..d3611f11a8df 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -624,7 +624,7 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
ctx->seq_send = ctx->seq_send64;
if (ctx->seq_send64 != ctx->seq_send) {
dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__,
- (long unsigned)ctx->seq_send64, ctx->seq_send);
+ (unsigned long)ctx->seq_send64, ctx->seq_send);
p = ERR_PTR(-EINVAL);
goto out_err;
}
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index ca8cad8251c7..782bfe1b6465 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -242,12 +242,13 @@ EXPORT_SYMBOL_GPL(gss_mech_get_by_pseudoflavor);
int gss_mech_list_pseudoflavors(rpc_authflavor_t *array_ptr)
{
struct gss_api_mech *pos = NULL;
- int i = 0;
+ int j, i = 0;
spin_lock(&registered_mechs_lock);
list_for_each_entry(pos, &registered_mechs, gm_list) {
- array_ptr[i] = pos->gm_pfs->pseudoflavor;
- i++;
+ for (j=0; j < pos->gm_pf_num; j++) {
+ array_ptr[i++] = pos->gm_pfs[j].pseudoflavor;
+ }
}
spin_unlock(&registered_mechs_lock);
return i;
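
The fix above walks every pseudoflavor of every registered mechanism instead of taking only the first one per mechanism. In miniature (illustrative values; 390003-390005 are the usual krb5/krb5i/krb5p pseudoflavors):

#include <stdio.h>

struct mech { const int *pfs; int pf_num; };

static int list_pseudoflavors(const struct mech *mechs, int nmech, int *out)
{
	int i = 0;

	for (int m = 0; m < nmech; m++)
		for (int j = 0; j < mechs[m].pf_num; j++)
			out[i++] = mechs[m].pfs[j];	/* all, not just [0] */
	return i;
}

int main(void)
{
	const int krb5[] = { 390003, 390004, 390005 };
	const struct mech mechs[] = { { krb5, 3 } };
	int out[8];
	int n = list_pseudoflavors(mechs, 1, out);

	for (int i = 0; i < n; i++)
		printf("%d\n", out[i]);
	return 0;
}
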
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index bcb773781ec0..73e957386600 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -41,6 +41,7 @@
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pagemap.h>
+#include <linux/user_namespace.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/gss_err.h>
@@ -467,9 +468,13 @@ static int rsc_parse(struct cache_detail *cd,
status = -EINVAL;
for (i=0; i<N; i++) {
gid_t gid;
+ kgid_t kgid;
if (get_int(&mesg, &gid))
goto out;
- GROUP_AT(rsci.cred.cr_group_info, i) = gid;
+ kgid = make_kgid(&init_user_ns, gid);
+ if (!gid_valid(kgid))
+ goto out;
+ GROUP_AT(rsci.cred.cr_group_info, i) = kgid;
}
/* mech name */
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index e50502d8ceb7..52c5abdee211 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/auth.h>
+#include <linux/user_namespace.h>
#define NFS_NGROUPS 16
@@ -78,8 +79,11 @@ unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
groups = NFS_NGROUPS;
cred->uc_gid = acred->gid;
- for (i = 0; i < groups; i++)
- cred->uc_gids[i] = GROUP_AT(acred->group_info, i);
+ for (i = 0; i < groups; i++) {
+ gid_t gid;
+ gid = from_kgid(&init_user_ns, GROUP_AT(acred->group_info, i));
+ cred->uc_gids[i] = gid;
+ }
if (i < NFS_NGROUPS)
cred->uc_gids[i] = NOGROUP;
@@ -126,9 +130,12 @@ unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags)
groups = acred->group_info->ngroups;
if (groups > NFS_NGROUPS)
groups = NFS_NGROUPS;
- for (i = 0; i < groups ; i++)
- if (cred->uc_gids[i] != GROUP_AT(acred->group_info, i))
+ for (i = 0; i < groups ; i++) {
+ gid_t gid;
+ gid = from_kgid(&init_user_ns, GROUP_AT(acred->group_info, i));
+ if (cred->uc_gids[i] != gid)
return 0;
+ }
if (groups < NFS_NGROUPS &&
cred->uc_gids[groups] != NOGROUP)
return 0;
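
These sunrpc hunks are part of the kernel-wide user-namespace conversion: on-the-wire gid_t values are translated into namespace-aware kgid_t with make_kgid(), rejected when invalid, and translated back with from_kgid() at the boundary. A toy model of the round-trip (identity mapping stands in for the init_user_ns translation):

#include <stdio.h>

typedef struct { int val; } kgid_t;	/* opaque kernel-internal id */
#define INVALID_GID -1

static kgid_t make_kgid(int gid)
{
	kgid_t k = { gid >= 0 ? gid : INVALID_GID };
	return k;
}

static int gid_valid(kgid_t k)
{
	return k.val != INVALID_GID;
}

static int from_kgid(kgid_t k)
{
	return k.val;
}

int main(void)
{
	kgid_t k = make_kgid(1000);

	if (!gid_valid(k))
		return 1;		/* reject, as rsc_parse() now does */
	printf("wire gid %d\n", from_kgid(k));
	return 0;
}
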
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index de0b0f39d9d8..47ad2666fdf6 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1273,7 +1273,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
__acquires(cd->hash_lock)
{
loff_t n = *pos;
- unsigned hash, entry;
+ unsigned int hash, entry;
struct cache_head *ch;
struct cache_detail *cd = ((struct handle*)m->private)->cd;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 67972462a543..f56f045778ae 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -127,9 +127,7 @@ static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
{
static uint32_t clntid;
char name[15];
- struct qstr q = {
- .name = name,
- };
+ struct qstr q = { .name = name };
struct dentry *dir, *dentry;
int error;
@@ -176,16 +174,22 @@ rpc_setup_pipedir(struct rpc_clnt *clnt, const char *dir_name)
return 0;
}
-static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
- struct super_block *sb)
+static inline int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event)
+{
+ if (((event == RPC_PIPEFS_MOUNT) && clnt->cl_dentry) ||
+ ((event == RPC_PIPEFS_UMOUNT) && !clnt->cl_dentry))
+ return 1;
+ return 0;
+}
+
+static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event,
+ struct super_block *sb)
{
struct dentry *dentry;
int err = 0;
switch (event) {
case RPC_PIPEFS_MOUNT:
- if (clnt->cl_program->pipe_dir_name == NULL)
- break;
dentry = rpc_setup_pipedir_sb(sb, clnt,
clnt->cl_program->pipe_dir_name);
BUG_ON(dentry == NULL);
@@ -208,6 +212,20 @@ static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
return err;
}
+static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
+ struct super_block *sb)
+{
+ int error = 0;
+
+ for (;; clnt = clnt->cl_parent) {
+ if (!rpc_clnt_skip_event(clnt, event))
+ error = __rpc_clnt_handle_event(clnt, event, sb);
+ if (error || clnt == clnt->cl_parent)
+ break;
+ }
+ return error;
+}
+
static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
{
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
@@ -215,10 +233,12 @@ static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
spin_lock(&sn->rpc_client_lock);
list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
- if (((event == RPC_PIPEFS_MOUNT) && clnt->cl_dentry) ||
- ((event == RPC_PIPEFS_UMOUNT) && !clnt->cl_dentry))
+ if (clnt->cl_program->pipe_dir_name == NULL)
+ break;
+ if (rpc_clnt_skip_event(clnt, event))
+ continue;
+ if (atomic_inc_not_zero(&clnt->cl_count) == 0)
continue;
- atomic_inc(&clnt->cl_count);
spin_unlock(&sn->rpc_client_lock);
return clnt;
}
@@ -257,6 +277,14 @@ void rpc_clients_notifier_unregister(void)
return rpc_pipefs_notifier_unregister(&rpc_clients_block);
}
+static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
+{
+ clnt->cl_nodelen = strlen(nodename);
+ if (clnt->cl_nodelen > UNX_MAXNODENAME)
+ clnt->cl_nodelen = UNX_MAXNODENAME;
+ memcpy(clnt->cl_nodename, nodename, clnt->cl_nodelen);
+}
+
static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt)
{
const struct rpc_program *program = args->program;
@@ -337,10 +365,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
}
/* save the nodename */
- clnt->cl_nodelen = strlen(init_utsname()->nodename);
- if (clnt->cl_nodelen > UNX_MAXNODENAME)
- clnt->cl_nodelen = UNX_MAXNODENAME;
- memcpy(clnt->cl_nodename, init_utsname()->nodename, clnt->cl_nodelen);
+ rpc_clnt_set_nodename(clnt, utsname()->nodename);
rpc_register_client(clnt);
return clnt;
@@ -499,6 +524,7 @@ rpc_clone_client(struct rpc_clnt *clnt)
err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
if (err != 0)
goto out_no_path;
+ rpc_clnt_set_nodename(new, utsname()->nodename);
if (new->cl_auth)
atomic_inc(&new->cl_auth->au_count);
atomic_inc(&clnt->cl_count);
@@ -1260,6 +1286,8 @@ call_reserveresult(struct rpc_task *task)
}
switch (status) {
+ case -ENOMEM:
+ rpc_delay(task, HZ >> 2);
case -EAGAIN: /* woken up; retry */
task->tk_action = call_reserve;
return;
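
The new -ENOMEM case in call_reserveresult() deliberately falls through to the -EAGAIN retry path after scheduling a quarter-second delay; it pairs with the xprt.c hunk later in this series, which reports slot-allocation failure as -ENOMEM instead of delaying inline. The control flow, as a standalone sketch:

#include <stdio.h>

enum { OK, EAGAIN_ERR, ENOMEM_ERR };

static void handle(int status)
{
	switch (status) {
	case ENOMEM_ERR:
		printf("delay ~250ms before retrying\n");
		/* fall through: after the delay, retry like -EAGAIN */
	case EAGAIN_ERR:
		printf("retry reserve\n");
		return;
	case OK:
		printf("proceed\n");
		return;
	}
}

int main(void)
{
	handle(ENOMEM_ERR);	/* delay, then retry */
	handle(EAGAIN_ERR);	/* retry immediately */
	return 0;
}
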
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 0af37fc46818..04040476082e 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -120,7 +120,7 @@ EXPORT_SYMBOL_GPL(rpc_pipe_generic_upcall);
/**
* rpc_queue_upcall - queue an upcall message to userspace
- * @inode: inode of upcall pipe on which to queue given message
+ * @pipe: upcall pipe on which to queue given message
* @msg: message to queue
*
* Call with an @inode created by rpc_mkpipe() to queue an upcall.
@@ -819,9 +819,7 @@ static int rpc_rmdir_depopulate(struct dentry *dentry,
* @parent: dentry of directory to create new "pipe" in
* @name: name of pipe
* @private: private data to associate with the pipe, for the caller's use
- * @ops: operations defining the behavior of the pipe: upcall, downcall,
- * release_pipe, open_pipe, and destroy_msg.
- * @flags: rpc_pipe flags
+ * @pipe: &rpc_pipe containing input parameters
*
* Data is made available for userspace to read by calls to
* rpc_queue_upcall(). The actual reads will result in calls to
@@ -943,7 +941,7 @@ struct dentry *rpc_create_client_dir(struct dentry *dentry,
/**
* rpc_remove_client_dir - Remove a directory created with rpc_create_client_dir()
- * @clnt: rpc client
+ * @dentry: dentry for the pipe
*/
int rpc_remove_client_dir(struct dentry *dentry)
{
@@ -1059,12 +1057,9 @@ static const struct rpc_filelist files[] = {
struct dentry *rpc_d_lookup_sb(const struct super_block *sb,
const unsigned char *dir_name)
{
- struct qstr dir = {
- .name = dir_name,
- .len = strlen(dir_name),
- .hash = full_name_hash(dir_name, strlen(dir_name)),
- };
+ struct qstr dir = QSTR_INIT(dir_name, strlen(dir_name));
+ dir.hash = full_name_hash(dir.name, dir.len);
return d_lookup(sb->s_root, &dir);
}
EXPORT_SYMBOL_GPL(rpc_d_lookup_sb);
@@ -1118,7 +1113,7 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
sb->s_op = &s_ops;
sb->s_time_gran = 1;
- inode = rpc_get_inode(sb, S_IFDIR | 0755);
+ inode = rpc_get_inode(sb, S_IFDIR | S_IRUGO | S_IXUGO);
sb->s_root = root = d_make_root(inode);
if (!root)
return -ENOMEM;
@@ -1126,19 +1121,20 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
return -ENOMEM;
dprintk("RPC: sending pipefs MOUNT notification for net %p%s\n", net,
NET_NAME(net));
+ sn->pipefs_sb = sb;
err = blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
RPC_PIPEFS_MOUNT,
sb);
if (err)
goto err_depopulate;
sb->s_fs_info = get_net(net);
- sn->pipefs_sb = sb;
return 0;
err_depopulate:
blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
RPC_PIPEFS_UMOUNT,
sb);
+ sn->pipefs_sb = NULL;
__rpc_depopulate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF);
return err;
}
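
Moving the sn->pipefs_sb assignment ahead of the MOUNT notifier call, and clearing it on the error path, means callbacks that run during the chain already see the superblock pointer. The ordering rule in miniature (hypothetical names):

#include <stdio.h>

static void *published;		/* state the callbacks depend on */

static int notifier_chain(void)
{
	/* A callback fired from the chain can rely on `published`. */
	printf("callback sees: %s\n", published ? "set" : "NULL");
	return 0;		/* nonzero would signal failure */
}

static int pipefs_mount(void *sb)
{
	int err;

	published = sb;			/* publish BEFORE notifying */
	err = notifier_chain();
	if (err)
		published = NULL;	/* unwind on failure */
	return err;
}

int main(void)
{
	int sb;
	return pipefs_mount(&sb);
}
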
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 4c38b33ab8a8..92509ffe15fc 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -396,6 +396,7 @@ static int rpcb_register_call(struct rpc_clnt *clnt, struct rpc_message *msg)
/**
* rpcb_register - set or unset a port registration with the local rpcbind svc
+ * @net: target network namespace
* @prog: RPC program number to bind
* @vers: RPC version number to bind
* @prot: transport protocol to register
@@ -523,6 +524,7 @@ static int rpcb_unregister_all_protofamilies(struct sunrpc_net *sn,
/**
* rpcb_v4_register - set or unset a port registration with the local rpcbind
+ * @net: target network namespace
* @program: RPC program number of service to (un)register
* @version: RPC version number of service to (un)register
* @address: address family, IP address, and port to (un)register
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index 8adfc88e793a..3d6498af9adc 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -75,20 +75,21 @@ static struct pernet_operations sunrpc_net_ops = {
static int __init
init_sunrpc(void)
{
- int err = register_rpc_pipefs();
+ int err = rpc_init_mempool();
if (err)
goto out;
- err = rpc_init_mempool();
- if (err)
- goto out2;
err = rpcauth_init_module();
if (err)
- goto out3;
+ goto out2;
cache_initialize();
err = register_pernet_subsys(&sunrpc_net_ops);
if (err)
+ goto out3;
+
+ err = register_rpc_pipefs();
+ if (err)
goto out4;
#ifdef RPC_DEBUG
rpc_register_sysctl();
@@ -98,11 +99,11 @@ init_sunrpc(void)
return 0;
out4:
- rpcauth_remove_module();
+ unregister_pernet_subsys(&sunrpc_net_ops);
out3:
- rpc_destroy_mempool();
+ rpcauth_remove_module();
out2:
- unregister_rpc_pipefs();
+ rpc_destroy_mempool();
out:
return err;
}
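
Reordering init_sunrpc() keeps the teardown ladder a mirror image of the setup order: each goto label undoes exactly the steps that succeeded before the failure. The idiom, reduced to its skeleton:

#include <stdio.h>

static int step(const char *name, int fail)
{
	printf("init %s\n", name);
	return fail ? -1 : 0;
}

static void undo(const char *name)
{
	printf("undo %s\n", name);
}

static int init_all(void)
{
	int err;

	err = step("mempool", 0);
	if (err)
		goto out;
	err = step("auth", 0);
	if (err)
		goto out_mempool;
	err = step("pernet", 0);
	if (err)
		goto out_auth;
	err = step("pipefs", 1);	/* simulate the last step failing */
	if (err)
		goto out_pernet;
	return 0;

out_pernet:
	undo("pernet");
out_auth:
	undo("auth");
out_mempool:
	undo("mempool");
out:
	return err;
}

int main(void)
{
	return init_all() ? 1 : 0;
}
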
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index b7210f5cc893..7e9baaa1e543 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1038,23 +1038,21 @@ static void svc_unregister(const struct svc_serv *serv, struct net *net)
* Printk the given error with the address of the client that caused it.
*/
static __printf(2, 3)
-int svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
+void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
- int r;
char buf[RPC_MAX_ADDRBUFLEN];
- if (!net_ratelimit())
- return 0;
+ va_start(args, fmt);
- printk(KERN_WARNING "svc: %s: ",
- svc_print_addr(rqstp, buf, sizeof(buf)));
+ vaf.fmt = fmt;
+ vaf.va = &args;
- va_start(args, fmt);
- r = vprintk(fmt, args);
- va_end(args);
+ net_warn_ratelimited("svc: %s: %pV",
+ svc_print_addr(rqstp, buf, sizeof(buf)), &vaf);
- return r;
+ va_end(args);
}
/*
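
svc_printk() now returns void and forwards its varargs through a struct va_format and the kernel's %pV specifier, so the address prefix and the caller's format print atomically in one rate-limited line. Userspace has no %pV, but vsnprintf() can compose the same two-stage message (a sketch with invented helper names):

#include <stdarg.h>
#include <stdio.h>

/* Two-stage print: render the caller's fmt/args into a buffer, then emit
 * it behind a fixed prefix -- the effect %pV achieves without the copy. */
static void svc_warn(const char *addr, const char *fmt, ...)
{
	char body[128];
	va_list args;

	va_start(args, fmt);
	vsnprintf(body, sizeof(body), fmt, args);
	va_end(args);

	fprintf(stderr, "svc: %s: %s", addr, body);
}

int main(void)
{
	svc_warn("192.0.2.1, port=1023", "short len %d\n", 42);
	return 0;
}
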
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 37a1f664d108..88f2bf671960 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -544,14 +544,11 @@ static void svc_check_conn_limits(struct svc_serv *serv)
struct svc_xprt *xprt = NULL;
spin_lock_bh(&serv->sv_lock);
if (!list_empty(&serv->sv_tempsocks)) {
- if (net_ratelimit()) {
- /* Try to help the admin */
- printk(KERN_NOTICE "%s: too many open "
- "connections, consider increasing %s\n",
- serv->sv_name, serv->sv_maxconn ?
- "the max number of connections." :
- "the number of threads.");
- }
+ /* Try to help the admin */
+ net_notice_ratelimited("%s: too many open connections, consider increasing the %s\n",
+ serv->sv_name, serv->sv_maxconn ?
+ "max number of connections" :
+ "number of threads");
/*
* Always select the oldest connection. It's not fair,
* but so is life
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 88962cf34377..2777fa896645 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -14,6 +14,7 @@
#include <net/sock.h>
#include <net/ipv6.h>
#include <linux/kernel.h>
+#include <linux/user_namespace.h>
#define RPCDBG_FACILITY RPCDBG_AUTH
#include <linux/sunrpc/clnt.h>
@@ -525,11 +526,15 @@ static int unix_gid_parse(struct cache_detail *cd,
for (i = 0 ; i < gids ; i++) {
int gid;
+ kgid_t kgid;
rv = get_int(&mesg, &gid);
err = -EINVAL;
if (rv)
goto out;
- GROUP_AT(ug.gi, i) = gid;
+ kgid = make_kgid(&init_user_ns, gid);
+ if (!gid_valid(kgid))
+ goto out;
+ GROUP_AT(ug.gi, i) = kgid;
}
ugp = unix_gid_lookup(cd, uid);
@@ -558,6 +563,7 @@ static int unix_gid_show(struct seq_file *m,
struct cache_detail *cd,
struct cache_head *h)
{
+ struct user_namespace *user_ns = current_user_ns();
struct unix_gid *ug;
int i;
int glen;
@@ -575,7 +581,7 @@ static int unix_gid_show(struct seq_file *m,
seq_printf(m, "%u %d:", ug->uid, glen);
for (i = 0; i < glen; i++)
- seq_printf(m, " %d", GROUP_AT(ug->gi, i));
+ seq_printf(m, " %d", from_kgid_munged(user_ns, GROUP_AT(ug->gi, i)));
seq_printf(m, "\n");
return 0;
}
@@ -828,8 +834,12 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
cred->cr_group_info = groups_alloc(slen);
if (cred->cr_group_info == NULL)
return SVC_CLOSE;
- for (i = 0; i < slen; i++)
- GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
+ for (i = 0; i < slen; i++) {
+ kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv));
+ if (!gid_valid(kgid))
+ goto badcred;
+ GROUP_AT(cred->cr_group_info, i) = kgid;
+ }
if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
*authp = rpc_autherr_badverf;
return SVC_DENIED;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 824d32fb3121..a6de09de5d21 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -617,11 +617,8 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
rqstp->rq_prot = IPPROTO_UDP;
if (!svc_udp_get_dest_address(rqstp, cmh)) {
- if (net_ratelimit())
- printk(KERN_WARNING
- "svc: received unknown control message %d/%d; "
- "dropping RPC reply datagram\n",
- cmh->cmsg_level, cmh->cmsg_type);
+ net_warn_ratelimited("svc: received unknown control message %d/%d; dropping RPC reply datagram\n",
+ cmh->cmsg_level, cmh->cmsg_type);
skb_free_datagram_locked(svsk->sk_sk, skb);
return 0;
}
@@ -871,18 +868,17 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
if (err == -ENOMEM)
printk(KERN_WARNING "%s: no more sockets!\n",
serv->sv_name);
- else if (err != -EAGAIN && net_ratelimit())
- printk(KERN_WARNING "%s: accept failed (err %d)!\n",
- serv->sv_name, -err);
+ else if (err != -EAGAIN)
+ net_warn_ratelimited("%s: accept failed (err %d)!\n",
+ serv->sv_name, -err);
return NULL;
}
set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
err = kernel_getpeername(newsock, sin, &slen);
if (err < 0) {
- if (net_ratelimit())
- printk(KERN_WARNING "%s: peername failed (err %d)!\n",
- serv->sv_name, -err);
+ net_warn_ratelimited("%s: peername failed (err %d)!\n",
+ serv->sv_name, -err);
goto failed; /* aborted connection or whatever */
}
@@ -1012,19 +1008,15 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
* bit set in the fragment length header.
* But apparently no known nfs clients send fragmented
* records. */
- if (net_ratelimit())
- printk(KERN_NOTICE "RPC: multiple fragments "
- "per record not supported\n");
+ net_notice_ratelimited("RPC: multiple fragments per record not supported\n");
goto err_delete;
}
svsk->sk_reclen &= RPC_FRAGMENT_SIZE_MASK;
dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
if (svsk->sk_reclen > serv->sv_max_mesg) {
- if (net_ratelimit())
- printk(KERN_NOTICE "RPC: "
- "fragment too large: 0x%08lx\n",
- (unsigned long)svsk->sk_reclen);
+ net_notice_ratelimited("RPC: fragment too large: 0x%08lx\n",
+ (unsigned long)svsk->sk_reclen);
goto err_delete;
}
}
@@ -1556,7 +1548,7 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv,
(char *)&val, sizeof(val));
if (type == SOCK_STREAM)
- sock->sk->sk_reuse = 1; /* allow address reuse */
+ sock->sk->sk_reuse = SK_CAN_REUSE; /* allow address reuse */
error = kernel_bind(sock, sin, len);
if (error < 0)
goto bummer;
diff --git a/net/sunrpc/timer.c b/net/sunrpc/timer.c
index dd824341c349..08881d0c9672 100644
--- a/net/sunrpc/timer.c
+++ b/net/sunrpc/timer.c
@@ -34,7 +34,7 @@
void rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo)
{
unsigned long init = 0;
- unsigned i;
+ unsigned int i;
rt->timeo = timeo;
@@ -57,7 +57,7 @@ EXPORT_SYMBOL_GPL(rpc_init_rtt);
* NB: When computing the smoothed RTT and standard deviation,
* be careful not to produce negative intermediate results.
*/
-void rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m)
+void rpc_update_rtt(struct rpc_rtt *rt, unsigned int timer, long m)
{
long *srtt, *sdrtt;
@@ -106,7 +106,7 @@ EXPORT_SYMBOL_GPL(rpc_update_rtt);
* read, write, commit - A+4D
* other - timeo
*/
-unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned timer)
+unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned int timer)
{
unsigned long res;
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index b97a3dd9a60a..fddcccfcdf76 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -1204,7 +1204,7 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
int (*actor)(struct scatterlist *, void *), void *data)
{
int i, ret = 0;
- unsigned page_len, thislen, page_offset;
+ unsigned int page_len, thislen, page_offset;
struct scatterlist sg[1];
sg_init_table(sg, 1);
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 0cbcd1ab49ab..3c83035cdaa9 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -783,7 +783,7 @@ static void xprt_update_rtt(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_rtt *rtt = task->tk_client->cl_rtt;
- unsigned timer = task->tk_msg.rpc_proc->p_timer;
+ unsigned int timer = task->tk_msg.rpc_proc->p_timer;
long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));
if (timer) {
@@ -979,20 +979,21 @@ static void xprt_alloc_slot(struct rpc_task *task)
list_del(&req->rq_list);
goto out_init_req;
}
- req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT);
+ req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
if (!IS_ERR(req))
goto out_init_req;
switch (PTR_ERR(req)) {
case -ENOMEM:
- rpc_delay(task, HZ >> 2);
dprintk("RPC: dynamic allocation of request slot "
"failed! Retrying\n");
+ task->tk_status = -ENOMEM;
break;
case -EAGAIN:
rpc_sleep_on(&xprt->backlog, task, NULL);
dprintk("RPC: waiting for request slot\n");
+ default:
+ task->tk_status = -EAGAIN;
}
- task->tk_status = -EAGAIN;
return;
out_init_req:
task->tk_status = 0;
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index c3e65aebecc0..e3a6e37cd1c5 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -26,10 +26,6 @@
#include <linux/if_ether.h>
#endif
-#ifdef CONFIG_TR
-#include <linux/if_tr.h>
-#endif
-
static struct ctl_table_set *
net_ctl_header_lookup(struct ctl_table_root *root, struct nsproxy *namespaces)
{
@@ -59,19 +55,6 @@ static struct ctl_table_root net_sysctl_root = {
.permissions = net_ctl_permissions,
};
-static int net_ctl_ro_header_perms(struct ctl_table_root *root,
- struct nsproxy *namespaces, struct ctl_table *table)
-{
- if (net_eq(namespaces->net_ns, &init_net))
- return table->mode;
- else
- return table->mode & ~0222;
-}
-
-static struct ctl_table_root net_sysctl_ro_root = {
- .permissions = net_ctl_ro_header_perms,
-};
-
static int __net_init sysctl_net_init(struct net *net)
{
setup_sysctl_set(&net->sysctls, &net_sysctl_root, is_seen);
@@ -88,34 +71,32 @@ static struct pernet_operations sysctl_pernet_ops = {
.exit = sysctl_net_exit,
};
-static __init int net_sysctl_init(void)
+static struct ctl_table_header *net_header;
+__init int net_sysctl_init(void)
{
- int ret;
+ static struct ctl_table empty[1];
+ int ret = -ENOMEM;
+ /* Avoid limitations in the sysctl implementation by
+ * registering "/proc/sys/net" as an empty directory not in a
+ * network namespace.
+ */
+ net_header = register_sysctl("net", empty);
+ if (!net_header)
+ goto out;
ret = register_pernet_subsys(&sysctl_pernet_ops);
if (ret)
goto out;
- setup_sysctl_set(&net_sysctl_ro_root.default_set, &net_sysctl_ro_root, NULL);
- register_sysctl_root(&net_sysctl_ro_root);
register_sysctl_root(&net_sysctl_root);
out:
return ret;
}
-subsys_initcall(net_sysctl_init);
-
-struct ctl_table_header *register_net_sysctl_table(struct net *net,
- const struct ctl_path *path, struct ctl_table *table)
-{
- return __register_sysctl_paths(&net->sysctls, path, table);
-}
-EXPORT_SYMBOL_GPL(register_net_sysctl_table);
-struct ctl_table_header *register_net_sysctl_rotable(const
- struct ctl_path *path, struct ctl_table *table)
+struct ctl_table_header *register_net_sysctl(struct net *net,
+ const char *path, struct ctl_table *table)
{
- return __register_sysctl_paths(&net_sysctl_ro_root.default_set,
- path, table);
+ return __register_sysctl_table(&net->sysctls, path, table);
}
-EXPORT_SYMBOL_GPL(register_net_sysctl_rotable);
+EXPORT_SYMBOL_GPL(register_net_sysctl);
void unregister_net_sysctl_table(struct ctl_table_header *header)
{
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index 521d24d04ab2..6cd55d671d3a 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
@@ -9,5 +9,3 @@ tipc-y += addr.o bcast.o bearer.o config.o \
name_distr.o subscr.o name_table.o net.o \
netlink.o node.o node_subscr.o port.o ref.o \
socket.o log.o eth_media.o
-
-# End of file
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index a6fdab33877e..357b74b26f9e 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -45,7 +45,6 @@
*
* Returns 1 if domain address is valid, otherwise 0
*/
-
int tipc_addr_domain_valid(u32 addr)
{
u32 n = tipc_node(addr);
@@ -66,7 +65,6 @@ int tipc_addr_domain_valid(u32 addr)
*
* Returns 1 if address can be used, otherwise 0
*/
-
int tipc_addr_node_valid(u32 addr)
{
return tipc_addr_domain_valid(addr) && tipc_node(addr);
@@ -86,7 +84,6 @@ int tipc_in_scope(u32 domain, u32 addr)
/**
* tipc_addr_scope - convert message lookup domain to a 2-bit scope value
*/
-
int tipc_addr_scope(u32 domain)
{
if (likely(!domain))
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
index e4f35afe3207..60b00ab93d74 100644
--- a/net/tipc/addr.h
+++ b/net/tipc/addr.h
@@ -50,18 +50,33 @@ static inline u32 tipc_cluster_mask(u32 addr)
return addr & TIPC_CLUSTER_MASK;
}
-static inline int in_own_cluster(u32 addr)
+static inline int in_own_cluster_exact(u32 addr)
{
return !((addr ^ tipc_own_addr) >> 12);
}
/**
+ * in_own_node - test for node inclusion; <0.0.0> always matches
+ */
+static inline int in_own_node(u32 addr)
+{
+ return (addr == tipc_own_addr) || !addr;
+}
+
+/**
+ * in_own_cluster - test for cluster inclusion; <0.0.0> always matches
+ */
+static inline int in_own_cluster(u32 addr)
+{
+ return in_own_cluster_exact(addr) || !addr;
+}
+
+/**
* addr_domain - convert 2-bit scope value to equivalent message lookup domain
*
* Needed when address of a named message must be looked up a second time
* after a network hop.
*/
-
static inline u32 addr_domain(u32 sc)
{
if (likely(sc == TIPC_NODE_SCOPE))
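
The new in_own_node()/in_own_cluster() helpers treat the all-zero address <0.0.0> as a wildcard that always matches, while the renamed in_own_cluster_exact() keeps the strict comparison (the bearer.c hunk below switches to it). A compact model of the three predicates (own_addr value chosen for illustration):

#include <stdio.h>

static unsigned int own_addr = 0x01001001;	/* <1.1.1> */

static int in_own_cluster_exact(unsigned int addr)
{
	return !((addr ^ own_addr) >> 12);	/* same zone+cluster bits */
}

static int in_own_node(unsigned int addr)
{
	return addr == own_addr || !addr;	/* <0.0.0> wildcard */
}

static int in_own_cluster(unsigned int addr)
{
	return in_own_cluster_exact(addr) || !addr;
}

int main(void)
{
	printf("%d %d %d\n",
	       in_own_cluster_exact(0),	/* 0: strict check fails */
	       in_own_node(0),		/* 1: wildcard matches */
	       in_own_cluster(0));	/* 1: wildcard matches */
	return 0;
}
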
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index e00441a2092f..2625f5ebe3e8 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -73,7 +73,6 @@ struct tipc_bcbearer_pair {
* large local variables within multicast routines. Concurrent access is
* prevented through use of the spinlock "bc_lock".
*/
-
struct tipc_bcbearer {
struct tipc_bearer bearer;
struct tipc_media media;
@@ -92,7 +91,6 @@ struct tipc_bcbearer {
*
* Handles sequence numbering, fragmentation, bundling, etc.
*/
-
struct tipc_bclink {
struct tipc_link link;
struct tipc_node node;
@@ -169,7 +167,6 @@ static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
*
* Called with bc_lock locked
*/
-
struct tipc_node *tipc_bclink_retransmit_to(void)
{
return bclink->retransmit_to;
@@ -182,7 +179,6 @@ struct tipc_node *tipc_bclink_retransmit_to(void)
*
* Called with bc_lock locked
*/
-
static void bclink_retransmit_pkt(u32 after, u32 to)
{
struct sk_buff *buf;
@@ -200,7 +196,6 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
*
* Node is locked, bc_lock unlocked.
*/
-
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
struct sk_buff *crs;
@@ -280,7 +275,6 @@ exit:
*
* tipc_net_lock and node lock set
*/
-
void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
{
struct sk_buff *buf;
@@ -344,7 +338,6 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
*
* Only tipc_net_lock set.
*/
-
static void bclink_peek_nack(struct tipc_msg *msg)
{
struct tipc_node *n_ptr = tipc_node_find(msg_destnode(msg));
@@ -365,7 +358,6 @@ static void bclink_peek_nack(struct tipc_msg *msg)
/*
* tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
*/
-
int tipc_bclink_send_msg(struct sk_buff *buf)
{
int res;
@@ -394,7 +386,6 @@ exit:
*
* Called with both sending node's lock and bc_lock taken.
*/
-
static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
{
bclink_update_last_sent(node, seqno);
@@ -420,7 +411,6 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
*
* tipc_net_lock is read_locked, no other locks set
*/
-
void tipc_bclink_recv_pkt(struct sk_buff *buf)
{
struct tipc_msg *msg = buf_msg(buf);
@@ -588,7 +578,6 @@ u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
* Returns 0 (packet sent successfully) under all circumstances,
* since the broadcast link's pseudo-bearer never blocks
*/
-
static int tipc_bcbearer_send(struct sk_buff *buf,
struct tipc_bearer *unused1,
struct tipc_media_addr *unused2)
@@ -601,7 +590,6 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
* preparation is skipped for broadcast link protocol messages
* since they are sent in an unreliable manner and don't need it
*/
-
if (likely(!msg_non_seq(buf_msg(buf)))) {
struct tipc_msg *msg;
@@ -618,7 +606,6 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
}
/* Send buffer over bearers until all targets reached */
-
bcbearer->remains = bclink->bcast_nodes;
for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
@@ -660,7 +647,6 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
/**
* tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
*/
-
void tipc_bcbearer_sort(void)
{
struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
@@ -671,7 +657,6 @@ void tipc_bcbearer_sort(void)
spin_lock_bh(&bc_lock);
/* Group bearers by priority (can assume max of two per priority) */
-
memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
@@ -687,7 +672,6 @@ void tipc_bcbearer_sort(void)
}
/* Create array of bearer pairs for broadcasting */
-
bp_curr = bcbearer->bpairs;
memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));
@@ -817,7 +801,6 @@ void tipc_bclink_stop(void)
/**
* tipc_nmap_add - add a node to a node map
*/
-
void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
int n = tipc_node(node);
@@ -833,7 +816,6 @@ void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
/**
* tipc_nmap_remove - remove a node from a node map
*/
-
void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
{
int n = tipc_node(node);
@@ -852,7 +834,6 @@ void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
* @nm_b: input node map B
* @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
*/
-
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
struct tipc_node_map *nm_b,
struct tipc_node_map *nm_diff)
@@ -878,7 +859,6 @@ static void tipc_nmap_diff(struct tipc_node_map *nm_a,
/**
* tipc_port_list_add - add a port to a port list, ensuring no duplicates
*/
-
void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port)
{
struct tipc_port_list *item = pl_ptr;
@@ -912,7 +892,6 @@ void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port)
* tipc_port_list_free - free dynamically created entries in port_list chain
*
*/
-
void tipc_port_list_free(struct tipc_port_list *pl_ptr)
{
struct tipc_port_list *item;
@@ -923,4 +902,3 @@ void tipc_port_list_free(struct tipc_port_list *pl_ptr)
kfree(item);
}
}
-
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 5571394098f9..a93306557e00 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -45,7 +45,6 @@
* @count: # of nodes in set
* @map: bitmap of node identifiers that are in the set
*/
-
struct tipc_node_map {
u32 count;
u32 map[MAX_NODES / WSIZE];
@@ -59,7 +58,6 @@ struct tipc_node_map {
* @next: pointer to next entry in list
* @ports: array of port references
*/
-
struct tipc_port_list {
int count;
struct tipc_port_list *next;
@@ -77,7 +75,6 @@ void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
/**
* tipc_nmap_equal - test for equality of node maps
*/
-
static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b)
{
return !memcmp(nm_a, nm_b, sizeof(*nm_a));
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 5dfd89c40429..a297e3a2e3e7 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -53,7 +53,6 @@ static void bearer_disable(struct tipc_bearer *b_ptr);
*
* Returns 1 if media name is valid, otherwise 0.
*/
-
static int media_name_valid(const char *name)
{
u32 len;
@@ -67,7 +66,6 @@ static int media_name_valid(const char *name)
/**
* tipc_media_find - locates specified media object by name
*/
-
struct tipc_media *tipc_media_find(const char *name)
{
u32 i;
@@ -82,7 +80,6 @@ struct tipc_media *tipc_media_find(const char *name)
/**
* media_find_id - locates specified media object by type identifier
*/
-
static struct tipc_media *media_find_id(u8 type)
{
u32 i;
@@ -99,7 +96,6 @@ static struct tipc_media *media_find_id(u8 type)
*
* Bearers for this media type must be activated separately at a later stage.
*/
-
int tipc_register_media(struct tipc_media *m_ptr)
{
int res = -EINVAL;
@@ -134,7 +130,6 @@ exit:
/**
* tipc_media_addr_printf - record media address in print buffer
*/
-
void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
{
char addr_str[MAX_ADDR_STR];
@@ -156,7 +151,6 @@ void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
/**
* tipc_media_get_names - record names of registered media in buffer
*/
-
struct sk_buff *tipc_media_get_names(void)
{
struct sk_buff *buf;
@@ -183,7 +177,6 @@ struct sk_buff *tipc_media_get_names(void)
*
* Returns 1 if bearer name is valid, otherwise 0.
*/
-
static int bearer_name_validate(const char *name,
struct tipc_bearer_names *name_parts)
{
@@ -194,7 +187,6 @@ static int bearer_name_validate(const char *name,
u32 if_len;
/* copy bearer name & ensure length is OK */
-
name_copy[TIPC_MAX_BEARER_NAME - 1] = 0;
/* need above in case non-Posix strncpy() doesn't pad with nulls */
strncpy(name_copy, name, TIPC_MAX_BEARER_NAME);
@@ -202,7 +194,6 @@ static int bearer_name_validate(const char *name,
return 0;
/* ensure all component parts of bearer name are present */
-
media_name = name_copy;
if_name = strchr(media_name, ':');
if (if_name == NULL)
@@ -212,7 +203,6 @@ static int bearer_name_validate(const char *name,
if_len = strlen(if_name) + 1;
/* validate component parts of bearer name */
-
if ((media_len <= 1) || (media_len > TIPC_MAX_MEDIA_NAME) ||
(if_len <= 1) || (if_len > TIPC_MAX_IF_NAME) ||
(strspn(media_name, tipc_alphabet) != (media_len - 1)) ||
@@ -220,7 +210,6 @@ static int bearer_name_validate(const char *name,
return 0;
/* return bearer name components, if necessary */
-
if (name_parts) {
strcpy(name_parts->media_name, media_name);
strcpy(name_parts->if_name, if_name);
@@ -231,7 +220,6 @@ static int bearer_name_validate(const char *name,
/**
* tipc_bearer_find - locates bearer object with matching bearer name
*/
-
struct tipc_bearer *tipc_bearer_find(const char *name)
{
struct tipc_bearer *b_ptr;
@@ -247,7 +235,6 @@ struct tipc_bearer *tipc_bearer_find(const char *name)
/**
* tipc_bearer_find_interface - locates bearer object with matching interface name
*/
-
struct tipc_bearer *tipc_bearer_find_interface(const char *if_name)
{
struct tipc_bearer *b_ptr;
@@ -267,7 +254,6 @@ struct tipc_bearer *tipc_bearer_find_interface(const char *if_name)
/**
* tipc_bearer_get_names - record names of bearers in buffer
*/
-
struct sk_buff *tipc_bearer_get_names(void)
{
struct sk_buff *buf;
@@ -363,7 +349,6 @@ void tipc_continue(struct tipc_bearer *b_ptr)
* the bearer is congested. 'tipc_net_lock' is in read_lock here
* bearer.lock is busy
*/
-
static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr,
struct tipc_link *l_ptr)
{
@@ -377,7 +362,6 @@ static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr,
* the bearer is congested. 'tipc_net_lock' is in read_lock here,
* bearer.lock is free
*/
-
void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr)
{
spin_lock_bh(&b_ptr->lock);
@@ -410,7 +394,6 @@ int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr,
/**
* tipc_bearer_congested - determines if bearer is currently congested
*/
-
int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr)
{
if (unlikely(b_ptr->blocked))
@@ -423,7 +406,6 @@ int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr)
/**
* tipc_enable_bearer - enable bearer with the given name
*/
-
int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
{
struct tipc_bearer *b_ptr;
@@ -449,7 +431,7 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
if (tipc_in_scope(disc_domain, tipc_own_addr)) {
disc_domain = tipc_own_addr & TIPC_CLUSTER_MASK;
res = 0; /* accept any node in own cluster */
- } else if (in_own_cluster(disc_domain))
+ } else if (in_own_cluster_exact(disc_domain))
res = 0; /* accept specified node in own cluster */
}
if (res) {
@@ -541,7 +523,6 @@ exit:
* tipc_block_bearer(): Block the bearer with the given name,
* and reset all its links
*/
-
int tipc_block_bearer(const char *name)
{
struct tipc_bearer *b_ptr = NULL;
@@ -573,11 +554,10 @@ int tipc_block_bearer(const char *name)
}
/**
- * bearer_disable -
+ * bearer_disable
*
* Note: This routine assumes caller holds tipc_net_lock.
*/
-
static void bearer_disable(struct tipc_bearer *b_ptr)
{
struct tipc_link *l_ptr;
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index d3eac56b8c21..e3b2be37fb31 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -49,7 +49,6 @@
* - media type identifier located at offset 3
* - remaining bytes vary according to media type
*/
-
#define TIPC_MEDIA_ADDR_SIZE 20
#define TIPC_MEDIA_TYPE_OFFSET 3
@@ -64,7 +63,6 @@
* @media_id: TIPC media type identifier
* @broadcast: non-zero if address is a broadcast address
*/
-
struct tipc_media_addr {
u8 value[TIPC_MEDIA_ADDR_SIZE];
u8 media_id;
@@ -89,7 +87,6 @@ struct tipc_bearer;
* @type_id: TIPC media identifier
* @name: media name
*/
-
struct tipc_media {
int (*send_msg)(struct sk_buff *buf,
struct tipc_bearer *b_ptr,
@@ -216,7 +213,6 @@ void tipc_bearer_lock_push(struct tipc_bearer *b_ptr);
* send routine always returns success -- even if the buffer was not sent --
* and let TIPC's link code deal with the undelivered message.
*/
-
static inline int tipc_bearer_send(struct tipc_bearer *b_ptr,
struct sk_buff *buf,
struct tipc_media_addr *dest)
diff --git a/net/tipc/config.c b/net/tipc/config.c
index f76d3b15e4e2..c5712a343810 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -131,7 +131,6 @@ static struct sk_buff *tipc_show_stats(void)
tipc_printf(&pb, "TIPC version " TIPC_MOD_VER "\n");
/* Use additional tipc_printf()'s to return more info ... */
-
str_len = tipc_printbuf_validate(&pb);
skb_put(buf, TLV_SPACE(str_len));
TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -191,7 +190,6 @@ static struct sk_buff *cfg_set_own_addr(void)
* configuration commands can't be received until a local configuration
* command to enable the first bearer is received and processed.
*/
-
spin_unlock_bh(&config_lock);
tipc_core_start_net(addr);
spin_lock_bh(&config_lock);
@@ -283,14 +281,12 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
spin_lock_bh(&config_lock);
/* Save request and reply details in a well-known location */
-
req_tlv_area = request_area;
req_tlv_space = request_space;
rep_headroom = reply_headroom;
/* Check command authorization */
-
- if (likely(orig_node == tipc_own_addr)) {
+ if (likely(in_own_node(orig_node))) {
/* command is permitted */
} else if (cmd >= 0x8000) {
rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
@@ -310,7 +306,6 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
}
/* Call appropriate processing routine */
-
switch (cmd) {
case TIPC_CMD_NOOP:
rep_tlv_buf = tipc_cfg_reply_none();
@@ -433,7 +428,6 @@ static void cfg_named_msg_event(void *userdata,
struct sk_buff *rep_buf;
/* Validate configuration message header (ignore invalid message) */
-
req_hdr = (struct tipc_cfg_msg_hdr *)msg;
if ((size < sizeof(*req_hdr)) ||
(size != TCM_ALIGN(ntohl(req_hdr->tcm_len))) ||
@@ -443,7 +437,6 @@ static void cfg_named_msg_event(void *userdata,
}
/* Generate reply for request (if can't, return request) */
-
rep_buf = tipc_cfg_do_cmd(orig->node,
ntohs(req_hdr->tcm_type),
msg + sizeof(*req_hdr),
@@ -489,10 +482,23 @@ failed:
return res;
}
+void tipc_cfg_reinit(void)
+{
+ struct tipc_name_seq seq;
+ int res;
+
+ seq.type = TIPC_CFG_SRV;
+ seq.lower = seq.upper = 0;
+ tipc_withdraw(config_port_ref, TIPC_ZONE_SCOPE, &seq);
+
+ seq.lower = seq.upper = tipc_own_addr;
+ res = tipc_publish(config_port_ref, TIPC_ZONE_SCOPE, &seq);
+ if (res)
+ err("Unable to reinitialize configuration service\n");
+}
+
void tipc_cfg_stop(void)
{
- if (config_port_ref) {
- tipc_deleteport(config_port_ref);
- config_port_ref = 0;
- }
+ tipc_deleteport(config_port_ref);
+ config_port_ref = 0;
}
diff --git a/net/tipc/config.h b/net/tipc/config.h
index 80da6ebc2785..1f252f3fa058 100644
--- a/net/tipc/config.h
+++ b/net/tipc/config.h
@@ -66,6 +66,7 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd,
int headroom);
int tipc_cfg_init(void);
+void tipc_cfg_reinit(void);
void tipc_cfg_stop(void);
#endif
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 68eba03e7955..f7b95239ebda 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -52,14 +52,12 @@
#endif
/* global variables used by multiple sub-systems within TIPC */
-
int tipc_random;
const char tipc_alphabet[] =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.";
/* configurable TIPC parameters */
-
u32 tipc_own_addr;
int tipc_max_ports;
int tipc_max_subscriptions;
@@ -77,7 +75,6 @@ int tipc_remote_management;
* NOTE: Headroom is reserved to allow prepending of a data link header.
* There may also be unrequested tailroom present at the buffer's end.
*/
-
struct sk_buff *tipc_buf_acquire(u32 size)
{
struct sk_buff *skb;
@@ -95,7 +92,6 @@ struct sk_buff *tipc_buf_acquire(u32 size)
/**
* tipc_core_stop_net - shut down TIPC networking sub-systems
*/
-
static void tipc_core_stop_net(void)
{
tipc_net_stop();
@@ -105,7 +101,6 @@ static void tipc_core_stop_net(void)
/**
* start_net - start TIPC networking sub-systems
*/
-
int tipc_core_start_net(unsigned long addr)
{
int res;
@@ -121,7 +116,6 @@ int tipc_core_start_net(unsigned long addr)
/**
* tipc_core_stop - switch TIPC from SINGLE NODE to NOT RUNNING mode
*/
-
static void tipc_core_stop(void)
{
tipc_netlink_stop();
@@ -137,7 +131,6 @@ static void tipc_core_stop(void)
/**
* tipc_core_start - switch TIPC from NOT RUNNING to SINGLE NODE mode
*/
-
static int tipc_core_start(void)
{
int res;
@@ -150,9 +143,9 @@ static int tipc_core_start(void)
if (!res)
res = tipc_nametbl_init();
if (!res)
- res = tipc_k_signal((Handler)tipc_subscr_start, 0);
+ res = tipc_subscr_start();
if (!res)
- res = tipc_k_signal((Handler)tipc_cfg_init, 0);
+ res = tipc_cfg_init();
if (!res)
res = tipc_netlink_start();
if (!res)
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 13837e0e56b1..2a9bb99537b3 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -85,7 +85,6 @@ void tipc_printf(struct print_buf *, const char *fmt, ...);
/*
* TIPC_OUTPUT is the destination print buffer for system messages.
*/
-
#ifndef TIPC_OUTPUT
#define TIPC_OUTPUT TIPC_LOG
#endif
@@ -102,7 +101,6 @@ void tipc_printf(struct print_buf *, const char *fmt, ...);
/*
* DBG_OUTPUT is the destination print buffer for debug messages.
*/
-
#ifndef DBG_OUTPUT
#define DBG_OUTPUT TIPC_LOG
#endif
@@ -126,13 +124,11 @@ void tipc_msg_dbg(struct print_buf *, struct tipc_msg *, const char *);
/*
* TIPC-specific error codes
*/
-
#define ELINKCONG EAGAIN /* link congestion <=> resource unavailable */
/*
* Global configuration variables
*/
-
extern u32 tipc_own_addr;
extern int tipc_max_ports;
extern int tipc_max_subscriptions;
@@ -143,7 +139,6 @@ extern int tipc_remote_management;
/*
* Other global variables
*/
-
extern int tipc_random;
extern const char tipc_alphabet[];
@@ -151,7 +146,6 @@ extern const char tipc_alphabet[];
/*
* Routines available to privileged subsystems
*/
-
extern int tipc_core_start_net(unsigned long);
extern int tipc_handler_start(void);
extern void tipc_handler_stop(void);
@@ -163,7 +157,6 @@ extern void tipc_socket_stop(void);
/*
* TIPC timer and signal code
*/
-
typedef void (*Handler) (unsigned long);
u32 tipc_k_signal(Handler routine, unsigned long argument);
@@ -176,7 +169,6 @@ u32 tipc_k_signal(Handler routine, unsigned long argument);
*
* Timer must be initialized before use (and terminated when no longer needed).
*/
-
static inline void k_init_timer(struct timer_list *timer, Handler routine,
unsigned long argument)
{
@@ -196,7 +188,6 @@ static inline void k_init_timer(struct timer_list *timer, Handler routine,
* then an additional jiffy is added to account for the fact that
* the starting time may be in the middle of the current jiffy.
*/
-
static inline void k_start_timer(struct timer_list *timer, unsigned long msec)
{
mod_timer(timer, jiffies + msecs_to_jiffies(msec) + 1);
@@ -212,7 +203,6 @@ static inline void k_start_timer(struct timer_list *timer, unsigned long msec)
* WARNING: Must not be called when holding locks required by the timer's
* timeout routine, otherwise deadlock can occur on SMP systems!
*/
-
static inline void k_cancel_timer(struct timer_list *timer)
{
del_timer_sync(timer);
@@ -229,12 +219,10 @@ static inline void k_cancel_timer(struct timer_list *timer)
* (Do not "enhance" this routine to automatically cancel an active timer,
* otherwise deadlock can arise when a timeout routine calls k_term_timer.)
*/
-
static inline void k_term_timer(struct timer_list *timer)
{
}
-
/*
* TIPC message buffer code
*
@@ -244,7 +232,6 @@ static inline void k_term_timer(struct timer_list *timer)
* Note: Headroom should be a multiple of 4 to ensure the TIPC header fields
* are word aligned for quicker access
*/
-
#define BUF_HEADROOM LL_MAX_HEADER
struct tipc_skb_cb {
@@ -253,7 +240,6 @@ struct tipc_skb_cb {
#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
-
static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
{
return (struct tipc_msg *)skb->data;
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index c630a21b2bed..ae054cfe179f 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -70,7 +70,6 @@ struct tipc_link_req {
* @dest_domain: network domain of node(s) which should respond to message
* @b_ptr: ptr to bearer issuing message
*/
-
static struct sk_buff *tipc_disc_init_msg(u32 type,
u32 dest_domain,
struct tipc_bearer *b_ptr)
@@ -96,7 +95,6 @@ static struct sk_buff *tipc_disc_init_msg(u32 type,
* @node_addr: duplicated node address
* @media_addr: media address advertised by duplicated node
*/
-
static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
struct tipc_media_addr *media_addr)
{
@@ -117,7 +115,6 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
* @buf: buffer containing message
* @b_ptr: bearer that message arrived on
*/
-
void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
{
struct tipc_node *n_ptr;
@@ -221,7 +218,6 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
* the new media address and reset the link to ensure it starts up
* cleanly.
*/
-
if (addr_mismatch) {
if (tipc_link_is_up(link)) {
disc_dupl_alert(b_ptr, orig, &media_addr);
@@ -264,7 +260,6 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
* Reinitiates discovery process if discovery object has no associated nodes
* and is either not currently searching or is searching at a slow rate
*/
-
static void disc_update(struct tipc_link_req *req)
{
if (!req->num_nodes) {
@@ -280,7 +275,6 @@ static void disc_update(struct tipc_link_req *req)
* tipc_disc_add_dest - increment set of discovered nodes
* @req: ptr to link request structure
*/
-
void tipc_disc_add_dest(struct tipc_link_req *req)
{
req->num_nodes++;
@@ -290,7 +284,6 @@ void tipc_disc_add_dest(struct tipc_link_req *req)
* tipc_disc_remove_dest - decrement set of discovered nodes
* @req: ptr to link request structure
*/
-
void tipc_disc_remove_dest(struct tipc_link_req *req)
{
req->num_nodes--;
@@ -301,7 +294,6 @@ void tipc_disc_remove_dest(struct tipc_link_req *req)
* disc_send_msg - send link setup request message
* @req: ptr to link request structure
*/
-
static void disc_send_msg(struct tipc_link_req *req)
{
if (!req->bearer->blocked)
@@ -314,7 +306,6 @@ static void disc_send_msg(struct tipc_link_req *req)
*
* Called whenever a link setup request timer associated with a bearer expires.
*/
-
static void disc_timeout(struct tipc_link_req *req)
{
int max_delay;
@@ -322,7 +313,6 @@ static void disc_timeout(struct tipc_link_req *req)
spin_lock_bh(&req->bearer->lock);
/* Stop searching if only desired node has been found */
-
if (tipc_node(req->domain) && req->num_nodes) {
req->timer_intv = TIPC_LINK_REQ_INACTIVE;
goto exit;
@@ -335,7 +325,6 @@ static void disc_timeout(struct tipc_link_req *req)
* hold at fast polling rate if we don't have any associated nodes,
* otherwise hold at slow polling rate
*/
-
disc_send_msg(req);
req->timer_intv *= 2;
@@ -359,7 +348,6 @@ exit:
*
* Returns 0 if successful, otherwise -errno.
*/
-
int tipc_disc_create(struct tipc_bearer *b_ptr,
struct tipc_media_addr *dest, u32 dest_domain)
{
@@ -391,7 +379,6 @@ int tipc_disc_create(struct tipc_bearer *b_ptr,
* tipc_disc_delete - destroy object sending periodic link setup requests
* @req: ptr to link request structure
*/
-
void tipc_disc_delete(struct tipc_link_req *req)
{
k_cancel_timer(&req->timer);
@@ -399,4 +386,3 @@ void tipc_disc_delete(struct tipc_link_req *req)
kfree_skb(req->buf);
kfree(req);
}
-
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 527e3f0e165d..90ac9bfa7abb 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -48,7 +48,6 @@
* @tipc_packet_type: used in binding TIPC to Ethernet driver
* @cleanup: work item used when disabling bearer
*/
-
struct eth_bearer {
struct tipc_bearer *bearer;
struct net_device *dev;
@@ -67,7 +66,6 @@ static struct notifier_block notifier;
* Media-dependent "value" field stores MAC address in first 6 bytes
* and zeroes out the remaining bytes.
*/
-
static void eth_media_addr_set(struct tipc_media_addr *a, char *mac)
{
memcpy(a->value, mac, ETH_ALEN);
@@ -79,7 +77,6 @@ static void eth_media_addr_set(struct tipc_media_addr *a, char *mac)
/**
* send_msg - send a TIPC message out over an Ethernet interface
*/
-
static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
struct tipc_media_addr *dest)
{
@@ -115,7 +112,6 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
* ignores packets sent using Ethernet multicast, and traffic sent to other
* nodes (which can happen if interface is running in promiscuous mode).
*/
-
static int recv_msg(struct sk_buff *buf, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
@@ -140,7 +136,6 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev,
/**
* enable_bearer - attach TIPC bearer to an Ethernet interface
*/
-
static int enable_bearer(struct tipc_bearer *tb_ptr)
{
struct net_device *dev = NULL;
@@ -151,7 +146,6 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
int pending_dev = 0;
/* Find unused Ethernet bearer structure */
-
while (eb_ptr->dev) {
if (!eb_ptr->bearer)
pending_dev++;
@@ -160,7 +154,6 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
}
/* Find device with specified name */
-
read_lock(&dev_base_lock);
for_each_netdev(&init_net, pdev) {
if (!strncmp(pdev->name, driver_name, IFNAMSIZ)) {
@@ -174,7 +167,6 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
return -ENODEV;
/* Create Ethernet bearer for device */
-
eb_ptr->dev = dev;
eb_ptr->tipc_packet_type.type = htons(ETH_P_TIPC);
eb_ptr->tipc_packet_type.dev = dev;
@@ -184,7 +176,6 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
dev_add_pack(&eb_ptr->tipc_packet_type);
/* Associate TIPC bearer with Ethernet bearer */
-
eb_ptr->bearer = tb_ptr;
tb_ptr->usr_handle = (void *)eb_ptr;
tb_ptr->mtu = dev->mtu;
@@ -198,7 +189,6 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
*
* This routine must be invoked from a work queue because it can sleep.
*/
-
static void cleanup_bearer(struct work_struct *work)
{
struct eth_bearer *eb_ptr =
@@ -216,7 +206,6 @@ static void cleanup_bearer(struct work_struct *work)
* then get worker thread to complete bearer cleanup. (Can't do cleanup
* here because cleanup code needs to sleep and caller holds spinlocks.)
*/
-
static void disable_bearer(struct tipc_bearer *tb_ptr)
{
struct eth_bearer *eb_ptr = (struct eth_bearer *)tb_ptr->usr_handle;
@@ -232,7 +221,6 @@ static void disable_bearer(struct tipc_bearer *tb_ptr)
* Change the state of the Ethernet bearer (if any) associated with the
* specified device.
*/
-
static int recv_notification(struct notifier_block *nb, unsigned long evt,
void *dv)
{
@@ -281,7 +269,6 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt,
/**
* eth_addr2str - convert Ethernet address to string
*/
-
static int eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size)
{
if (str_size < 18) /* 18 = sizeof("aa:bb:cc:dd:ee:ff") */
@@ -294,7 +281,6 @@ static int eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size)
/**
* eth_str2addr - convert string to Ethernet address
*/
-
static int eth_str2addr(struct tipc_media_addr *a, char *str_buf)
{
char mac[ETH_ALEN];
@@ -314,7 +300,6 @@ static int eth_str2addr(struct tipc_media_addr *a, char *str_buf)
/**
* eth_addr2msg - convert Ethernet address format to message header format
*/
-
static int eth_addr2msg(struct tipc_media_addr *a, char *msg_area)
{
memset(msg_area, 0, TIPC_MEDIA_ADDR_SIZE);
@@ -326,7 +311,6 @@ static int eth_addr2msg(struct tipc_media_addr *a, char *msg_area)
/**
* eth_msg2addr - convert message header address format to Ethernet format
*/
-
static int eth_msg2addr(struct tipc_media_addr *a, char *msg_area)
{
if (msg_area[TIPC_MEDIA_TYPE_OFFSET] != TIPC_MEDIA_TYPE_ETH)
@@ -339,7 +323,6 @@ static int eth_msg2addr(struct tipc_media_addr *a, char *msg_area)
/*
* Ethernet media registration info
*/
-
static struct tipc_media eth_media_info = {
.send_msg = send_msg,
.enable_bearer = enable_bearer,
@@ -363,7 +346,6 @@ static struct tipc_media eth_media_info = {
* Register Ethernet media type with TIPC bearer code. Also register
* with OS for notifications about device state changes.
*/
-
int tipc_eth_media_start(void)
{
int res;
@@ -386,7 +368,6 @@ int tipc_eth_media_start(void)
/**
* tipc_eth_media_stop - deactivate Ethernet bearer support
*/
-
void tipc_eth_media_stop(void)
{
if (!eth_started)
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index 274c98e164b7..9c6f22ff1c6d 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -129,4 +129,3 @@ void tipc_handler_stop(void)
kmem_cache_destroy(tipc_queue_item_cache);
}
-
diff --git a/net/tipc/link.c b/net/tipc/link.c
index b4b9b30167a3..7a614f43549d 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -45,13 +45,11 @@
/*
* Out-of-range value for link session numbers
*/
-
#define INVALID_SESSION 0x10000
/*
* Link state events:
*/
-
#define STARTING_EVT 856384768 /* link processing trigger */
#define TRAFFIC_MSG_EVT 560815u /* rx'd ??? */
#define TIMEOUT_EVT 560817u /* link timer expired */
@@ -67,7 +65,6 @@
/*
* State value stored in 'exp_msg_count'
*/
-
#define START_CHANGEOVER 100000u
/**
@@ -77,7 +74,6 @@
* @addr_peer: network address of node at far end
* @if_peer: name of interface at far end
*/
-
struct tipc_link_name {
u32 addr_local;
char if_local[TIPC_MAX_IF_NAME];
@@ -105,7 +101,6 @@ static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
/*
* Simple link routines
*/
-
static unsigned int align(unsigned int i)
{
return (i + 3) & ~3u;
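/*
 * align() rounds a length up to the next 4-byte boundary; a quick sanity
 * check of the mask arithmetic:
 *
 *   align(0) == 0, align(1) == 4, align(4) == 4, align(6) == 8
 */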
@@ -143,7 +138,6 @@ static u32 link_last_sent(struct tipc_link *l_ptr)
/*
* Simple non-static link routines (i.e. referenced outside this file)
*/
-
int tipc_link_is_up(struct tipc_link *l_ptr)
{
if (!l_ptr)
@@ -164,7 +158,6 @@ int tipc_link_is_active(struct tipc_link *l_ptr)
*
* Returns 1 if link name is valid, otherwise 0.
*/
-
static int link_name_validate(const char *name,
struct tipc_link_name *name_parts)
{
@@ -180,7 +173,6 @@ static int link_name_validate(const char *name,
u32 if_peer_len;
/* copy link name & ensure length is OK */
-
name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
/* need above in case non-POSIX strncpy() doesn't pad with nulls */
strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
@@ -188,7 +180,6 @@ static int link_name_validate(const char *name,
return 0;
/* ensure all component parts of link name are present */
-
addr_local = name_copy;
if_local = strchr(addr_local, ':');
if (if_local == NULL)
@@ -206,7 +197,6 @@ static int link_name_validate(const char *name,
if_peer_len = strlen(if_peer) + 1;
/* validate component parts of link name */
-
if ((sscanf(addr_local, "%u.%u.%u%c",
&z_local, &c_local, &n_local, &dummy) != 3) ||
(sscanf(addr_peer, "%u.%u.%u%c",
@@ -220,7 +210,6 @@ static int link_name_validate(const char *name,
return 0;
/* return link name components, if necessary */
-
if (name_parts) {
name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
strcpy(name_parts->if_local, if_local);
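/*
 * For reference, the strchr()/sscanf() parsing above implies link names
 * of the form "<z>.<c>.<n>:<local if>-<z>.<c>.<n>:<peer if>". A purely
 * hypothetical example that would validate:
 *
 *   "1.1.1:eth0-1.1.2:eth0"
 *
 * i.e. local node 1.1.1 via eth0, peer node 1.1.2 via its eth0.
 */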
@@ -239,13 +228,11 @@ static int link_name_validate(const char *name,
* another thread because tipc_link_delete() always cancels the link timer before
* tipc_node_delete() is called.)
*/
-
static void link_timeout(struct tipc_link *l_ptr)
{
tipc_node_lock(l_ptr->owner);
/* update counters used in statistical profiling of send traffic */
-
l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
l_ptr->stats.queue_sz_counts++;
@@ -278,7 +265,6 @@ static void link_timeout(struct tipc_link *l_ptr)
}
/* do all other link processing performed on a periodic basis */
-
link_check_defragm_bufs(l_ptr);
link_state_event(l_ptr, TIMEOUT_EVT);
@@ -302,7 +288,6 @@ static void link_set_timer(struct tipc_link *l_ptr, u32 time)
*
* Returns pointer to link.
*/
-
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
struct tipc_bearer *b_ptr,
const struct tipc_media_addr *media_addr)
@@ -383,7 +368,6 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
* This routine must not grab the node lock until after link timer cancellation
* to avoid a potential deadlock situation.
*/
-
void tipc_link_delete(struct tipc_link *l_ptr)
{
if (!l_ptr) {
@@ -419,7 +403,6 @@ static void link_start(struct tipc_link *l_ptr)
* Schedules port for renewed sending of messages after link congestion
* has abated.
*/
-
static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz)
{
struct tipc_port *p_ptr;
@@ -476,7 +459,6 @@ exit:
* link_release_outqueue - purge link's outbound message queue
* @l_ptr: pointer to link
*/
-
static void link_release_outqueue(struct tipc_link *l_ptr)
{
struct sk_buff *buf = l_ptr->first_out;
@@ -495,7 +477,6 @@ static void link_release_outqueue(struct tipc_link *l_ptr)
* tipc_link_reset_fragments - purge link's inbound message fragments queue
* @l_ptr: pointer to link
*/
-
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
struct sk_buff *buf = l_ptr->defragm_buf;
@@ -513,7 +494,6 @@ void tipc_link_reset_fragments(struct tipc_link *l_ptr)
* tipc_link_stop - purge all inbound and outbound messages associated with link
* @l_ptr: pointer to link
*/
-
void tipc_link_stop(struct tipc_link *l_ptr)
{
struct sk_buff *buf;
@@ -569,7 +549,6 @@ void tipc_link_reset(struct tipc_link *l_ptr)
}
/* Clean up all queues: */
-
link_release_outqueue(l_ptr);
kfree_skb(l_ptr->proto_msg_queue);
l_ptr->proto_msg_queue = NULL;
@@ -611,8 +590,7 @@ static void link_activate(struct tipc_link *l_ptr)
* @l_ptr: pointer to link
* @event: state machine event to process
*/
-
-static void link_state_event(struct tipc_link *l_ptr, unsigned event)
+static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
{
struct tipc_link *other;
u32 cont_intv = l_ptr->continuity_interval;
@@ -785,7 +763,6 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned event)
* link_bundle_buf(): Append contents of a buffer to
* the tail of an existing one.
*/
-
static int link_bundle_buf(struct tipc_link *l_ptr,
struct sk_buff *bundler,
struct sk_buff *buf)
@@ -860,7 +837,6 @@ static void link_add_chain_to_outqueue(struct tipc_link *l_ptr,
* inside TIPC when the 'fast path' in tipc_send_buf
* has failed, and from link_send()
*/
-
int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
{
struct tipc_msg *msg = buf_msg(buf);
@@ -872,7 +848,6 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
u32 max_packet = l_ptr->max_pkt;
/* Match msg importance against queue limits: */
-
if (unlikely(queue_size >= queue_limit)) {
if (imp <= TIPC_CRITICAL_IMPORTANCE) {
link_schedule_port(l_ptr, msg_origport(msg), size);
@@ -888,12 +863,10 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
}
/* Fragmentation needed ? */
-
if (size > max_packet)
return link_send_long_buf(l_ptr, buf);
- /* Packet can be queued or sent: */
-
+ /* Packet can be queued or sent. */
if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) &&
!link_congested(l_ptr))) {
link_add_to_outqueue(l_ptr, buf, msg);
@@ -907,13 +880,11 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
}
return dsz;
}
- /* Congestion: can message be bundled ?: */
-
+ /* Congestion: can message be bundled ? */
if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
(msg_user(msg) != MSG_FRAGMENTER)) {
/* Try adding message to an existing bundle */
-
if (l_ptr->next_out &&
link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
@@ -921,7 +892,6 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
}
/* Try creating a new bundle */
-
if (size <= max_packet * 2 / 3) {
struct sk_buff *bundler = tipc_buf_acquire(max_packet);
struct tipc_msg bundler_hdr;
@@ -951,7 +921,6 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
* not been selected yet, and the owner node is not locked
* Called by TIPC internal users, e.g. the name distributor
*/
-
int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
{
struct tipc_link *l_ptr;
@@ -984,7 +953,6 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
* small enough not to require fragmentation.
* Called without any locks held.
*/
-
void tipc_link_send_names(struct list_head *message_list, u32 dest)
{
struct tipc_node *n_ptr;
@@ -1013,7 +981,6 @@ void tipc_link_send_names(struct list_head *message_list, u32 dest)
read_unlock_bh(&tipc_net_lock);
/* discard the messages if they couldn't be sent */
-
list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
list_del((struct list_head *)buf);
kfree_skb(buf);
@@ -1026,7 +993,6 @@ void tipc_link_send_names(struct list_head *message_list, u32 dest)
* inclusive total message length. Very time critical.
* Link is locked. Returns user data length.
*/
-
static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
u32 *used_max_pkt)
{
@@ -1111,7 +1077,6 @@ again:
* Try building message using port's max_pkt hint.
* (Must not hold any locks while building message.)
*/
-
res = tipc_msg_build(hdr, msg_sect, num_sect, total_len,
sender->max_pkt, !sender->user_port, &buf);
@@ -1131,12 +1096,10 @@ exit:
}
/* Exit if build request was invalid */
-
if (unlikely(res < 0))
goto exit;
/* Exit if link (or bearer) is congested */
-
if (link_congested(l_ptr) ||
!list_empty(&l_ptr->b_ptr->cong_links)) {
res = link_schedule_port(l_ptr,
@@ -1148,7 +1111,6 @@ exit:
* Message size exceeds max_pkt hint; update hint,
* then re-try fast path or fragment the message
*/
-
sender->max_pkt = l_ptr->max_pkt;
tipc_node_unlock(node);
read_unlock_bh(&tipc_net_lock);
@@ -1166,7 +1128,6 @@ exit:
read_unlock_bh(&tipc_net_lock);
/* Couldn't find a link to the destination node */
-
if (buf)
return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
if (res >= 0)
@@ -1220,15 +1181,13 @@ again:
sect_crs = NULL;
curr_sect = -1;
- /* Prepare reusable fragment header: */
-
+ /* Prepare reusable fragment header */
tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
INT_H_SIZE, msg_destnode(hdr));
msg_set_size(&fragm_hdr, max_pkt);
msg_set_fragm_no(&fragm_hdr, 1);
- /* Prepare header of first fragment: */
-
+ /* Prepare header of first fragment */
buf_chain = buf = tipc_buf_acquire(max_pkt);
if (!buf)
return -ENOMEM;
@@ -1237,8 +1196,7 @@ again:
hsz = msg_hdr_sz(hdr);
skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);
- /* Chop up message: */
-
+ /* Chop up message */
fragm_crs = INT_H_SIZE + hsz;
fragm_rest = fragm_sz - hsz;
@@ -1329,7 +1287,6 @@ reject:
}
/* Append chain of fragments to send queue & send them */
-
l_ptr->long_msg_seq_no++;
link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
l_ptr->stats.sent_fragments += fragm_no;
@@ -1350,7 +1307,6 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr)
/* Step to position where retransmission failed, if any, */
/* consider that buffers may have been released in the meantime */
-
if (r_q_size && buf) {
u32 last = lesser(mod(r_q_head + r_q_size),
link_last_sent(l_ptr));
@@ -1365,7 +1321,6 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr)
}
/* Continue retransmission now, if there is anything: */
-
if (r_q_size && buf) {
msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
@@ -1381,7 +1336,6 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr)
}
/* Send deferred protocol message, if any: */
-
buf = l_ptr->proto_msg_queue;
if (buf) {
msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
@@ -1398,7 +1352,6 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr)
}
/* Send one deferred data message, if send window not full: */
-
buf = l_ptr->next_out;
if (buf) {
struct tipc_msg *msg = buf_msg(buf);
@@ -1478,16 +1431,12 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
warn("Retransmission failure on link <%s>\n", l_ptr->name);
if (l_ptr->addr) {
-
/* Handle failure on standard link */
-
link_print(l_ptr, "Resetting link\n");
tipc_link_reset(l_ptr);
} else {
-
/* Handle failure on broadcast link */
-
struct tipc_node *n_ptr;
char addr_string[16];
@@ -1536,7 +1485,6 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
return;
} else {
/* Detect repeated retransmit failures on uncongested bearer */
-
if (l_ptr->last_retransmitted == msg_seqno(msg)) {
if (++l_ptr->stale_count > 100) {
link_retransmit_failure(l_ptr, buf);
@@ -1571,7 +1519,6 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
/**
* link_insert_deferred_queue - insert deferred messages back into receive chain
*/
-
static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
struct sk_buff *buf)
{
@@ -1602,7 +1549,6 @@ static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
* TIPC will ignore the excess, under the assumption that it is optional info
* introduced by a later release of the protocol.
*/
-
static int link_recv_buf_validate(struct sk_buff *buf)
{
static u32 min_data_hdr_size[8] = {
@@ -1648,7 +1594,6 @@ static int link_recv_buf_validate(struct sk_buff *buf)
* Invoked with no locks held. Bearer pointer must point to a valid bearer
* structure (i.e. cannot be NULL), but bearer can be inactive.
*/
-
void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
{
read_lock_bh(&tipc_net_lock);
@@ -1666,22 +1611,18 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
head = head->next;
/* Ensure bearer is still enabled */
-
if (unlikely(!b_ptr->active))
goto cont;
/* Ensure message is well-formed */
-
if (unlikely(!link_recv_buf_validate(buf)))
goto cont;
/* Ensure message data is a single contiguous unit */
-
if (unlikely(skb_linearize(buf)))
goto cont;
/* Handle arrival of a non-unicast link message */
-
msg = buf_msg(buf);
if (unlikely(msg_non_seq(msg))) {
@@ -1693,20 +1634,17 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
}
/* Discard unicast link messages destined for another node */
-
if (unlikely(!msg_short(msg) &&
(msg_destnode(msg) != tipc_own_addr)))
goto cont;
/* Locate neighboring node that sent message */
-
n_ptr = tipc_node_find(msg_prevnode(msg));
if (unlikely(!n_ptr))
goto cont;
tipc_node_lock(n_ptr);
/* Locate unicast link endpoint that should handle message */
-
l_ptr = n_ptr->links[b_ptr->identity];
if (unlikely(!l_ptr)) {
tipc_node_unlock(n_ptr);
@@ -1714,7 +1652,6 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
}
/* Verify that communication with node is currently allowed */
-
if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
msg_user(msg) == LINK_PROTOCOL &&
(msg_type(msg) == RESET_MSG ||
@@ -1728,12 +1665,10 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
}
/* Validate message sequence number info */
-
seq_no = msg_seqno(msg);
ackd = msg_ack(msg);
/* Release acked messages */
-
if (n_ptr->bclink.supported)
tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
@@ -1752,7 +1687,6 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
}
/* Try sending any messages link endpoint has pending */
-
if (unlikely(l_ptr->next_out))
tipc_link_push_queue(l_ptr);
if (unlikely(!list_empty(&l_ptr->waiting_ports)))
@@ -1763,7 +1697,6 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
}
/* Now (finally!) process the incoming message */
-
protocol_check:
if (likely(link_working_working(l_ptr))) {
if (likely(seq_no == mod(l_ptr->next_in_no))) {
@@ -1859,7 +1792,6 @@ cont:
*
* Returns increase in queue length (i.e. 0 or 1)
*/
-
u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
struct sk_buff *buf)
{
@@ -1908,7 +1840,6 @@ u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
/*
* link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
*/
-
static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
struct sk_buff *buf)
{
@@ -1920,14 +1851,12 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
}
/* Record OOS packet arrival (force mismatch on next timeout) */
-
l_ptr->checkpoint--;
/*
* Discard packet if a duplicate; otherwise add it to deferred queue
* and notify peer of gap as per protocol specification
*/
-
if (less(seq_no, mod(l_ptr->next_in_no))) {
l_ptr->stats.duplicates++;
kfree_skb(buf);
@@ -1957,7 +1886,6 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
int r_flag;
/* Discard any previous message that was deferred due to congestion */
-
if (l_ptr->proto_msg_queue) {
kfree_skb(l_ptr->proto_msg_queue);
l_ptr->proto_msg_queue = NULL;
@@ -1967,12 +1895,10 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
return;
/* Abort non-RESET send if communication with node is prohibited */
-
if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG))
return;
/* Create protocol message with "out-of-sequence" sequence number */
-
msg_set_type(msg, msg_typ);
msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
@@ -2040,14 +1966,12 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
/* Defer message if bearer is already congested */
-
if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
l_ptr->proto_msg_queue = buf;
return;
}
/* Defer message if attempting to send results in bearer congestion */
-
if (!tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
l_ptr->proto_msg_queue = buf;
@@ -2056,7 +1980,6 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
}
/* Discard message if it was sent successfully */
-
l_ptr->unacked_window = 0;
kfree_skb(buf);
}
@@ -2066,7 +1989,6 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
* Note that network plane id propagates through the network, and may
* change at any time. The node with lowest address rules
*/
-
static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
{
u32 rec_gap = 0;
@@ -2079,7 +2001,6 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
goto exit;
/* record unnumbered packet arrival (force mismatch on next timeout) */
-
l_ptr->checkpoint--;
if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
@@ -2111,7 +2032,6 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
/* fall thru' */
case ACTIVATE_MSG:
/* Update link settings according other endpoint's values */
-
strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
msg_tol = msg_link_tolerance(msg);
@@ -2133,7 +2053,6 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
l_ptr->owner->bclink.supportable = (max_pkt_info != 0);
/* Synchronize broadcast link info, if not done previously */
-
if (!tipc_node_is_up(l_ptr->owner)) {
l_ptr->owner->bclink.last_sent =
l_ptr->owner->bclink.last_in =
@@ -2185,7 +2104,6 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
}
/* Protocol message before retransmits, reduce loss risk */
-
if (l_ptr->owner->bclink.supported)
tipc_bclink_update_link_state(l_ptr->owner,
msg_last_bcast(msg));
@@ -2243,7 +2161,6 @@ static void tipc_link_tunnel(struct tipc_link *l_ptr,
* changeover(): Send whole message queue via the remaining link
* Owner node is locked.
*/
-
void tipc_link_changeover(struct tipc_link *l_ptr)
{
u32 msgcount = l_ptr->out_queue_size;
@@ -2343,8 +2260,6 @@ void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *tunnel)
}
}
-
-
/**
* buf_extract - extracts embedded TIPC message from another message
* @skb: encapsulating message buffer
@@ -2353,7 +2268,6 @@ void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *tunnel)
* Returns a new message buffer containing an embedded message. The
* encapsulating message itself is left unchanged.
*/
-
static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
{
struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
@@ -2370,7 +2284,6 @@ static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
* link_recv_changeover_msg(): Receive tunneled packet sent
* via other link. Node is locked. Return extracted buffer.
*/
-
static int link_recv_changeover_msg(struct tipc_link **l_ptr,
struct sk_buff **buf)
{
@@ -2405,7 +2318,6 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
}
/* First original message ?: */
-
if (tipc_link_is_up(dest_link)) {
info("Resetting link <%s>, changeover initiated by peer\n",
dest_link->name);
@@ -2420,7 +2332,6 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
}
/* Receive original message */
-
if (dest_link->exp_msg_count == 0) {
warn("Link switchover error, "
"got too many tunnelled messages\n");
@@ -2469,7 +2380,6 @@ void tipc_link_recv_bundle(struct sk_buff *buf)
* Fragmentation/defragmentation:
*/
-
/*
* link_send_long_buf: Entry for buffers needing fragmentation.
* The buffer is complete, inclusive total message length.
@@ -2496,12 +2406,10 @@ static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
destaddr = msg_destnode(inmsg);
/* Prepare reusable fragment header: */
-
tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
INT_H_SIZE, destaddr);
/* Chop up message: */
-
while (rest > 0) {
struct sk_buff *fragm;
@@ -2535,7 +2443,6 @@ static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
kfree_skb(buf);
/* Append chain of fragments to send queue & send them */
-
l_ptr->long_msg_seq_no++;
link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
l_ptr->stats.sent_fragments += fragm_no;
@@ -2551,7 +2458,6 @@ static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
* help storing these values in unused, available fields in the
* pending message. This makes dynamic memory allocation unnecessary.
*/
-
static void set_long_msg_seqno(struct sk_buff *buf, u32 seqno)
{
msg_set_seqno(buf_msg(buf), seqno);
@@ -2603,7 +2509,6 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
*fb = NULL;
/* Is there an incomplete message waiting for this fragment? */
-
while (pbuf && ((buf_seqno(pbuf) != long_msg_seq_no) ||
(msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
prev = pbuf;
@@ -2629,7 +2534,6 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
skb_copy_to_linear_data(pbuf, imsg,
msg_data_sz(fragm));
/* Prepare buffer for subsequent fragments. */
-
set_long_msg_seqno(pbuf, long_msg_seq_no);
set_fragm_size(pbuf, fragm_sz);
set_expected_frags(pbuf, exp_fragm_cnt - 1);
@@ -2650,7 +2554,6 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
kfree_skb(fbuf);
/* Is message complete? */
-
if (exp_frags == 0) {
if (prev)
prev->next = pbuf->next;
@@ -2672,7 +2575,6 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
* link_check_defragm_bufs - flush stale incoming message fragments
* @l_ptr: pointer to link
*/
-
static void link_check_defragm_bufs(struct tipc_link *l_ptr)
{
struct sk_buff *prev = NULL;
@@ -2701,8 +2603,6 @@ static void link_check_defragm_bufs(struct tipc_link *l_ptr)
}
}
-
-
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
{
if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
@@ -2714,7 +2614,6 @@ static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
}
-
void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
{
/* Data messages from this node, inclusive FIRST_FRAGM */
@@ -2744,7 +2643,6 @@ void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
*
* Returns pointer to link (or 0 if invalid link name).
*/
-
static struct tipc_link *link_find_link(const char *name,
struct tipc_node **node)
{
@@ -2778,7 +2676,6 @@ static struct tipc_link *link_find_link(const char *name,
*
* Returns 1 if value is within range, 0 if not.
*/
-
static int link_value_is_valid(u16 cmd, u32 new_value)
{
switch (cmd) {
@@ -2794,7 +2691,6 @@ static int link_value_is_valid(u16 cmd, u32 new_value)
return 0;
}
-
/**
* link_cmd_set_value - change priority/tolerance/window for link/bearer/media
* @name - ptr to link, bearer, or media name
@@ -2805,7 +2701,6 @@ static int link_value_is_valid(u16 cmd, u32 new_value)
*
* Returns 0 if value updated and negative value on error.
*/
-
static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
{
struct tipc_node *node;
@@ -2910,7 +2805,6 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space
* link_reset_statistics - reset link statistics
* @l_ptr: pointer to link
*/
-
static void link_reset_statistics(struct tipc_link *l_ptr)
{
memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
@@ -2951,7 +2845,6 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_
/**
* percent - convert count to a percentage of total (rounding up or down)
*/
-
static u32 percent(u32 count, u32 total)
{
return (count * 100 + (total / 2)) / total;
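/*
 * The "+ (total / 2)" term makes the integer division round to nearest
 * instead of truncating:
 *
 *   percent(1, 3) == (100 + 1) / 3 == 33   (33.3% rounds down)
 *   percent(2, 3) == (200 + 1) / 3 == 67   (66.7% rounds up)
 */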
@@ -2965,7 +2858,6 @@ static u32 percent(u32 count, u32 total)
*
* Returns length of print buffer data string (or 0 if error)
*/
-
static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
{
struct print_buf pb;
@@ -3087,7 +2979,6 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_s
*
* If no active link can be found, uses default maximum packet size.
*/
-
u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
{
struct tipc_node *n_ptr;
@@ -3171,4 +3062,3 @@ print_state:
tipc_printbuf_validate(buf);
info("%s", print_area);
}
-
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 73c18c140e1d..d6a60a963ce6 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -47,13 +47,11 @@
/*
* Out-of-range value for link sequence numbers
*/
-
#define INVALID_LINK_SEQ 0x10000
/*
* Link states
*/
-
#define WORKING_WORKING 560810u
#define WORKING_UNKNOWN 560811u
#define RESET_UNKNOWN 560812u
@@ -63,7 +61,6 @@
* Starting value for maximum packet size negotiation on unicast links
* (unless bearer MTU is less)
*/
-
#define MAX_PKT_DEFAULT 1500
/**
@@ -114,7 +111,6 @@
* @defragm_buf: list of partially reassembled inbound message fragments
* @stats: collects statistics regarding link activity
*/
-
struct tipc_link {
u32 addr;
char name[TIPC_MAX_LINK_NAME];
@@ -255,7 +251,6 @@ void tipc_link_retransmit(struct tipc_link *l_ptr,
/*
* Link sequence number manipulation routines (uses modulo 2**16 arithmetic)
*/
-
static inline u32 buf_seqno(struct sk_buff *buf)
{
return msg_seqno(buf_msg(buf));
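/*
 * A minimal sketch of the wrap-around comparison idiom used by these
 * routines, assuming mod() masks to 16 bits (consistent with the
 * INVALID_LINK_SEQ value of 0x10000 above):
 *
 *   mod(x)     == (x & 0xffff)
 *   less(a, b) is true iff b follows a, i.e. mod(b - a) < 0x8000 and
 *              mod(a) != mod(b)
 *
 * so less(0xfffe, 0x0001) holds even though 0x0001 < 0xfffe numerically.
 */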
@@ -294,7 +289,6 @@ static inline u32 lesser(u32 left, u32 right)
/*
* Link status checking routines
*/
-
static inline int link_working_working(struct tipc_link *l_ptr)
{
return l_ptr->state == WORKING_WORKING;
diff --git a/net/tipc/log.c b/net/tipc/log.c
index 895c6e530b0b..026733f24919 100644
--- a/net/tipc/log.c
+++ b/net/tipc/log.c
@@ -47,7 +47,6 @@
*
* Additional user-defined print buffers are also permitted.
*/
-
static struct print_buf null_buf = { NULL, 0, NULL, 0 };
struct print_buf *const TIPC_NULL = &null_buf;
@@ -72,7 +71,6 @@ struct print_buf *const TIPC_LOG = &log_buf;
* on the caller to prevent simultaneous use of the print buffer(s) being
* manipulated.
*/
-
static char print_string[TIPC_PB_MAX_STR];
static DEFINE_SPINLOCK(print_lock);
@@ -97,7 +95,6 @@ static void tipc_printbuf_move(struct print_buf *pb_to,
* Note: If the character array is too small (or absent), the print buffer
* becomes a null device that discards anything written to it.
*/
-
void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size)
{
pb->buf = raw;
@@ -117,7 +114,6 @@ void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size)
* tipc_printbuf_reset - reinitialize print buffer to empty state
* @pb: pointer to print buffer structure
*/
-
static void tipc_printbuf_reset(struct print_buf *pb)
{
if (pb->buf) {
@@ -133,7 +129,6 @@ static void tipc_printbuf_reset(struct print_buf *pb)
*
* Returns non-zero if print buffer is empty.
*/
-
static int tipc_printbuf_empty(struct print_buf *pb)
{
return !pb->buf || (pb->crs == pb->buf);
@@ -148,7 +143,6 @@ static int tipc_printbuf_empty(struct print_buf *pb)
*
* Returns length of print buffer data string (including trailing NUL)
*/
-
int tipc_printbuf_validate(struct print_buf *pb)
{
char *err = "\n\n*** PRINT BUFFER OVERFLOW ***\n\n";
@@ -182,14 +176,12 @@ int tipc_printbuf_validate(struct print_buf *pb)
* Current contents of destination print buffer (if any) are discarded.
* Source print buffer becomes empty if a successful move occurs.
*/
-
static void tipc_printbuf_move(struct print_buf *pb_to,
struct print_buf *pb_from)
{
int len;
/* Handle the cases where contents can't be moved */
-
if (!pb_to->buf)
return;
@@ -206,7 +198,6 @@ static void tipc_printbuf_move(struct print_buf *pb_to,
}
/* Copy data from char after cursor to end (if used) */
-
len = pb_from->buf + pb_from->size - pb_from->crs - 2;
if ((pb_from->buf[pb_from->size - 1] == 0) && (len > 0)) {
strcpy(pb_to->buf, pb_from->crs + 1);
@@ -215,7 +206,6 @@ static void tipc_printbuf_move(struct print_buf *pb_to,
pb_to->crs = pb_to->buf;
/* Copy data from start to cursor (always) */
-
len = pb_from->crs - pb_from->buf;
strcpy(pb_to->crs, pb_from->buf);
pb_to->crs += len;
@@ -228,7 +218,6 @@ static void tipc_printbuf_move(struct print_buf *pb_to,
* @pb: pointer to print buffer
* @fmt: formatted info to be printed
*/
-
void tipc_printf(struct print_buf *pb, const char *fmt, ...)
{
int chars_to_add;
@@ -270,7 +259,6 @@ void tipc_printf(struct print_buf *pb, const char *fmt, ...)
* tipc_log_resize - change the size of the TIPC log buffer
* @log_size: print buffer size to use
*/
-
int tipc_log_resize(int log_size)
{
int res = 0;
@@ -295,7 +283,6 @@ int tipc_log_resize(int log_size)
/**
* tipc_log_resize_cmd - reconfigure size of TIPC log buffer
*/
-
struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area, int req_tlv_space)
{
u32 value;
@@ -316,7 +303,6 @@ struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area, int req_tlv_space)
/**
* tipc_log_dump - capture TIPC log buffer contents in configuration message
*/
-
struct sk_buff *tipc_log_dump(void)
{
struct sk_buff *reply;
diff --git a/net/tipc/log.h b/net/tipc/log.h
index 2248d96238e6..d1f5eb967fd8 100644
--- a/net/tipc/log.h
+++ b/net/tipc/log.h
@@ -44,7 +44,6 @@
* @crs: pointer to first unused space in character array (i.e. final NUL)
* @echo: echo output to system console if non-zero
*/
-
struct print_buf {
char *buf;
u32 size;
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index e3afe162c0ac..deea0d232dca 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -72,7 +72,6 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type,
*
* Returns message data size or errno
*/
-
int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
u32 num_sect, unsigned int total_len,
int max_size, int usrmem, struct sk_buff **buf)
@@ -112,7 +111,6 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
}
#ifdef CONFIG_TIPC_DEBUG
-
void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
{
u32 usr = msg_user(msg);
@@ -352,5 +350,4 @@ void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT))
tipc_msg_dbg(buf, msg_get_wrapped(msg), " /");
}
-
#endif
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index eba524e34a6b..ba2a72beea68 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -44,7 +44,6 @@
*
* Note: Some items are also used with TIPC internal message headers
*/
-
#define TIPC_VERSION 2
/*
@@ -58,7 +57,6 @@
/*
* Payload message types
*/
-
#define TIPC_CONN_MSG 0
#define TIPC_MCAST_MSG 1
#define TIPC_NAMED_MSG 2
@@ -67,7 +65,6 @@
/*
* Message header sizes
*/
-
#define SHORT_H_SIZE 24 /* In-cluster basic payload message */
#define BASIC_H_SIZE 32 /* Basic payload message */
#define NAMED_H_SIZE 40 /* Named payload message */
@@ -121,7 +118,6 @@ static inline void msg_swap_words(struct tipc_msg *msg, u32 a, u32 b)
/*
* Word 0
*/
-
static inline u32 msg_version(struct tipc_msg *m)
{
return msg_bits(m, 0, 29, 7);
@@ -216,7 +212,6 @@ static inline void msg_set_size(struct tipc_msg *m, u32 sz)
/*
* Word 1
*/
-
static inline u32 msg_type(struct tipc_msg *m)
{
return msg_bits(m, 1, 29, 0x7);
@@ -291,7 +286,6 @@ static inline void msg_set_bcast_ack(struct tipc_msg *m, u32 n)
/*
* Word 2
*/
-
static inline u32 msg_ack(struct tipc_msg *m)
{
return msg_bits(m, 2, 16, 0xffff);
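/*
 * Assuming msg_bits(m, w, pos, mask) extracts a "mask"-wide field at bit
 * offset "pos" of 32-bit header word "w" (its definition lies outside
 * this hunk), word 2 splits as:
 *
 *   msg_ack(m)   == (msg_word(m, 2) >> 16) & 0xffff   -- upper half
 *   msg_seqno(m) ==  msg_word(m, 2)        & 0xffff   -- lower half
 *
 * with msg_seqno()/msg_set_seqno() following in this same "Word 2" block.
 */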
@@ -315,8 +309,6 @@ static inline void msg_set_seqno(struct tipc_msg *m, u32 n)
/*
* Words 3-10
*/
-
-
static inline u32 msg_prevnode(struct tipc_msg *m)
{
return msg_word(m, 3);
@@ -434,7 +426,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
return (struct tipc_msg *)msg_data(m);
}
-
/*
* Constants and routines used to read and write TIPC internal message headers
*/
@@ -442,7 +433,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
/*
* Internal message users
*/
-
#define BCAST_PROTOCOL 5
#define MSG_BUNDLER 6
#define LINK_PROTOCOL 7
@@ -456,7 +446,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
/*
* Connection management protocol message types
*/
-
#define CONN_PROBE 0
#define CONN_PROBE_REPLY 1
#define CONN_ACK 2
@@ -464,14 +453,12 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
/*
* Name distributor message types
*/
-
#define PUBLICATION 0
#define WITHDRAWAL 1
/*
* Segmentation message types
*/
-
#define FIRST_FRAGMENT 0
#define FRAGMENT 1
#define LAST_FRAGMENT 2
@@ -479,7 +466,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
/*
* Link management protocol message types
*/
-
#define STATE_MSG 0
#define RESET_MSG 1
#define ACTIVATE_MSG 2
@@ -493,7 +479,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
/*
* Config protocol message types
*/
-
#define DSC_REQ_MSG 0
#define DSC_RESP_MSG 1
@@ -501,7 +486,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
/*
* Word 1
*/
-
static inline u32 msg_seq_gap(struct tipc_msg *m)
{
return msg_bits(m, 1, 16, 0x1fff);
@@ -526,7 +510,6 @@ static inline void msg_set_node_sig(struct tipc_msg *m, u32 n)
/*
* Word 2
*/
-
static inline u32 msg_dest_domain(struct tipc_msg *m)
{
return msg_word(m, 2);
@@ -561,7 +544,6 @@ static inline void msg_set_bcgap_to(struct tipc_msg *m, u32 n)
/*
* Word 4
*/
-
static inline u32 msg_last_bcast(struct tipc_msg *m)
{
return msg_bits(m, 4, 16, 0xffff);
@@ -628,7 +610,6 @@ static inline void msg_set_link_selector(struct tipc_msg *m, u32 n)
/*
* Word 5
*/
-
static inline u32 msg_session(struct tipc_msg *m)
{
return msg_bits(m, 5, 16, 0xffff);
@@ -697,7 +678,6 @@ static inline char *msg_media_addr(struct tipc_msg *m)
/*
* Word 9
*/
-
static inline u32 msg_msgcnt(struct tipc_msg *m)
{
return msg_bits(m, 9, 16, 0xffff);
@@ -744,5 +724,4 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type,
int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
u32 num_sect, unsigned int total_len,
int max_size, int usrmem, struct sk_buff **buf);
-
#endif
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index d57da6159616..158318e67b08 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -58,7 +58,6 @@
* Note: There is no field that identifies the publishing node because it is
* the same for all items contained within a publication message.
*/
-
struct distr_item {
__be32 type;
__be32 lower;
@@ -68,17 +67,41 @@ struct distr_item {
};
/**
- * List of externally visible publications by this node --
- * that is, all publications having scope > TIPC_NODE_SCOPE.
+ * struct publ_list - list of publications made by this node
+ * @list: circular list of publications
+ * @list_size: number of entries in list
*/
+struct publ_list {
+ struct list_head list;
+ u32 size;
+};
+
+static struct publ_list publ_zone = {
+ .list = LIST_HEAD_INIT(publ_zone.list),
+ .size = 0,
+};
+
+static struct publ_list publ_cluster = {
+ .list = LIST_HEAD_INIT(publ_cluster.list),
+ .size = 0,
+};
+
+static struct publ_list publ_node = {
+ .list = LIST_HEAD_INIT(publ_node.list),
+ .size = 0,
+};
+
+static struct publ_list *publ_lists[] = {
+ NULL,
+ &publ_zone, /* publ_lists[TIPC_ZONE_SCOPE] */
+ &publ_cluster, /* publ_lists[TIPC_CLUSTER_SCOPE] */
+ &publ_node /* publ_lists[TIPC_NODE_SCOPE] */
+};
-static LIST_HEAD(publ_root);
-static u32 publ_cnt;
/**
* publ_to_item - add publication info to a publication message
*/
-
static void publ_to_item(struct distr_item *i, struct publication *p)
{
i->type = htonl(p->type);
@@ -91,7 +114,6 @@ static void publ_to_item(struct distr_item *i, struct publication *p)
/**
* named_prepare_buf - allocate & initialize a publication message
*/
-
static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
{
struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size);
@@ -126,14 +148,16 @@ static void named_cluster_distribute(struct sk_buff *buf)
/**
* tipc_named_publish - tell other nodes about a new publication by this node
*/
-
void tipc_named_publish(struct publication *publ)
{
struct sk_buff *buf;
struct distr_item *item;
- list_add_tail(&publ->local_list, &publ_root);
- publ_cnt++;
+ list_add_tail(&publ->local_list, &publ_lists[publ->scope]->list);
+ publ_lists[publ->scope]->size++;
+
+ if (publ->scope == TIPC_NODE_SCOPE)
+ return;
buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
if (!buf) {
@@ -149,14 +173,16 @@ void tipc_named_publish(struct publication *publ)
/**
* tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
*/
-
void tipc_named_withdraw(struct publication *publ)
{
struct sk_buff *buf;
struct distr_item *item;
list_del(&publ->local_list);
- publ_cnt--;
+ publ_lists[publ->scope]->size--;
+
+ if (publ->scope == TIPC_NODE_SCOPE)
+ return;
buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
if (!buf) {
@@ -169,25 +195,51 @@ void tipc_named_withdraw(struct publication *publ)
named_cluster_distribute(buf);
}
+/*
+ * named_distribute - prepare name info for bulk distribution to another node
+ */
+static void named_distribute(struct list_head *message_list, u32 node,
+ struct publ_list *pls, u32 max_item_buf)
+{
+ struct publication *publ;
+ struct sk_buff *buf = NULL;
+ struct distr_item *item = NULL;
+ u32 left = 0;
+ u32 rest = pls->size * ITEM_SIZE;
+
+ list_for_each_entry(publ, &pls->list, local_list) {
+ if (!buf) {
+ left = (rest <= max_item_buf) ? rest : max_item_buf;
+ rest -= left;
+ buf = named_prepare_buf(PUBLICATION, left, node);
+ if (!buf) {
+ warn("Bulk publication failure\n");
+ return;
+ }
+ item = (struct distr_item *)msg_data(buf_msg(buf));
+ }
+ publ_to_item(item, publ);
+ item++;
+ left -= ITEM_SIZE;
+ if (!left) {
+ list_add_tail((struct list_head *)buf, message_list);
+ buf = NULL;
+ }
+ }
+}
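/*
 * Chunking arithmetic, with purely hypothetical numbers: if pls->size is
 * 100 publications and max_item_buf holds 60 items' worth of bytes
 * (60 * ITEM_SIZE), the loop above sends one PUBLICATION message carrying
 * 60 items and a second carrying the remaining 40 -- "rest" counts the
 * bytes still owed overall, "left" the bytes allotted to the buffer
 * currently being filled.
 */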
+
/**
* tipc_named_node_up - tell specified node about all publications by this node
*/
-
void tipc_named_node_up(unsigned long nodearg)
{
struct tipc_node *n_ptr;
struct tipc_link *l_ptr;
- struct publication *publ;
- struct distr_item *item = NULL;
- struct sk_buff *buf = NULL;
struct list_head message_list;
u32 node = (u32)nodearg;
- u32 left = 0;
- u32 rest;
u32 max_item_buf = 0;
/* compute maximum amount of publication data to send per message */
-
read_lock_bh(&tipc_net_lock);
n_ptr = tipc_node_find(node);
if (n_ptr) {
@@ -203,32 +255,11 @@ void tipc_named_node_up(unsigned long nodearg)
return;
/* create list of publication messages, then send them as a unit */
-
INIT_LIST_HEAD(&message_list);
read_lock_bh(&tipc_nametbl_lock);
- rest = publ_cnt * ITEM_SIZE;
-
- list_for_each_entry(publ, &publ_root, local_list) {
- if (!buf) {
- left = (rest <= max_item_buf) ? rest : max_item_buf;
- rest -= left;
- buf = named_prepare_buf(PUBLICATION, left, node);
- if (!buf) {
- warn("Bulk publication distribution failure\n");
- goto exit;
- }
- item = (struct distr_item *)msg_data(buf_msg(buf));
- }
- publ_to_item(item, publ);
- item++;
- left -= ITEM_SIZE;
- if (!left) {
- list_add_tail((struct list_head *)buf, &message_list);
- buf = NULL;
- }
- }
-exit:
+ named_distribute(&message_list, node, &publ_cluster, max_item_buf);
+ named_distribute(&message_list, node, &publ_zone, max_item_buf);
read_unlock_bh(&tipc_nametbl_lock);
tipc_link_send_names(&message_list, (u32)node);
@@ -240,7 +271,6 @@ exit:
* Invoked for each publication issued by a newly failed node.
* Removes publication structure from name table & deletes it.
*/
-
static void named_purge_publ(struct publication *publ)
{
struct publication *p;
@@ -264,7 +294,6 @@ static void named_purge_publ(struct publication *publ)
/**
* tipc_named_recv - process name table update message sent by another node
*/
-
void tipc_named_recv(struct sk_buff *buf)
{
struct publication *publ;
@@ -316,21 +345,22 @@ void tipc_named_recv(struct sk_buff *buf)
}
/**
- * tipc_named_reinit - re-initialize local publication list
+ * tipc_named_reinit - re-initialize local publications
*
* This routine is called whenever TIPC networking is enabled.
- * All existing publications by this node that have "cluster" or "zone" scope
- * are updated to reflect the node's new network address.
+ * All name table entries published by this node are updated to reflect
+ * the node's new network address.
*/
-
void tipc_named_reinit(void)
{
struct publication *publ;
+ int scope;
write_lock_bh(&tipc_nametbl_lock);
- list_for_each_entry(publ, &publ_root, local_list)
- publ->node = tipc_own_addr;
+ for (scope = TIPC_ZONE_SCOPE; scope <= TIPC_NODE_SCOPE; scope++)
+ list_for_each_entry(publ, &publ_lists[scope]->list, local_list)
+ publ->node = tipc_own_addr;
write_unlock_bh(&tipc_nametbl_lock);
}
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index c6a1ae36952e..010f24a59da2 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -56,7 +56,6 @@ static int tipc_nametbl_size = 1024; /* must be a power of 2 */
* publications of the associated name sequence belong to it.
* (The cluster and node lists may be empty.)
*/
-
struct name_info {
struct list_head node_list;
struct list_head cluster_list;
@@ -72,7 +71,6 @@ struct name_info {
* @upper: name sequence upper bound
* @info: pointer to name sequence publication info
*/
-
struct sub_seq {
u32 lower;
u32 upper;
@@ -90,7 +88,6 @@ struct sub_seq {
* @subscriptions: list of subscriptions for this 'type'
* @lock: spinlock controlling access to publication lists of all sub-sequences
*/
-
struct name_seq {
u32 type;
struct sub_seq *sseqs;
@@ -107,7 +104,6 @@ struct name_seq {
* accessed via hashing on 'type'; name sequence lists are *not* sorted
* @local_publ_count: number of publications issued by this node
*/
-
struct name_table {
struct hlist_head *types;
u32 local_publ_count;
@@ -124,7 +120,6 @@ static int hash(int x)
/**
* publ_create - create a publication structure
*/
-
static struct publication *publ_create(u32 type, u32 lower, u32 upper,
u32 scope, u32 node, u32 port_ref,
u32 key)
@@ -151,7 +146,6 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper,
/**
* tipc_subseq_alloc - allocate a specified number of sub-sequence structures
*/
-
static struct sub_seq *tipc_subseq_alloc(u32 cnt)
{
struct sub_seq *sseq = kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC);
@@ -163,7 +157,6 @@ static struct sub_seq *tipc_subseq_alloc(u32 cnt)
*
* Allocates a single sub-sequence structure and sets it to all 0's.
*/
-
static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head)
{
struct name_seq *nseq = kzalloc(sizeof(*nseq), GFP_ATOMIC);
@@ -186,12 +179,23 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea
return nseq;
}
-/**
+/*
+ * nameseq_delete_empty - deletes a name sequence structure if now unused
+ */
+static void nameseq_delete_empty(struct name_seq *seq)
+{
+ if (!seq->first_free && list_empty(&seq->subscriptions)) {
+ hlist_del_init(&seq->ns_list);
+ kfree(seq->sseqs);
+ kfree(seq);
+ }
+}
+
+/*
* nameseq_find_subseq - find sub-sequence (if any) matching a name instance
*
* Very time-critical, so it binary-searches through the sub-sequence array.
*/
-
static struct sub_seq *nameseq_find_subseq(struct name_seq *nseq,
u32 instance)
{
@@ -221,7 +225,6 @@ static struct sub_seq *nameseq_find_subseq(struct name_seq *nseq,
*
* Note: Similar to binary search code for locating a sub-sequence.
*/
-
static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
{
struct sub_seq *sseqs = nseq->sseqs;
@@ -242,9 +245,8 @@ static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
}
/**
- * tipc_nameseq_insert_publ -
+ * tipc_nameseq_insert_publ
*/
-
static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
u32 type, u32 lower, u32 upper,
u32 scope, u32 node, u32 port, u32 key)
@@ -260,7 +262,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
if (sseq) {
/* Lower end overlaps existing entry => need an exact match */
-
if ((sseq->lower != lower) || (sseq->upper != upper)) {
warn("Cannot publish {%u,%u,%u}, overlap error\n",
type, lower, upper);
@@ -280,11 +281,9 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
struct sub_seq *freesseq;
/* Find where lower end should be inserted */
-
inspos = nameseq_locate_subseq(nseq, lower);
/* Fail if upper end overlaps into an existing entry */
-
if ((inspos < nseq->first_free) &&
(upper >= nseq->sseqs[inspos].lower)) {
warn("Cannot publish {%u,%u,%u}, overlap error\n",
@@ -293,7 +292,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
}
/* Ensure there is space for new sub-sequence */
-
if (nseq->first_free == nseq->alloc) {
struct sub_seq *sseqs = tipc_subseq_alloc(nseq->alloc * 2);
@@ -321,7 +319,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
INIT_LIST_HEAD(&info->zone_list);
/* Insert new sub-sequence */
-
sseq = &nseq->sseqs[inspos];
freesseq = &nseq->sseqs[nseq->first_free];
memmove(sseq + 1, sseq, (freesseq - sseq) * sizeof(*sseq));
@@ -333,8 +330,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
created_subseq = 1;
}
- /* Insert a publication: */
-
+ /* Insert a publication */
publ = publ_create(type, lower, upper, scope, node, port, key);
if (!publ)
return NULL;
@@ -347,14 +343,12 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
info->cluster_list_size++;
}
- if (node == tipc_own_addr) {
+ if (in_own_node(node)) {
list_add(&publ->node_list, &info->node_list);
info->node_list_size++;
}
- /*
- * Any subscriptions waiting for notification?
- */
+ /* Any subscriptions waiting for notification? */
list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
tipc_subscr_report_overlap(s,
publ->lower,
@@ -368,7 +362,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
}
/**
- * tipc_nameseq_remove_publ -
+ * tipc_nameseq_remove_publ
*
* NOTE: There may be cases where TIPC is asked to remove a publication
* that is not in the name table. For example, if another node issues a
@@ -378,7 +372,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
* A failed withdraw request simply returns a failure indication and lets the
* caller issue any error or warning messages associated with such a problem.
*/
-
static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 inst,
u32 node, u32 ref, u32 key)
{
@@ -395,7 +388,6 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
info = sseq->info;
/* Locate publication, if it exists */
-
list_for_each_entry(publ, &info->zone_list, zone_list) {
if ((publ->key == key) && (publ->ref == ref) &&
(!publ->node || (publ->node == node)))
@@ -405,26 +397,22 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
found:
/* Remove publication from zone scope list */
-
list_del(&publ->zone_list);
info->zone_list_size--;
/* Remove publication from cluster scope list, if present */
-
if (in_own_cluster(node)) {
list_del(&publ->cluster_list);
info->cluster_list_size--;
}
/* Remove publication from node scope list, if present */
-
- if (node == tipc_own_addr) {
+ if (in_own_node(node)) {
list_del(&publ->node_list);
info->node_list_size--;
}
/* Contract subseq list if no more publications for that subseq */
-
if (list_empty(&info->zone_list)) {
kfree(info);
free = &nseq->sseqs[nseq->first_free--];
@@ -433,7 +421,6 @@ found:
}
/* Notify any waiting subscriptions */
-
list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
tipc_subscr_report_overlap(s,
publ->lower,
@@ -452,7 +439,6 @@ found:
* the prescribed number of events if there is any sub-
* sequence overlapping with the requested sequence
*/
-
static void tipc_nameseq_subscribe(struct name_seq *nseq,
struct tipc_subscription *s)
{
@@ -504,9 +490,10 @@ struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
{
struct name_seq *seq = nametbl_find_seq(type);
- if (lower > upper) {
- warn("Failed to publish illegal {%u,%u,%u}\n",
- type, lower, upper);
+ if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE) ||
+ (lower > upper)) {
+ dbg("Failed to publish illegal {%u,%u,%u} with scope %u\n",
+ type, lower, upper, scope);
return NULL;
}
@@ -529,12 +516,7 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
return NULL;
publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key);
-
- if (!seq->first_free && list_empty(&seq->subscriptions)) {
- hlist_del_init(&seq->ns_list);
- kfree(seq->sseqs);
- kfree(seq);
- }
+ nameseq_delete_empty(seq);
return publ;
}
@@ -551,7 +533,6 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
* - if name translation is attempted and fails, sets 'destnode' to 0
* and returns 0
*/
-
u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
{
struct sub_seq *sseq;
@@ -574,7 +555,7 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
spin_lock_bh(&seq->lock);
info = sseq->info;
- /* Closest-First Algorithm: */
+ /* Closest-First Algorithm */
if (likely(!*destnode)) {
if (!list_empty(&info->node_list)) {
publ = list_first_entry(&info->node_list,
@@ -597,14 +578,14 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
}
}
- /* Round-Robin Algorithm: */
+ /* Round-Robin Algorithm */
else if (*destnode == tipc_own_addr) {
if (list_empty(&info->node_list))
goto no_match;
publ = list_first_entry(&info->node_list, struct publication,
node_list);
list_move_tail(&publ->node_list, &info->node_list);
- } else if (in_own_cluster(*destnode)) {
+ } else if (in_own_cluster_exact(*destnode)) {
if (list_empty(&info->cluster_list))
goto no_match;
publ = list_first_entry(&info->cluster_list, struct publication,
@@ -638,7 +619,6 @@ not_found:
*
* Returns non-zero if any off-node ports overlap
*/
-
int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
struct tipc_port_list *dports)
{
@@ -682,7 +662,6 @@ exit:
/*
* tipc_nametbl_publish - add name publication to network name tables
*/
-
struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
u32 scope, u32 port_ref, u32 key)
{
@@ -695,11 +674,12 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
}
write_lock_bh(&tipc_nametbl_lock);
- table.local_publ_count++;
publ = tipc_nametbl_insert_publ(type, lower, upper, scope,
tipc_own_addr, port_ref, key);
- if (publ && (scope != TIPC_NODE_SCOPE))
+ if (likely(publ)) {
+ table.local_publ_count++;
tipc_named_publish(publ);
+ }
write_unlock_bh(&tipc_nametbl_lock);
return publ;
}
@@ -707,7 +687,6 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
/**
* tipc_nametbl_withdraw - withdraw name publication from network name tables
*/
-
int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
{
struct publication *publ;
@@ -716,8 +695,7 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
if (likely(publ)) {
table.local_publ_count--;
- if (publ->scope != TIPC_NODE_SCOPE)
- tipc_named_withdraw(publ);
+ tipc_named_withdraw(publ);
write_unlock_bh(&tipc_nametbl_lock);
list_del_init(&publ->pport_list);
kfree(publ);
@@ -733,7 +711,6 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
/**
* tipc_nametbl_subscribe - add a subscription object to the name table
*/
-
void tipc_nametbl_subscribe(struct tipc_subscription *s)
{
u32 type = s->seq.type;
@@ -757,7 +734,6 @@ void tipc_nametbl_subscribe(struct tipc_subscription *s)
/**
* tipc_nametbl_unsubscribe - remove a subscription object from name table
*/
-
void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
{
struct name_seq *seq;
@@ -768,11 +744,7 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
spin_lock_bh(&seq->lock);
list_del_init(&s->nameseq_list);
spin_unlock_bh(&seq->lock);
- if ((seq->first_free == 0) && list_empty(&seq->subscriptions)) {
- hlist_del_init(&seq->ns_list);
- kfree(seq->sseqs);
- kfree(seq);
- }
+ nameseq_delete_empty(seq);
}
write_unlock_bh(&tipc_nametbl_lock);
}
@@ -781,7 +753,6 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
/**
* subseq_list: print specified sub-sequence contents into the given buffer
*/
-
static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
u32 index)
{
@@ -818,7 +789,6 @@ static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
/**
* nameseq_list: print specified name sequence contents into the given buffer
*/
-
static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth,
u32 type, u32 lowbound, u32 upbound, u32 index)
{
@@ -849,7 +819,6 @@ static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth,
/**
* nametbl_header - print name table header into the given buffer
*/
-
static void nametbl_header(struct print_buf *buf, u32 depth)
{
const char *header[] = {
@@ -871,7 +840,6 @@ static void nametbl_header(struct print_buf *buf, u32 depth)
/**
* nametbl_list - print specified name table contents into the given buffer
*/
-
static void nametbl_list(struct print_buf *buf, u32 depth_info,
u32 type, u32 lowbound, u32 upbound)
{
@@ -970,7 +938,6 @@ void tipc_nametbl_stop(void)
return;
/* Verify name table is empty, then release it */
-
write_lock_bh(&tipc_nametbl_lock);
for (i = 0; i < tipc_nametbl_size; i++) {
if (!hlist_empty(&table.types[i]))
@@ -980,4 +947,3 @@ void tipc_nametbl_stop(void)
table.types = NULL;
write_unlock_bh(&tipc_nametbl_lock);
}
-
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index 207d59ebf849..71cb4dc712df 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -45,10 +45,8 @@ struct tipc_port_list;
/*
* TIPC name types reserved for internal TIPC use (both current and planned)
*/
-
#define TIPC_ZM_SRV 3 /* zone master service name type */
-
/**
* struct publication - info about a published (name or) name sequence
* @type: name sequence type
@@ -67,7 +65,6 @@ struct tipc_port_list;
*
* Note that the node list, cluster list, and zone list are circular lists.
*/
-
struct publication {
u32 type;
u32 lower;
diff --git a/net/tipc/net.c b/net/tipc/net.c
index d4531b07076c..7c236c89cf5e 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -175,17 +175,14 @@ int tipc_net_start(u32 addr)
{
char addr_string[16];
- tipc_subscr_stop();
- tipc_cfg_stop();
-
+ write_lock_bh(&tipc_net_lock);
tipc_own_addr = addr;
tipc_named_reinit();
tipc_port_reinit();
-
tipc_bclink_init();
+ write_unlock_bh(&tipc_net_lock);
- tipc_k_signal((Handler)tipc_subscr_start, 0);
- tipc_k_signal((Handler)tipc_cfg_init, 0);
+ tipc_cfg_reinit();
info("Started in network mode\n");
info("Own node address %s, network identity %u\n",
diff --git a/net/tipc/node.c b/net/tipc/node.c
index a34cabc2c43a..d4fd341e6e0d 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -58,7 +58,7 @@ static atomic_t tipc_num_links = ATOMIC_INIT(0);
* entries has been chosen so that no hash chain exceeds 8 nodes and will
* usually be much smaller (typically only a single node).
*/
-static inline unsigned int tipc_hashfn(u32 addr)
+static unsigned int tipc_hashfn(u32 addr)
{
return addr & (NODE_HTABLE_SIZE - 1);
}
@@ -66,13 +66,12 @@ static inline unsigned int tipc_hashfn(u32 addr)
/*
* tipc_node_find - locate specified node object, if it exists
*/
-
struct tipc_node *tipc_node_find(u32 addr)
{
struct tipc_node *node;
struct hlist_node *pos;
- if (unlikely(!in_own_cluster(addr)))
+ if (unlikely(!in_own_cluster_exact(addr)))
return NULL;
hlist_for_each_entry(node, pos, &node_htable[tipc_hashfn(addr)], hash) {
@@ -91,7 +90,6 @@ struct tipc_node *tipc_node_find(u32 addr)
* time. (It would be preferable to switch to holding net_lock in write mode,
* but this is a non-trivial change.)
*/
-
struct tipc_node *tipc_node_create(u32 addr)
{
struct tipc_node *n_ptr, *temp_node;
@@ -142,13 +140,11 @@ void tipc_node_delete(struct tipc_node *n_ptr)
tipc_num_nodes--;
}
-
/**
* tipc_node_link_up - handle addition of link
*
* Link becomes active (alone or shared) or standby, depending on its priority.
*/
-
void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
struct tipc_link **active = &n_ptr->active_links[0];
@@ -181,7 +177,6 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
/**
* node_select_active_links - select active link
*/
-
static void node_select_active_links(struct tipc_node *n_ptr)
{
struct tipc_link **active = &n_ptr->active_links[0];
@@ -209,7 +204,6 @@ static void node_select_active_links(struct tipc_node *n_ptr)
/**
* tipc_node_link_down - handle loss of link
*/
-
void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
struct tipc_link **active;
@@ -300,7 +294,6 @@ static void node_lost_contact(struct tipc_node *n_ptr)
tipc_addr_string_fill(addr_string, n_ptr->addr));
/* Flush broadcast link info associated with lost node */
-
if (n_ptr->bclink.supported) {
while (n_ptr->bclink.deferred_head) {
struct sk_buff *buf = n_ptr->bclink.deferred_head;
@@ -334,7 +327,6 @@ static void node_lost_contact(struct tipc_node *n_ptr)
tipc_nodesub_notify(n_ptr);
/* Prevent re-contact with node until cleanup is done */
-
n_ptr->block_setup = WAIT_PEER_DOWN | WAIT_NAMES_GONE;
tipc_k_signal((Handler)node_name_purge_complete, n_ptr->addr);
}
@@ -362,7 +354,6 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
}
/* For now, get space for all other nodes */
-
payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes;
if (payload_size > 32768u) {
read_unlock_bh(&tipc_net_lock);
@@ -376,7 +367,6 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
}
/* Add TLVs for all nodes in scope */
-
list_for_each_entry(n_ptr, &tipc_node_list, list) {
if (!tipc_in_scope(domain, n_ptr->addr))
continue;
@@ -412,7 +402,6 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
read_lock_bh(&tipc_net_lock);
/* Get space for all unicast links + broadcast link */
-
payload_size = TLV_SPACE(sizeof(link_info)) *
(atomic_read(&tipc_num_links) + 1);
if (payload_size > 32768u) {
@@ -427,14 +416,12 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
}
/* Add TLV for broadcast link */
-
link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr));
link_info.up = htonl(1);
strlcpy(link_info.str, tipc_bclink_name, TIPC_MAX_LINK_NAME);
tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));
/* Add TLVs for any other links in scope */
-
list_for_each_entry(n_ptr, &tipc_node_list, list) {
u32 i;
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 72561c971d67..cfcaf4d6e480 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -48,7 +48,6 @@
#define INVALID_NODE_SIG 0x10000
/* Flags used to block (re)establishment of contact with a neighboring node */
-
#define WAIT_PEER_DOWN 0x0001 /* wait to see that peer's links are down */
#define WAIT_NAMES_GONE 0x0002 /* wait for peer's publications to be purged */
#define WAIT_NODE_DOWN 0x0004 /* wait until peer node is declared down */
@@ -79,7 +78,6 @@
* @deferred_tail: newest OOS b'cast message received from node
* @defragm: list of partially reassembled b'cast message fragments from node
*/
-
struct tipc_node {
u32 addr;
spinlock_t lock;
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
index c3c2815ae630..7a27344108fe 100644
--- a/net/tipc/node_subscr.c
+++ b/net/tipc/node_subscr.c
@@ -41,11 +41,10 @@
/**
* tipc_nodesub_subscribe - create "node down" subscription for specified node
*/
-
void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
void *usr_handle, net_ev_handler handle_down)
{
- if (addr == tipc_own_addr) {
+ if (in_own_node(addr)) {
node_sub->node = NULL;
return;
}
@@ -66,7 +65,6 @@ void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
/**
* tipc_nodesub_unsubscribe - cancel "node down" subscription (if any)
*/
-
void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub)
{
if (!node_sub->node)
@@ -82,7 +80,6 @@ void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub)
*
* Note: node is locked by caller
*/
-
void tipc_nodesub_notify(struct tipc_node *node)
{
struct tipc_node_subscr *ns;
diff --git a/net/tipc/node_subscr.h b/net/tipc/node_subscr.h
index 4bc2ca0867a1..c95d20727ded 100644
--- a/net/tipc/node_subscr.h
+++ b/net/tipc/node_subscr.h
@@ -48,7 +48,6 @@ typedef void (*net_ev_handler) (void *usr_handle);
* @usr_handle: argument to pass to routine when node fails
* @nodesub_list: adjacent entries in list of subscriptions for the node
*/
-
struct tipc_node_subscr {
struct tipc_node *node;
net_ev_handler handle_node_down;
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 94d2904cce66..2ad37a4db376 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -69,10 +69,30 @@ static u32 port_peerport(struct tipc_port *p_ptr)
return msg_destport(&p_ptr->phdr);
}
+/*
+ * tipc_port_peer_msg - verify message was sent by connected port's peer
+ *
+ * Handles cases where the node's network address has changed from
+ * the default of <0.0.0> to its configured setting.
+ */
+int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg)
+{
+ u32 peernode;
+ u32 orignode;
+
+ if (msg_origport(msg) != port_peerport(p_ptr))
+ return 0;
+
+ orignode = msg_orignode(msg);
+ peernode = port_peernode(p_ptr);
+ return (orignode == peernode) ||
+ (!orignode && (peernode == tipc_own_addr)) ||
+ (!peernode && (orignode == tipc_own_addr));
+}
+
/**
* tipc_multicast - send a multicast message to local and remote destinations
*/
-
int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
u32 num_sect, struct iovec const *msg_sect,
unsigned int total_len)
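
The acceptance rule in tipc_port_peer_msg() reduces to three origin/peer combinations. A standalone mirror of the predicate (hypothetical helper for eyeballing the truth table, not kernel code):

    /* own = this node's address; 0 stands for the default <0.0.0> */
    static int peer_msg_ok(u32 orignode, u32 peernode, u32 own)
    {
            return (orignode == peernode) ||           /* addresses agree          */
                   (!orignode && (peernode == own)) || /* peer sent pre-address    */
                   (!peernode && (orignode == own));   /* we connected pre-address */
    }
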
@@ -89,7 +109,6 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
return -EINVAL;
/* Create multicast message */
-
hdr = &oport->phdr;
msg_set_type(hdr, TIPC_MCAST_MSG);
msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
@@ -105,12 +124,10 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
return res;
/* Figure out where to send multicast message */
-
ext_targets = tipc_nametbl_mc_translate(seq->type, seq->lower, seq->upper,
TIPC_NODE_SCOPE, &dports);
/* Send message to destinations (duplicate it only if necessary) */
-
if (ext_targets) {
if (dports.count != 0) {
ibuf = skb_copy(buf, GFP_ATOMIC);
@@ -141,7 +158,6 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
*
* If there is no port list, perform a lookup to create one
*/
-
void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp)
{
struct tipc_msg *msg;
@@ -152,7 +168,6 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp)
msg = buf_msg(buf);
/* Create destination port list, if one wasn't supplied */
-
if (dp == NULL) {
tipc_nametbl_mc_translate(msg_nametype(msg),
msg_namelower(msg),
@@ -163,7 +178,6 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp)
}
/* Deliver a copy of message to each destination port */
-
if (dp->count != 0) {
msg_set_destnode(msg, tipc_own_addr);
if (dp->count == 1) {
@@ -196,7 +210,6 @@ exit:
*
* Returns pointer to (locked) TIPC port, or NULL if unable to create it
*/
-
struct tipc_port *tipc_createport_raw(void *usr_handle,
u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
void (*wakeup)(struct tipc_port *),
@@ -221,18 +234,24 @@ struct tipc_port *tipc_createport_raw(void *usr_handle,
p_ptr->usr_handle = usr_handle;
p_ptr->max_pkt = MAX_PKT_DEFAULT;
p_ptr->ref = ref;
- msg = &p_ptr->phdr;
- tipc_msg_init(msg, importance, TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
- msg_set_origport(msg, ref);
INIT_LIST_HEAD(&p_ptr->wait_list);
INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list);
p_ptr->dispatcher = dispatcher;
p_ptr->wakeup = wakeup;
p_ptr->user_port = NULL;
k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref);
- spin_lock_bh(&tipc_port_list_lock);
INIT_LIST_HEAD(&p_ptr->publications);
INIT_LIST_HEAD(&p_ptr->port_list);
+
+ /*
+ * Must hold port list lock while initializing message header template
+	 * to ensure a change to the node's own network address doesn't
+	 * leave the template with outdated address information
+ */
+ spin_lock_bh(&tipc_port_list_lock);
+ msg = &p_ptr->phdr;
+ tipc_msg_init(msg, importance, TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
+ msg_set_origport(msg, ref);
list_add_tail(&p_ptr->port_list, &ports);
spin_unlock_bh(&tipc_port_list_lock);
return p_ptr;
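
The reordering is worth spelling out: the header template bakes tipc_own_addr into the port, and tipc_port_reinit() (later in this file) restamps every template under this same lock when the node's address changes. Built outside the lock, a template could be initialized with the old address after the reinit walk has already passed, roughly:

    /* Possible interleaving without the lock (sketch):
     *   CPU0 (tipc_createport_raw)        CPU1 (address change)
     *   tipc_msg_init(msg, ...);
     *                                     tipc_own_addr = addr;
     *                                     tipc_port_reinit();  (port not yet
     *                                       on &ports, so never fixed up)
     *   list_add_tail(&p_ptr->port_list, &ports);
     * => the template keeps the stale address indefinitely.
     */
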
@@ -361,7 +380,6 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
u32 rmsg_sz;
/* discard rejected message if it shouldn't be returned to sender */
-
if (WARN(!msg_isdata(msg),
"attempt to reject message with user=%u", msg_user(msg))) {
dump_stack();
@@ -374,7 +392,6 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
* construct returned message by copying rejected message header and
* data (or subset), then updating header fields that need adjusting
*/
-
hdr_sz = msg_hdr_sz(msg);
rmsg_sz = hdr_sz + min_t(u32, data_sz, MAX_REJECT_SIZE);
@@ -413,9 +430,8 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
}
/* send returned message & dispose of rejected message */
-
src_node = msg_prevnode(msg);
- if (src_node == tipc_own_addr)
+ if (in_own_node(src_node))
tipc_port_recv_msg(rbuf);
else
tipc_link_send(rbuf, src_node, msg_link_selector(rmsg));
@@ -519,25 +535,20 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf)
struct tipc_msg *msg = buf_msg(buf);
struct tipc_port *p_ptr;
struct sk_buff *r_buf = NULL;
- u32 orignode = msg_orignode(msg);
- u32 origport = msg_origport(msg);
u32 destport = msg_destport(msg);
int wakeable;
/* Validate connection */
-
p_ptr = tipc_port_lock(destport);
- if (!p_ptr || !p_ptr->connected ||
- (port_peernode(p_ptr) != orignode) ||
- (port_peerport(p_ptr) != origport)) {
+ if (!p_ptr || !p_ptr->connected || !tipc_port_peer_msg(p_ptr, msg)) {
r_buf = tipc_buf_acquire(BASIC_H_SIZE);
if (r_buf) {
msg = buf_msg(r_buf);
tipc_msg_init(msg, TIPC_HIGH_IMPORTANCE, TIPC_CONN_MSG,
- BASIC_H_SIZE, orignode);
+				      BASIC_H_SIZE, msg_orignode(buf_msg(buf)));
msg_set_errcode(msg, TIPC_ERR_NO_PORT);
msg_set_origport(msg, destport);
- msg_set_destport(msg, origport);
+			msg_set_destport(msg, msg_origport(buf_msg(buf)));
}
if (p_ptr)
tipc_port_unlock(p_ptr);
@@ -545,7 +556,6 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf)
}
/* Process protocol message sent by peer */
-
switch (msg_type(msg)) {
case CONN_ACK:
wakeable = tipc_port_congested(p_ptr) && p_ptr->congested &&
@@ -646,8 +656,6 @@ void tipc_port_reinit(void)
spin_lock_bh(&tipc_port_list_lock);
list_for_each_entry(p_ptr, &ports, port_list) {
msg = &p_ptr->phdr;
- if (msg_orignode(msg) == tipc_own_addr)
- break;
msg_set_prevnode(msg, tipc_own_addr);
msg_set_orignode(msg, tipc_own_addr);
}
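
With the early break gone, the loop unconditionally restamps every port's header template. Reassembled from this hunk, the resulting function is roughly:

    void tipc_port_reinit(void)
    {
            struct tipc_port *p_ptr;
            struct tipc_msg *msg;

            spin_lock_bh(&tipc_port_list_lock);
            list_for_each_entry(p_ptr, &ports, port_list) {
                    msg = &p_ptr->phdr;
                    msg_set_prevnode(msg, tipc_own_addr);
                    msg_set_orignode(msg, tipc_own_addr);
            }
            spin_unlock_bh(&tipc_port_list_lock);
    }
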
@@ -659,7 +667,6 @@ void tipc_port_reinit(void)
 * port_dispatcher_sigh(): Signal handler for messages destined
* to the tipc_port interface.
*/
-
static void port_dispatcher_sigh(void *dummy)
{
struct sk_buff *buf;
@@ -676,6 +683,7 @@ static void port_dispatcher_sigh(void *dummy)
struct tipc_name_seq dseq;
void *usr_handle;
int connected;
+ int peer_invalid;
int published;
u32 message_type;
@@ -696,6 +704,7 @@ static void port_dispatcher_sigh(void *dummy)
up_ptr = p_ptr->user_port;
usr_handle = up_ptr->usr_handle;
connected = p_ptr->connected;
+ peer_invalid = connected && !tipc_port_peer_msg(p_ptr, msg);
published = p_ptr->published;
if (unlikely(msg_errcode(msg)))
@@ -705,8 +714,6 @@ static void port_dispatcher_sigh(void *dummy)
case TIPC_CONN_MSG:{
tipc_conn_msg_event cb = up_ptr->conn_msg_cb;
- u32 peer_port = port_peerport(p_ptr);
- u32 peer_node = port_peernode(p_ptr);
u32 dsz;
tipc_port_unlock(p_ptr);
@@ -715,8 +722,7 @@ static void port_dispatcher_sigh(void *dummy)
if (unlikely(!connected)) {
if (tipc_connect2port(dref, &orig))
goto reject;
- } else if ((msg_origport(msg) != peer_port) ||
- (msg_orignode(msg) != peer_node))
+ } else if (peer_invalid)
goto reject;
dsz = msg_data_sz(msg);
if (unlikely(dsz &&
@@ -768,14 +774,9 @@ err:
case TIPC_CONN_MSG:{
tipc_conn_shutdown_event cb =
up_ptr->conn_err_cb;
- u32 peer_port = port_peerport(p_ptr);
- u32 peer_node = port_peernode(p_ptr);
tipc_port_unlock(p_ptr);
- if (!cb || !connected)
- break;
- if ((msg_origport(msg) != peer_port) ||
- (msg_orignode(msg) != peer_node))
+ if (!cb || !connected || peer_invalid)
break;
tipc_disconnect(dref);
skb_pull(buf, msg_hdr_sz(msg));
@@ -826,7 +827,6 @@ reject:
 * port_dispatcher(): Dispatcher for messages destined
* to the tipc_port interface. Called with port locked.
*/
-
static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf)
{
buf->next = NULL;
@@ -843,10 +843,8 @@ static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf)
}
/*
- * Wake up port after congestion: Called with port locked,
- *
+ * Wake up port after congestion: Called with port locked
*/
-
static void port_wakeup_sh(unsigned long ref)
{
struct tipc_port *p_ptr;
@@ -892,7 +890,6 @@ void tipc_acknowledge(u32 ref, u32 ack)
/*
* tipc_createport(): user level call.
*/
-
int tipc_createport(void *usr_handle,
unsigned int importance,
tipc_msg_err_event error_cb,
@@ -901,7 +898,7 @@ int tipc_createport(void *usr_handle,
tipc_msg_event msg_cb,
tipc_named_msg_event named_msg_cb,
tipc_conn_msg_event conn_msg_cb,
- tipc_continue_event continue_event_cb,/* May be zero */
+ tipc_continue_event continue_event_cb, /* May be zero */
u32 *portref)
{
struct user_port *up_ptr;
@@ -975,10 +972,6 @@ int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
if (p_ptr->connected)
goto exit;
- if (seq->lower > seq->upper)
- goto exit;
- if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE))
- goto exit;
key = ref + p_ptr->pub_count + 1;
if (key == ref) {
res = -EADDRINUSE;
@@ -1078,7 +1071,6 @@ exit:
*
* Port must be locked.
*/
-
int tipc_disconnect_port(struct tipc_port *tp_ptr)
{
int res;
@@ -1099,7 +1091,6 @@ int tipc_disconnect_port(struct tipc_port *tp_ptr)
 * tipc_disconnect(): Disconnect port from peer.
* This is a node local operation.
*/
-
int tipc_disconnect(u32 ref)
{
struct tipc_port *p_ptr;
@@ -1134,7 +1125,6 @@ int tipc_shutdown(u32 ref)
/**
* tipc_port_recv_msg - receive message from lower layer and deliver to port user
*/
-
int tipc_port_recv_msg(struct sk_buff *buf)
{
struct tipc_port *p_ptr;
@@ -1152,17 +1142,6 @@ int tipc_port_recv_msg(struct sk_buff *buf)
/* validate destination & pass to port, otherwise reject message */
p_ptr = tipc_port_lock(destport);
if (likely(p_ptr)) {
- if (likely(p_ptr->connected)) {
- if ((unlikely(msg_origport(msg) !=
- tipc_peer_port(p_ptr))) ||
- (unlikely(msg_orignode(msg) !=
- tipc_peer_node(p_ptr))) ||
- (unlikely(!msg_connected(msg)))) {
- err = TIPC_ERR_NO_PORT;
- tipc_port_unlock(p_ptr);
- goto reject;
- }
- }
err = p_ptr->dispatcher(p_ptr, buf);
tipc_port_unlock(p_ptr);
if (likely(!err))
@@ -1170,7 +1149,7 @@ int tipc_port_recv_msg(struct sk_buff *buf)
} else {
err = TIPC_ERR_NO_PORT;
}
-reject:
+
return tipc_reject_msg(buf, err);
}
@@ -1178,7 +1157,6 @@ reject:
* tipc_port_recv_sections(): Concatenate and deliver sectioned
* message for this node.
*/
-
static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_sect,
struct iovec const *msg_sect,
unsigned int total_len)
@@ -1196,7 +1174,6 @@ static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_se
/**
* tipc_send - send message sections on connection
*/
-
int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect,
unsigned int total_len)
{
@@ -1211,7 +1188,7 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect,
p_ptr->congested = 1;
if (!tipc_port_congested(p_ptr)) {
destnode = port_peernode(p_ptr);
- if (likely(destnode != tipc_own_addr))
+ if (likely(!in_own_node(destnode)))
res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
total_len, destnode);
else
@@ -1235,7 +1212,6 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect,
/**
* tipc_send2name - send message sections to port name
*/
-
int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
unsigned int num_sect, struct iovec const *msg_sect,
unsigned int total_len)
@@ -1261,13 +1237,17 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
msg_set_destport(msg, destport);
if (likely(destport || destnode)) {
- if (likely(destnode == tipc_own_addr))
+ if (likely(in_own_node(destnode)))
res = tipc_port_recv_sections(p_ptr, num_sect,
msg_sect, total_len);
- else
+ else if (tipc_own_addr)
res = tipc_link_send_sections_fast(p_ptr, msg_sect,
num_sect, total_len,
destnode);
+ else
+ res = tipc_port_reject_sections(p_ptr, msg, msg_sect,
+ num_sect, total_len,
+ TIPC_ERR_NO_NODE);
if (likely(res != -ELINKCONG)) {
if (res > 0)
p_ptr->sent++;
@@ -1285,7 +1265,6 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
/**
* tipc_send2port - send message sections to port identity
*/
-
int tipc_send2port(u32 ref, struct tipc_portid const *dest,
unsigned int num_sect, struct iovec const *msg_sect,
unsigned int total_len)
@@ -1305,12 +1284,15 @@ int tipc_send2port(u32 ref, struct tipc_portid const *dest,
msg_set_destport(msg, dest->ref);
msg_set_hdr_sz(msg, BASIC_H_SIZE);
- if (dest->node == tipc_own_addr)
+ if (in_own_node(dest->node))
res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect,
total_len);
- else
+ else if (tipc_own_addr)
res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
total_len, dest->node);
+ else
+ res = tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect,
+ total_len, TIPC_ERR_NO_NODE);
if (likely(res != -ELINKCONG)) {
if (res > 0)
p_ptr->sent++;
@@ -1325,7 +1307,6 @@ int tipc_send2port(u32 ref, struct tipc_portid const *dest,
/**
* tipc_send_buf2port - send message buffer to port identity
*/
-
int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest,
struct sk_buff *buf, unsigned int dsz)
{
@@ -1349,7 +1330,7 @@ int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest,
skb_push(buf, BASIC_H_SIZE);
skb_copy_to_linear_data(buf, msg, BASIC_H_SIZE);
- if (dest->node == tipc_own_addr)
+ if (in_own_node(dest->node))
res = tipc_port_recv_msg(buf);
else
res = tipc_send_buf_fast(buf, dest->node);
@@ -1362,4 +1343,3 @@ int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest,
return dsz;
return -ELINKCONG;
}
-
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 9b88531e5a61..98cbec9c4532 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -81,7 +81,6 @@ typedef void (*tipc_continue_event) (void *usr_handle, u32 portref);
* @ref: object reference to associated TIPC port
* <various callback routines>
*/
-
struct user_port {
void *usr_handle;
u32 ref;
@@ -201,6 +200,7 @@ int tipc_shutdown(u32 ref);
* The following routines require that the port be locked on entry
*/
int tipc_disconnect_port(struct tipc_port *tp_ptr);
+int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg);
/*
* TIPC messaging routines
@@ -235,7 +235,6 @@ void tipc_port_reinit(void);
/**
* tipc_port_lock - lock port instance referred to and return its pointer
*/
-
static inline struct tipc_port *tipc_port_lock(u32 ref)
{
return (struct tipc_port *)tipc_ref_lock(ref);
@@ -246,7 +245,6 @@ static inline struct tipc_port *tipc_port_lock(u32 ref)
*
* Can use pointer instead of tipc_ref_unlock() since port is already locked.
*/
-
static inline void tipc_port_unlock(struct tipc_port *p_ptr)
{
spin_unlock_bh(p_ptr->lock);
@@ -257,16 +255,6 @@ static inline struct tipc_port *tipc_port_deref(u32 ref)
return (struct tipc_port *)tipc_ref_deref(ref);
}
-static inline u32 tipc_peer_port(struct tipc_port *p_ptr)
-{
- return msg_destport(&p_ptr->phdr);
-}
-
-static inline u32 tipc_peer_node(struct tipc_port *p_ptr)
-{
- return msg_destnode(&p_ptr->phdr);
-}
-
static inline int tipc_port_congested(struct tipc_port *p_ptr)
{
return (p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2);
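
For scale: TIPC_FLOW_CONTROL_WIN is 512 in this era (defined elsewhere in port.h, not visible in the diff), so the test above reports congestion once about 1024 sent messages are unacknowledged:

    /* Assuming TIPC_FLOW_CONTROL_WIN == 512: */
    p_ptr->sent  = 1500;
    p_ptr->acked = 400;
    tipc_port_congested(p_ptr);     /* 1100 >= 1024, so congested */
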
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 9e37b7812c3c..5cada0e38e03 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -43,7 +43,6 @@
* @lock: spinlock controlling access to object
* @ref: reference value for object (combines instance & array index info)
*/
-
struct reference {
void *object;
spinlock_t lock;
@@ -60,7 +59,6 @@ struct reference {
* @index_mask: bitmask for array index portion of reference values
* @start_mask: initial value for instance value portion of reference values
*/
-
struct ref_table {
struct reference *entries;
u32 capacity;
@@ -96,7 +94,6 @@ static DEFINE_RWLOCK(ref_table_lock);
/**
* tipc_ref_table_init - create reference table for objects
*/
-
int tipc_ref_table_init(u32 requested_size, u32 start)
{
struct reference *table;
@@ -109,7 +106,6 @@ int tipc_ref_table_init(u32 requested_size, u32 start)
/* do nothing */ ;
/* allocate table & mark all entries as uninitialized */
-
table = vzalloc(actual_size * sizeof(struct reference));
if (table == NULL)
return -ENOMEM;
@@ -128,7 +124,6 @@ int tipc_ref_table_init(u32 requested_size, u32 start)
/**
* tipc_ref_table_stop - destroy reference table for objects
*/
-
void tipc_ref_table_stop(void)
{
if (!tipc_ref_table.entries)
@@ -149,7 +144,6 @@ void tipc_ref_table_stop(void)
* register a partially initialized object, without running the risk that
* the object will be accessed before initialization is complete.
*/
-
u32 tipc_ref_acquire(void *object, spinlock_t **lock)
{
u32 index;
@@ -168,7 +162,6 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
}
/* take a free entry, if available; otherwise initialize a new entry */
-
write_lock_bh(&ref_table_lock);
if (tipc_ref_table.first_free) {
index = tipc_ref_table.first_free;
@@ -211,7 +204,6 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
* Disallow future references to an object and free up the entry for re-use.
* Note: The entry's spin_lock may still be busy after discard
*/
-
void tipc_ref_discard(u32 ref)
{
struct reference *entry;
@@ -242,12 +234,10 @@ void tipc_ref_discard(u32 ref)
* mark entry as unused; increment instance part of entry's reference
* to invalidate any subsequent references
*/
-
entry->object = NULL;
entry->ref = (ref & ~index_mask) + (index_mask + 1);
/* append entry to free entry list */
-
if (tipc_ref_table.first_free == 0)
tipc_ref_table.first_free = index;
else
@@ -261,7 +251,6 @@ exit:
/**
* tipc_ref_lock - lock referenced object and return pointer to it
*/
-
void *tipc_ref_lock(u32 ref)
{
if (likely(tipc_ref_table.entries)) {
@@ -283,7 +272,6 @@ void *tipc_ref_lock(u32 ref)
/**
 * tipc_ref_deref - return pointer to referenced object (without locking it)
*/
-
void *tipc_ref_deref(u32 ref)
{
if (likely(tipc_ref_table.entries)) {
@@ -296,4 +284,3 @@ void *tipc_ref_deref(u32 ref)
}
return NULL;
}
-
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 29e957f64458..5577a447f531 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -123,7 +123,6 @@ static atomic_t tipc_queue_size = ATOMIC_INIT(0);
*
* Caller must hold socket lock
*/
-
static void advance_rx_queue(struct sock *sk)
{
kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
@@ -135,7 +134,6 @@ static void advance_rx_queue(struct sock *sk)
*
* Caller must hold socket lock
*/
-
static void discard_rx_queue(struct sock *sk)
{
struct sk_buff *buf;
@@ -151,7 +149,6 @@ static void discard_rx_queue(struct sock *sk)
*
* Caller must hold socket lock
*/
-
static void reject_rx_queue(struct sock *sk)
{
struct sk_buff *buf;
@@ -174,7 +171,6 @@ static void reject_rx_queue(struct sock *sk)
*
* Returns 0 on success, errno otherwise
*/
-
static int tipc_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
@@ -184,7 +180,6 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
struct tipc_port *tp_ptr;
/* Validate arguments */
-
if (unlikely(protocol != 0))
return -EPROTONOSUPPORT;
@@ -207,13 +202,11 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
}
/* Allocate socket's protocol area */
-
sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
if (sk == NULL)
return -ENOMEM;
/* Allocate TIPC port for socket to use */
-
tp_ptr = tipc_createport_raw(sk, &dispatch, &wakeupdispatch,
TIPC_LOW_IMPORTANCE);
if (unlikely(!tp_ptr)) {
@@ -222,7 +215,6 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
}
/* Finish initializing socket data structures */
-
sock->ops = ops;
sock->state = state;
@@ -258,7 +250,6 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
*
* Returns 0 on success, errno otherwise
*/
-
static int release(struct socket *sock)
{
struct sock *sk = sock->sk;
@@ -270,7 +261,6 @@ static int release(struct socket *sock)
* Exit if socket isn't fully initialized (occurs when a failed accept()
* releases a pre-allocated child socket that was never used)
*/
-
if (sk == NULL)
return 0;
@@ -281,7 +271,6 @@ static int release(struct socket *sock)
* Reject all unreceived messages, except on an active connection
* (which disconnects locally & sends a 'FIN+' to peer)
*/
-
while (sock->state != SS_DISCONNECTING) {
buf = __skb_dequeue(&sk->sk_receive_queue);
if (buf == NULL)
@@ -303,15 +292,12 @@ static int release(struct socket *sock)
* Delete TIPC port; this ensures no more messages are queued
* (also disconnects an active connection & sends a 'FIN-' to peer)
*/
-
res = tipc_deleteport(tport->ref);
/* Discard any remaining (connection-based) messages in receive queue */
-
discard_rx_queue(sk);
/* Reject any messages that accumulated in backlog queue */
-
sock->state = SS_DISCONNECTING;
release_sock(sk);
@@ -336,7 +322,6 @@ static int release(struct socket *sock)
* NOTE: This routine doesn't need to take the socket lock since it doesn't
* access any non-constant socket information.
*/
-
static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
{
struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
@@ -376,7 +361,6 @@ static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
* accesses socket information that is unchanging (or which changes in
* a completely predictable manner).
*/
-
static int get_name(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
@@ -444,7 +428,6 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
* imply that the operation will succeed, merely that it should be performed
* and will not block.
*/
-
static unsigned int poll(struct file *file, struct socket *sock,
poll_table *wait)
{
@@ -482,7 +465,6 @@ static unsigned int poll(struct file *file, struct socket *sock,
*
* Returns 0 if permission is granted, otherwise errno
*/
-
static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
{
struct tipc_cfg_msg_hdr hdr;
@@ -518,7 +500,6 @@ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
*
* Returns the number of bytes sent on success, or errno otherwise
*/
-
static int send_msg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len)
{
@@ -535,7 +516,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
(dest->family != AF_TIPC)))
return -EINVAL;
if ((total_len > TIPC_MAX_USER_MSG_SIZE) ||
- (m->msg_iovlen > (unsigned)INT_MAX))
+ (m->msg_iovlen > (unsigned int)INT_MAX))
return -EMSGSIZE;
if (iocb)
@@ -562,7 +543,6 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
}
/* Abort any pending connection attempts (very unlikely) */
-
reject_rx_queue(sk);
}
@@ -631,7 +611,6 @@ exit:
*
* Returns the number of bytes sent on success, or errno otherwise
*/
-
static int send_packet(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len)
{
@@ -642,12 +621,11 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
int res;
/* Handle implied connection establishment */
-
if (unlikely(dest))
return send_msg(iocb, sock, m, total_len);
if ((total_len > TIPC_MAX_USER_MSG_SIZE) ||
- (m->msg_iovlen > (unsigned)INT_MAX))
+ (m->msg_iovlen > (unsigned int)INT_MAX))
return -EMSGSIZE;
if (iocb)
@@ -695,7 +673,6 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
* Returns the number of bytes sent on success (or partial success),
* or errno if no data sent
*/
-
static int send_stream(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len)
{
@@ -715,7 +692,6 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
lock_sock(sk);
/* Handle special cases where there is no connection */
-
if (unlikely(sock->state != SS_CONNECTED)) {
if (sock->state == SS_UNCONNECTED) {
res = send_packet(NULL, sock, m, total_len);
@@ -734,8 +710,8 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
goto exit;
}
- if ((total_len > (unsigned)INT_MAX) ||
- (m->msg_iovlen > (unsigned)INT_MAX)) {
+ if ((total_len > (unsigned int)INT_MAX) ||
+ (m->msg_iovlen > (unsigned int)INT_MAX)) {
res = -EMSGSIZE;
goto exit;
}
@@ -747,7 +723,6 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
* (i.e. one large iovec entry), but could be improved to pass sets
* of small iovec entries into send_packet().
*/
-
curr_iov = m->msg_iov;
curr_iovlen = m->msg_iovlen;
my_msg.msg_iov = &my_iov;
@@ -796,7 +771,6 @@ exit:
*
* Returns 0 on success, errno otherwise
*/
-
static int auto_connect(struct socket *sock, struct tipc_msg *msg)
{
struct tipc_sock *tsock = tipc_sk(sock->sk);
@@ -821,7 +795,6 @@ static int auto_connect(struct socket *sock, struct tipc_msg *msg)
*
* Note: Address is not captured if not requested by receiver.
*/
-
static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
{
struct sockaddr_tipc *addr = (struct sockaddr_tipc *)m->msg_name;
@@ -847,7 +820,6 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
*
* Returns 0 if successful, otherwise errno
*/
-
static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
struct tipc_port *tport)
{
@@ -861,7 +833,6 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
return 0;
/* Optionally capture errored message object(s) */
-
err = msg ? msg_errcode(msg) : 0;
if (unlikely(err)) {
anc_data[0] = err;
@@ -878,7 +849,6 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
}
/* Optionally capture message destination object */
-
dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
switch (dest_type) {
case TIPC_NAMED_MSG:
@@ -923,7 +893,6 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
*
* Returns size of returned message data, errno otherwise
*/
-
static int recv_msg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t buf_len, int flags)
{
@@ -937,7 +906,6 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
int res;
/* Catch invalid receive requests */
-
if (unlikely(!buf_len))
return -EINVAL;
@@ -952,7 +920,6 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
restart:
/* Look for a message in receive queue; wait if necessary */
-
while (skb_queue_empty(&sk->sk_receive_queue)) {
if (sock->state == SS_DISCONNECTING) {
res = -ENOTCONN;
@@ -970,14 +937,12 @@ restart:
}
/* Look at first message in receive queue */
-
buf = skb_peek(&sk->sk_receive_queue);
msg = buf_msg(buf);
sz = msg_data_sz(msg);
err = msg_errcode(msg);
/* Complete connection setup for an implied connect */
-
if (unlikely(sock->state == SS_CONNECTING)) {
res = auto_connect(sock, msg);
if (res)
@@ -985,24 +950,20 @@ restart:
}
/* Discard an empty non-errored message & try again */
-
if ((!sz) && (!err)) {
advance_rx_queue(sk);
goto restart;
}
/* Capture sender's address (optional) */
-
set_orig_addr(m, msg);
/* Capture ancillary data (optional) */
-
res = anc_data_recv(m, msg, tport);
if (res)
goto exit;
/* Capture message data (if valid) & compute return value (always) */
-
if (!err) {
if (unlikely(buf_len < sz)) {
sz = buf_len;
@@ -1022,7 +983,6 @@ restart:
}
/* Consume received message (optional) */
-
if (likely(!(flags & MSG_PEEK))) {
if ((sock->state != SS_READY) &&
(++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
@@ -1046,7 +1006,6 @@ exit:
*
* Returns size of returned message data, errno otherwise
*/
-
static int recv_stream(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t buf_len, int flags)
{
@@ -1062,7 +1021,6 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
int res = 0;
/* Catch invalid receive attempts */
-
if (unlikely(!buf_len))
return -EINVAL;
@@ -1076,10 +1034,9 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
-restart:
+restart:
/* Look for a message in receive queue; wait if necessary */
-
while (skb_queue_empty(&sk->sk_receive_queue)) {
if (sock->state == SS_DISCONNECTING) {
res = -ENOTCONN;
@@ -1097,21 +1054,18 @@ restart:
}
/* Look at first message in receive queue */
-
buf = skb_peek(&sk->sk_receive_queue);
msg = buf_msg(buf);
sz = msg_data_sz(msg);
err = msg_errcode(msg);
/* Discard an empty non-errored message & try again */
-
if ((!sz) && (!err)) {
advance_rx_queue(sk);
goto restart;
}
/* Optionally capture sender's address & ancillary data of first msg */
-
if (sz_copied == 0) {
set_orig_addr(m, msg);
res = anc_data_recv(m, msg, tport);
@@ -1120,7 +1074,6 @@ restart:
}
/* Capture message data (if valid) & compute return value (always) */
-
if (!err) {
u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle);
@@ -1152,7 +1105,6 @@ restart:
}
/* Consume received message (optional) */
-
if (likely(!(flags & MSG_PEEK))) {
if (unlikely(++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
tipc_acknowledge(tport->ref, tport->conn_unacked);
@@ -1160,7 +1112,6 @@ restart:
}
/* Loop around if more data is required */
-
if ((sz_copied < buf_len) && /* didn't get all requested data */
(!skb_queue_empty(&sk->sk_receive_queue) ||
(sz_copied < target)) && /* and more is ready or required */
@@ -1181,7 +1132,6 @@ exit:
*
* Returns 1 if queue is unable to accept message, 0 otherwise
*/
-
static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base)
{
u32 threshold;
@@ -1214,7 +1164,6 @@ static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base)
*
* Returns TIPC error status code (TIPC_OK if message is not to be rejected)
*/
-
static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
{
struct socket *sock = sk->sk_socket;
@@ -1222,12 +1171,8 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
u32 recv_q_len;
/* Reject message if it is wrong sort of message for socket */
-
- /*
- * WOULD IT BE BETTER TO JUST DISCARD THESE MESSAGES INSTEAD?
- * "NO PORT" ISN'T REALLY THE RIGHT ERROR CODE, AND THERE MAY
- * BE SECURITY IMPLICATIONS INHERENT IN REJECTING INVALID TRAFFIC
- */
+ if (msg_type(msg) > TIPC_DIRECT_MSG)
+ return TIPC_ERR_NO_PORT;
if (sock->state == SS_READY) {
if (msg_connected(msg))
@@ -1236,7 +1181,8 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
if (msg_mcast(msg))
return TIPC_ERR_NO_PORT;
if (sock->state == SS_CONNECTED) {
- if (!msg_connected(msg))
+ if (!msg_connected(msg) ||
+ !tipc_port_peer_msg(tipc_sk_port(sk), msg))
return TIPC_ERR_NO_PORT;
} else if (sock->state == SS_CONNECTING) {
if (!msg_connected(msg) && (msg_errcode(msg) == 0))
@@ -1253,7 +1199,6 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
}
/* Reject message if there isn't room to queue it */
-
recv_q_len = (u32)atomic_read(&tipc_queue_size);
if (unlikely(recv_q_len >= OVERLOAD_LIMIT_BASE)) {
if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE))
@@ -1266,13 +1211,11 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
}
/* Enqueue message (finally!) */
-
TIPC_SKB_CB(buf)->handle = 0;
atomic_inc(&tipc_queue_size);
__skb_queue_tail(&sk->sk_receive_queue, buf);
/* Initiate connection termination for an incoming 'FIN' */
-
if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) {
sock->state = SS_DISCONNECTING;
tipc_disconnect_port(tipc_sk_port(sk));
@@ -1292,7 +1235,6 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
*
* Returns 0
*/
-
static int backlog_rcv(struct sock *sk, struct sk_buff *buf)
{
u32 res;
@@ -1312,7 +1254,6 @@ static int backlog_rcv(struct sock *sk, struct sk_buff *buf)
*
* Returns TIPC error status code (TIPC_OK if message is not to be rejected)
*/
-
static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
{
struct sock *sk = (struct sock *)tport->usr_handle;
@@ -1324,12 +1265,11 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
* This code is based on sk_receive_skb(), but must be distinct from it
* since a TIPC-specific filter/reject mechanism is utilized
*/
-
bh_lock_sock(sk);
if (!sock_owned_by_user(sk)) {
res = filter_rcv(sk, buf);
} else {
- if (sk_add_backlog(sk, buf))
+ if (sk_add_backlog(sk, buf, sk->sk_rcvbuf))
res = TIPC_ERR_OVERLOAD;
else
res = TIPC_OK;
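
sk_add_backlog() grew a third argument in this release: callers now pass an explicit limit on queued bytes, here the socket's receive buffer size. A sketch of the helper as it stood in this era's include/net/sock.h (assumed, not part of this diff):

    static inline __must_check int sk_add_backlog(struct sock *sk,
                                                  struct sk_buff *skb,
                                                  unsigned int limit)
    {
            if (sk_rcvqueues_full(sk, skb, limit))
                    return -ENOBUFS;

            __sk_add_backlog(sk, skb);
            sk->sk_backlog.len += skb->truesize;
            return 0;
    }
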
@@ -1345,7 +1285,6 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
*
* Called with port lock already taken.
*/
-
static void wakeupdispatch(struct tipc_port *tport)
{
struct sock *sk = (struct sock *)tport->usr_handle;
@@ -1363,7 +1302,6 @@ static void wakeupdispatch(struct tipc_port *tport)
*
* Returns 0 on success, errno otherwise
*/
-
static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
int flags)
{
@@ -1378,21 +1316,18 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
lock_sock(sk);
/* For now, TIPC does not allow use of connect() with DGRAM/RDM types */
-
if (sock->state == SS_READY) {
res = -EOPNOTSUPP;
goto exit;
}
/* For now, TIPC does not support the non-blocking form of connect() */
-
if (flags & O_NONBLOCK) {
res = -EOPNOTSUPP;
goto exit;
}
/* Issue Posix-compliant error code if socket is in the wrong state */
-
if (sock->state == SS_LISTENING) {
res = -EOPNOTSUPP;
goto exit;
@@ -1412,18 +1347,15 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
* Note: send_msg() validates the rest of the address fields,
* so there's no need to do it here
*/
-
if (dst->addrtype == TIPC_ADDR_MCAST) {
res = -EINVAL;
goto exit;
}
/* Reject any messages already in receive queue (very unlikely) */
-
reject_rx_queue(sk);
/* Send a 'SYN-' to destination */
-
m.msg_name = dest;
m.msg_namelen = destlen;
res = send_msg(NULL, sock, &m, 0);
@@ -1431,7 +1363,6 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
goto exit;
/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
-
timeout = tipc_sk(sk)->conn_timeout;
release_sock(sk);
res = wait_event_interruptible_timeout(*sk_sleep(sk),
@@ -1476,7 +1407,6 @@ exit:
*
* Returns 0 on success, errno otherwise
*/
-
static int listen(struct socket *sock, int len)
{
struct sock *sk = sock->sk;
@@ -1503,7 +1433,6 @@ static int listen(struct socket *sock, int len)
*
* Returns 0 on success, errno otherwise
*/
-
static int accept(struct socket *sock, struct socket *new_sock, int flags)
{
struct sock *sk = sock->sk;
@@ -1546,11 +1475,9 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
* Reject any stray messages received by new socket
* before the socket lock was taken (very, very unlikely)
*/
-
reject_rx_queue(new_sk);
	/* Connect new socket to its peer */
-
new_tsock->peer_name.ref = msg_origport(msg);
new_tsock->peer_name.node = msg_orignode(msg);
tipc_connect2port(new_ref, &new_tsock->peer_name);
@@ -1566,7 +1493,6 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
* Respond to 'SYN-' by discarding it & returning 'ACK'-.
* Respond to 'SYN+' by queuing it on new socket.
*/
-
if (!msg_data_sz(msg)) {
struct msghdr m = {NULL,};
@@ -1592,7 +1518,6 @@ exit:
*
* Returns 0 on success, errno otherwise
*/
-
static int shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
@@ -1609,8 +1534,8 @@ static int shutdown(struct socket *sock, int how)
case SS_CONNECTING:
case SS_CONNECTED:
- /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
restart:
+ /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
buf = __skb_dequeue(&sk->sk_receive_queue);
if (buf) {
atomic_dec(&tipc_queue_size);
@@ -1631,7 +1556,6 @@ restart:
case SS_DISCONNECTING:
/* Discard any unreceived messages; wake up sleeping tasks */
-
discard_rx_queue(sk);
if (waitqueue_active(sk_sleep(sk)))
wake_up_interruptible(sk_sleep(sk));
@@ -1659,7 +1583,6 @@ restart:
*
* Returns 0 on success, errno otherwise
*/
-
static int setsockopt(struct socket *sock,
int lvl, int opt, char __user *ov, unsigned int ol)
{
@@ -1719,7 +1642,6 @@ static int setsockopt(struct socket *sock,
*
* Returns 0 on success, errno otherwise
*/
-
static int getsockopt(struct socket *sock,
int lvl, int opt, char __user *ov, int __user *ol)
{
@@ -1780,7 +1702,6 @@ static int getsockopt(struct socket *sock,
/**
* Protocol switches for the various types of TIPC sockets
*/
-
static const struct proto_ops msg_ops = {
.owner = THIS_MODULE,
.family = AF_TIPC,
@@ -1886,7 +1807,6 @@ int tipc_socket_init(void)
/**
* tipc_socket_stop - stop TIPC socket interface
*/
-
void tipc_socket_stop(void)
{
if (!sockets_enabled)
@@ -1896,4 +1816,3 @@ void tipc_socket_stop(void)
sock_unregister(tipc_family_ops.family);
proto_unregister(&tipc_proto);
}
-
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index b2964e9895d3..f976e9cd6a72 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -46,7 +46,6 @@
* @subscriber_list: adjacent subscribers in top. server's list of subscribers
* @subscription_list: list of subscription objects for this subscriber
*/
-
struct tipc_subscriber {
u32 port_ref;
spinlock_t *lock;
@@ -56,13 +55,11 @@ struct tipc_subscriber {
/**
* struct top_srv - TIPC network topology subscription service
- * @user_ref: TIPC userid of subscription service
* @setup_port: reference to TIPC port that handles subscription requests
* @subscription_count: number of active subscriptions (not subscribers!)
* @subscriber_list: list of ports subscribing to service
 * @lock: spinlock governing access to subscriber list
*/
-
struct top_srv {
u32 setup_port;
atomic_t subscription_count;
@@ -79,7 +76,6 @@ static struct top_srv topsrv;
*
* Returns converted value
*/
-
static u32 htohl(u32 in, int swap)
{
return swap ? swab32(in) : in;
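
A subscriber may run on a host of either endianness, so every field of its request passes through htohl() once 'swap' has been determined. For example:

    u32 wire = 0x01000000;      /* the value 1, sent by an opposite-endian peer */
    u32 val  = htohl(wire, 1);  /* swab32() gives 0x00000001 */
    u32 same = htohl(wire, 0);  /* no swap, stays 0x01000000 */
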
@@ -91,7 +87,6 @@ static u32 htohl(u32 in, int swap)
* Note: Must not hold subscriber's server port lock, since tipc_send() will
* try to take the lock if the message is rejected and returned!
*/
-
static void subscr_send_event(struct tipc_subscription *sub,
u32 found_lower,
u32 found_upper,
@@ -117,7 +112,6 @@ static void subscr_send_event(struct tipc_subscription *sub,
*
* Returns 1 if there is overlap, otherwise 0.
*/
-
int tipc_subscr_overlap(struct tipc_subscription *sub,
u32 found_lower,
u32 found_upper)
@@ -137,7 +131,6 @@ int tipc_subscr_overlap(struct tipc_subscription *sub,
*
* Protected by nameseq.lock in name_table.c
*/
-
void tipc_subscr_report_overlap(struct tipc_subscription *sub,
u32 found_lower,
u32 found_upper,
@@ -157,43 +150,35 @@ void tipc_subscr_report_overlap(struct tipc_subscription *sub,
/**
* subscr_timeout - subscription timeout has occurred
*/
-
static void subscr_timeout(struct tipc_subscription *sub)
{
struct tipc_port *server_port;
/* Validate server port reference (in case subscriber is terminating) */
-
server_port = tipc_port_lock(sub->server_ref);
if (server_port == NULL)
return;
/* Validate timeout (in case subscription is being cancelled) */
-
if (sub->timeout == TIPC_WAIT_FOREVER) {
tipc_port_unlock(server_port);
return;
}
/* Unlink subscription from name table */
-
tipc_nametbl_unsubscribe(sub);
/* Unlink subscription from subscriber */
-
list_del(&sub->subscription_list);
/* Release subscriber's server port */
-
tipc_port_unlock(server_port);
/* Notify subscriber of timeout */
-
subscr_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
TIPC_SUBSCR_TIMEOUT, 0, 0);
/* Now destroy subscription */
-
k_term_timer(&sub->timer);
kfree(sub);
atomic_dec(&topsrv.subscription_count);
@@ -204,7 +189,6 @@ static void subscr_timeout(struct tipc_subscription *sub)
*
* Called with subscriber port locked.
*/
-
static void subscr_del(struct tipc_subscription *sub)
{
tipc_nametbl_unsubscribe(sub);
@@ -223,7 +207,6 @@ static void subscr_del(struct tipc_subscription *sub)
* a new object reference in the interim that uses this lock; this routine will
* simply wait for it to be released, then claim it.)
*/
-
static void subscr_terminate(struct tipc_subscriber *subscriber)
{
u32 port_ref;
@@ -231,18 +214,15 @@ static void subscr_terminate(struct tipc_subscriber *subscriber)
struct tipc_subscription *sub_temp;
/* Invalidate subscriber reference */
-
port_ref = subscriber->port_ref;
subscriber->port_ref = 0;
spin_unlock_bh(subscriber->lock);
/* Sever connection to subscriber */
-
tipc_shutdown(port_ref);
tipc_deleteport(port_ref);
/* Destroy any existing subscriptions for subscriber */
-
list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
subscription_list) {
if (sub->timeout != TIPC_WAIT_FOREVER) {
@@ -253,17 +233,14 @@ static void subscr_terminate(struct tipc_subscriber *subscriber)
}
/* Remove subscriber from topology server's subscriber list */
-
spin_lock_bh(&topsrv.lock);
list_del(&subscriber->subscriber_list);
spin_unlock_bh(&topsrv.lock);
/* Reclaim subscriber lock */
-
spin_lock_bh(subscriber->lock);
/* Now destroy subscriber */
-
kfree(subscriber);
}
@@ -276,7 +253,6 @@ static void subscr_terminate(struct tipc_subscriber *subscriber)
*
* Note that fields of 's' use subscriber's endianness!
*/
-
static void subscr_cancel(struct tipc_subscr *s,
struct tipc_subscriber *subscriber)
{
@@ -285,7 +261,6 @@ static void subscr_cancel(struct tipc_subscr *s,
int found = 0;
/* Find first matching subscription, exit if not found */
-
list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
subscription_list) {
if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) {
@@ -297,7 +272,6 @@ static void subscr_cancel(struct tipc_subscr *s,
return;
/* Cancel subscription timer (if used), then delete subscription */
-
if (sub->timeout != TIPC_WAIT_FOREVER) {
sub->timeout = TIPC_WAIT_FOREVER;
spin_unlock_bh(subscriber->lock);
@@ -313,7 +287,6 @@ static void subscr_cancel(struct tipc_subscr *s,
*
* Called with subscriber port locked.
*/
-
static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
struct tipc_subscriber *subscriber)
{
@@ -321,11 +294,9 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
int swap;
/* Determine subscriber's endianness */
-
swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE));
/* Detect & process a subscription cancellation request */
-
if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
subscr_cancel(s, subscriber);
@@ -333,7 +304,6 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
}
/* Refuse subscription if global limit exceeded */
-
if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) {
warn("Subscription rejected, subscription limit reached (%u)\n",
tipc_max_subscriptions);
@@ -342,7 +312,6 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
}
/* Allocate subscription object */
-
sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
if (!sub) {
warn("Subscription rejected, no memory\n");
@@ -351,7 +320,6 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
}
/* Initialize subscription object */
-
sub->seq.type = htohl(s->seq.type, swap);
sub->seq.lower = htohl(s->seq.lower, swap);
sub->seq.upper = htohl(s->seq.upper, swap);
@@ -385,7 +353,6 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
*
* Called with subscriber's server port unlocked.
*/
-
static void subscr_conn_shutdown_event(void *usr_handle,
u32 port_ref,
struct sk_buff **buf,
@@ -409,7 +376,6 @@ static void subscr_conn_shutdown_event(void *usr_handle,
*
* Called with subscriber's server port unlocked.
*/
-
static void subscr_conn_msg_event(void *usr_handle,
u32 port_ref,
struct sk_buff **buf,
@@ -424,7 +390,6 @@ static void subscr_conn_msg_event(void *usr_handle,
* Lock subscriber's server port (& make a local copy of lock pointer,
* in case subscriber is deleted while processing subscription request)
*/
-
if (tipc_port_lock(port_ref) == NULL)
return;
@@ -452,7 +417,6 @@ static void subscr_conn_msg_event(void *usr_handle,
* timeout code cannot delete the subscription,
* so the subscription object is still protected.
*/
-
tipc_nametbl_subscribe(sub);
}
}
@@ -461,7 +425,6 @@ static void subscr_conn_msg_event(void *usr_handle,
/**
* subscr_named_msg_event - handle request to establish a new subscriber
*/
-
static void subscr_named_msg_event(void *usr_handle,
u32 port_ref,
struct sk_buff **buf,
@@ -475,7 +438,6 @@ static void subscr_named_msg_event(void *usr_handle,
u32 server_port_ref;
/* Create subscriber object */
-
subscriber = kzalloc(sizeof(struct tipc_subscriber), GFP_ATOMIC);
if (subscriber == NULL) {
warn("Subscriber rejected, no memory\n");
@@ -485,7 +447,6 @@ static void subscr_named_msg_event(void *usr_handle,
INIT_LIST_HEAD(&subscriber->subscriber_list);
/* Create server port & establish connection to subscriber */
-
tipc_createport(subscriber,
importance,
NULL,
@@ -504,26 +465,21 @@ static void subscr_named_msg_event(void *usr_handle,
tipc_connect2port(subscriber->port_ref, orig);
/* Lock server port (& save lock address for future use) */
-
subscriber->lock = tipc_port_lock(subscriber->port_ref)->lock;
/* Add subscriber to topology server's subscriber list */
-
spin_lock_bh(&topsrv.lock);
list_add(&subscriber->subscriber_list, &topsrv.subscriber_list);
spin_unlock_bh(&topsrv.lock);
/* Unlock server port */
-
server_port_ref = subscriber->port_ref;
spin_unlock_bh(subscriber->lock);
/* Send an ACK- to complete connection handshaking */
-
tipc_send(server_port_ref, 0, NULL, 0);
/* Handle optional subscription request */
-
if (size != 0) {
subscr_conn_msg_event(subscriber, server_port_ref,
buf, data, size);
@@ -535,7 +491,6 @@ int tipc_subscr_start(void)
struct tipc_name_seq seq = {TIPC_TOP_SRV, TIPC_TOP_SRV, TIPC_TOP_SRV};
int res;
- memset(&topsrv, 0, sizeof(topsrv));
spin_lock_init(&topsrv.lock);
INIT_LIST_HEAD(&topsrv.subscriber_list);
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index ef6529c8456f..218d2e07f0cc 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -51,7 +51,6 @@ struct tipc_subscription;
* @swap: indicates if subscriber uses opposite endianness in its messages
* @evt: template for events generated by subscription
*/
-
struct tipc_subscription {
struct tipc_name_seq seq;
u32 timeout;
@@ -80,5 +79,4 @@ int tipc_subscr_start(void);
void tipc_subscr_stop(void);
-
#endif
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index d510353ef431..641f2e47f165 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -149,9 +149,10 @@ static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
* each socket state is protected by separate spin lock.
*/
-static inline unsigned unix_hash_fold(__wsum n)
+static inline unsigned int unix_hash_fold(__wsum n)
{
- unsigned hash = (__force unsigned)n;
+ unsigned int hash = (__force unsigned int)n;
+
hash ^= hash>>16;
hash ^= hash>>8;
return hash&(UNIX_HASH_SIZE-1);
@@ -200,7 +201,7 @@ static inline void unix_release_addr(struct unix_address *addr)
* - if started by zero, it is abstract name.
*/
-static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp)
+static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
{
if (len <= sizeof(short) || len > sizeof(*sunaddr))
return -EINVAL;
@@ -250,7 +251,7 @@ static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
static struct sock *__unix_find_socket_byname(struct net *net,
struct sockaddr_un *sunname,
- int len, int type, unsigned hash)
+ int len, int type, unsigned int hash)
{
struct sock *s;
struct hlist_node *node;
@@ -273,7 +274,7 @@ found:
static inline struct sock *unix_find_socket_byname(struct net *net,
struct sockaddr_un *sunname,
int len, int type,
- unsigned hash)
+ unsigned int hash)
{
struct sock *s;
@@ -760,7 +761,7 @@ out: mutex_unlock(&u->readlock);
static struct sock *unix_find_other(struct net *net,
struct sockaddr_un *sunname, int len,
- int type, unsigned hash, int *error)
+ int type, unsigned int hash, int *error)
{
struct sock *u;
struct path path;
@@ -824,7 +825,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
struct dentry *dentry = NULL;
struct path path;
int err;
- unsigned hash;
+ unsigned int hash;
struct unix_address *addr;
struct hlist_head *list;
@@ -964,7 +965,7 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
struct net *net = sock_net(sk);
struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
struct sock *other;
- unsigned hash;
+ unsigned int hash;
int err;
if (addr->sa_family != AF_UNSPEC) {
@@ -1062,7 +1063,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
struct sock *newsk = NULL;
struct sock *other = NULL;
struct sk_buff *skb = NULL;
- unsigned hash;
+ unsigned int hash;
int st;
int err;
long timeo;
@@ -1437,11 +1438,12 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
struct sock *other = NULL;
int namelen = 0; /* fake GCC */
int err;
- unsigned hash;
+ unsigned int hash;
struct sk_buff *skb;
long timeo;
struct scm_cookie tmp_scm;
int max_level;
+ int data_len = 0;
if (NULL == siocb->scm)
siocb->scm = &tmp_scm;
@@ -1475,7 +1477,13 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
if (len > sk->sk_sndbuf - 32)
goto out;
- skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
+ if (len > SKB_MAX_ALLOC)
+ data_len = min_t(size_t,
+ len - SKB_MAX_ALLOC,
+ MAX_SKB_FRAGS * PAGE_SIZE);
+
+ skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
+ msg->msg_flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto out;
@@ -1485,8 +1493,10 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
max_level = err + 1;
unix_get_secdata(siocb->scm, skb);
- skb_reset_transport_header(skb);
- err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+ skb_put(skb, len - data_len);
+ skb->data_len = data_len;
+ skb->len = len;
+ err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len);
if (err)
goto out_free;
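[reviewer note] Large AF_UNIX datagrams no longer need one huge linear kmalloc: anything beyond SKB_MAX_ALLOC goes into page fragments, capped at MAX_SKB_FRAGS * PAGE_SIZE, and skb_copy_datagram_from_iovec() fills both the linear head and the fragments. A runnable sketch of the same length split, with assumed stand-in values for the kernel constants:

    #include <stdio.h>

    #define SKB_MAX_ALLOC   (4 * 4096)      /* assumed head-size cap */
    #define MAX_SKB_FRAGS   17              /* assumed fragment limit */
    #define PAGE_SIZE       4096

    /* split a datagram of 'len' bytes into linear head + paged data */
    static void split_skb(size_t len, size_t *head, size_t *paged)
    {
            size_t data_len = 0;

            if (len > SKB_MAX_ALLOC) {
                    data_len = len - SKB_MAX_ALLOC;
                    if (data_len > (size_t)MAX_SKB_FRAGS * PAGE_SIZE)
                            data_len = (size_t)MAX_SKB_FRAGS * PAGE_SIZE;
            }
            *head = len - data_len;
            *paged = data_len;
    }

    int main(void)
    {
            size_t head, paged;

            split_skb(100000, &head, &paged);
            printf("head=%zu paged=%zu\n", head, paged);
            return 0;
    }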
diff --git a/net/unix/diag.c b/net/unix/diag.c
index f0486ae9ebe6..47d3002737f5 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -310,7 +310,7 @@ static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
return unix_diag_get_exact(skb, h, (struct unix_diag_req *)NLMSG_DATA(h));
}
-static struct sock_diag_handler unix_diag_handler = {
+static const struct sock_diag_handler unix_diag_handler = {
.family = AF_UNIX,
.dump = unix_diag_handler_dump,
};
diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
index 397cffebb3b6..b34b5b9792f0 100644
--- a/net/unix/sysctl_net_unix.c
+++ b/net/unix/sysctl_net_unix.c
@@ -26,12 +26,6 @@ static ctl_table unix_table[] = {
{ }
};
-static struct ctl_path unix_path[] = {
- { .procname = "net", },
- { .procname = "unix", },
- { },
-};
-
int __net_init unix_sysctl_register(struct net *net)
{
struct ctl_table *table;
@@ -41,7 +35,7 @@ int __net_init unix_sysctl_register(struct net *net)
goto err_alloc;
table[0].data = &net->unx.sysctl_max_dgram_qlen;
- net->unx.ctl = register_net_sysctl_table(net, unix_path, table);
+ net->unx.ctl = register_net_sysctl(net, "net/unix", table);
if (net->unx.ctl == NULL)
goto err_reg;
@@ -58,6 +52,6 @@ void unix_sysctl_unregister(struct net *net)
struct ctl_table *table;
table = net->unx.ctl->ctl_table_arg;
- unregister_sysctl_table(net->unx.ctl);
+ unregister_net_sysctl_table(net->unx.ctl);
kfree(table);
}
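[reviewer note] With the path passed as a plain string, the ctl_path array is dead weight; the conversion also pairs registration and teardown through the net-aware helpers. A condensed before/after of the pattern, excerpted from the hunk above (kernel context, not standalone-buildable):

    /* before: path spelled out as a struct array */
    static struct ctl_path unix_path[] = {
            { .procname = "net", },
            { .procname = "unix", },
            { },
    };
    net->unx.ctl = register_net_sysctl_table(net, unix_path, table);
    unregister_sysctl_table(net->unx.ctl);

    /* after: path inline, torn down by the matching net-aware helper */
    net->unx.ctl = register_net_sysctl(net, "net/unix", table);
    unregister_net_sysctl_table(net->unx.ctl);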
diff --git a/net/wanrouter/Kconfig b/net/wanrouter/Kconfig
index 61ceae0b9566..a157a2e64e18 100644
--- a/net/wanrouter/Kconfig
+++ b/net/wanrouter/Kconfig
@@ -3,7 +3,7 @@
#
config WAN_ROUTER
- tristate "WAN router"
+ tristate "WAN router (DEPRECATED)"
depends on EXPERIMENTAL
---help---
Wide Area Networks (WANs), such as X.25, frame relay and leased
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index 3c65eae701c4..a6470ac39498 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -187,7 +187,7 @@ out:
static
void __check_new_state(enum wimax_st old_state, enum wimax_st new_state,
- unsigned allowed_states_bm)
+ unsigned int allowed_states_bm)
{
if (WARN_ON(((1 << new_state) & allowed_states_bm) == 0)) {
printk(KERN_ERR "SW BUG! Forbidden state change %u -> %u\n",
@@ -425,7 +425,8 @@ static
size_t wimax_addr_scnprint(char *addr_str, size_t addr_str_size,
unsigned char *addr, size_t addr_len)
{
- unsigned cnt, total;
+ unsigned int cnt, total;
+
for (total = cnt = 0; cnt < addr_len; cnt++)
total += scnprintf(addr_str + total, addr_str_size - total,
"%02x%c", addr[cnt],
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 2fcfe0993ca2..884801ac4dd0 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -45,7 +45,7 @@ rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
return chan;
}
-int cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
+bool cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type)
{
diff --git a/net/wireless/core.c b/net/wireless/core.c
index ccdfed897651..a87d43552974 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -422,10 +422,6 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
const struct ieee80211_iface_combination *c;
int i, j;
- /* If we have combinations enforce them */
- if (wiphy->n_iface_combinations)
- wiphy->flags |= WIPHY_FLAG_ENFORCE_COMBINATIONS;
-
for (i = 0; i < wiphy->n_iface_combinations; i++) {
u32 cnt = 0;
u16 all_iftypes = 0;
@@ -668,7 +664,7 @@ void wiphy_unregister(struct wiphy *wiphy)
mutex_lock(&rdev->devlist_mtx);
__count = rdev->opencount;
mutex_unlock(&rdev->devlist_mtx);
- __count == 0;}));
+ __count == 0; }));
mutex_lock(&rdev->devlist_mtx);
BUG_ON(!list_empty(&rdev->netdev_list));
@@ -708,6 +704,10 @@ void wiphy_unregister(struct wiphy *wiphy)
flush_work(&rdev->scan_done_wk);
cancel_work_sync(&rdev->conn_work);
flush_work(&rdev->event_work);
+
+ if (rdev->wowlan && rdev->ops->set_wakeup)
+ rdev->ops->set_wakeup(&rdev->wiphy, false);
+ cfg80211_rdev_free_wowlan(rdev);
}
EXPORT_SYMBOL(wiphy_unregister);
@@ -720,7 +720,6 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev)
mutex_destroy(&rdev->sched_scan_mtx);
list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list)
cfg80211_put_bss(&scan->pub);
- cfg80211_rdev_free_wowlan(rdev);
kfree(rdev);
}
@@ -777,7 +776,7 @@ static struct device_type wiphy_type = {
.name = "wlan",
};
-static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
+static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
unsigned long state,
void *ndev)
{
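[reviewer note] Two related changes land in core.c: wiphy_unregister() now tells the driver to disarm wakeup while its callbacks are still live, and the WoWLAN configuration is freed there too instead of in cfg80211_dev_free(), which may run long after the driver has detached. The resulting teardown order, condensed from the hunks above (kernel context):

    /* teardown order after this change (condensed sketch) */
    flush_work(&rdev->event_work);              /* driver still callable here */
    if (rdev->wowlan && rdev->ops->set_wakeup)
            rdev->ops->set_wakeup(&rdev->wiphy, false);
    cfg80211_rdev_free_wowlan(rdev);            /* moved out of dev_free() */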
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 3ac2dd00d714..8523f3878677 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -445,8 +445,6 @@ int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev, int freq,
enum nl80211_channel_type channel_type);
-u16 cfg80211_calculate_bitrate(struct rate_info *rate);
-
int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
const u8 *rates, unsigned int n_rates,
u32 *mask);
diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c
index 9bde4d1d3e9b..7eecdf40cf80 100644
--- a/net/wireless/ethtool.c
+++ b/net/wireless/ethtool.c
@@ -68,6 +68,32 @@ static int cfg80211_set_ringparam(struct net_device *dev,
return -ENOTSUPP;
}
+static int cfg80211_get_sset_count(struct net_device *dev, int sset)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ if (rdev->ops->get_et_sset_count)
+ return rdev->ops->get_et_sset_count(wdev->wiphy, dev, sset);
+ return -EOPNOTSUPP;
+}
+
+static void cfg80211_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ if (rdev->ops->get_et_stats)
+ rdev->ops->get_et_stats(wdev->wiphy, dev, stats, data);
+}
+
+static void cfg80211_get_strings(struct net_device *dev, u32 sset, u8 *data)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ if (rdev->ops->get_et_strings)
+ rdev->ops->get_et_strings(wdev->wiphy, dev, sset, data);
+}
+
const struct ethtool_ops cfg80211_ethtool_ops = {
.get_drvinfo = cfg80211_get_drvinfo,
.get_regs_len = cfg80211_get_regs_len,
@@ -75,4 +101,7 @@ const struct ethtool_ops cfg80211_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_ringparam = cfg80211_get_ringparam,
.set_ringparam = cfg80211_set_ringparam,
+ .get_strings = cfg80211_get_strings,
+ .get_ethtool_stats = cfg80211_get_stats,
+ .get_sset_count = cfg80211_get_sset_count,
};
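[reviewer note] The three new ethtool ops are thin pass-throughs to optional cfg80211_ops callbacks, so a driver opts in simply by filling them. A kernel-context sketch of a hypothetical driver doing so (names FOO_N_STATS and foo_* are illustrative, not a real driver):

    static int foo_get_et_sset_count(struct wiphy *wiphy,
                                     struct net_device *dev, int sset)
    {
            /* advertise stats only for the ETH_SS_STATS string set */
            return sset == ETH_SS_STATS ? FOO_N_STATS : -EOPNOTSUPP;
    }

    static const struct cfg80211_ops foo_cfg80211_ops = {
            .get_et_sset_count = foo_get_et_sset_count,
            /* .get_et_stats and .get_et_strings are wired up the same way */
    };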
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 30f20fe4a5fe..d2a19b0ff71f 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -473,7 +473,7 @@ int cfg80211_ibss_wext_siwap(struct net_device *dev,
/* fixed already - and no change */
if (wdev->wext.ibss.bssid && bssid &&
- compare_ether_addr(bssid, wdev->wext.ibss.bssid) == 0)
+ ether_addr_equal(bssid, wdev->wext.ibss.bssid))
return 0;
wdev_lock(wdev);
diff --git a/net/wireless/lib80211_crypt_ccmp.c b/net/wireless/lib80211_crypt_ccmp.c
index 755738d26bb4..1526c211db66 100644
--- a/net/wireless/lib80211_crypt_ccmp.c
+++ b/net/wireless/lib80211_crypt_ccmp.c
@@ -304,10 +304,8 @@ static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
pos = skb->data + hdr_len;
keyidx = pos[3];
if (!(keyidx & (1 << 5))) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG "CCMP: received packet without ExtIV"
- " flag from %pM\n", hdr->addr2);
- }
+ net_dbg_ratelimited("CCMP: received packet without ExtIV flag from %pM\n",
+ hdr->addr2);
key->dot11RSNAStatsCCMPFormatErrors++;
return -2;
}
@@ -318,11 +316,8 @@ static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
return -6;
}
if (!key->key_set) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG "CCMP: received packet from %pM"
- " with keyid=%d that does not have a configured"
- " key\n", hdr->addr2, keyidx);
- }
+ net_dbg_ratelimited("CCMP: received packet from %pM with keyid=%d that does not have a configured key\n",
+ hdr->addr2, keyidx);
return -3;
}
@@ -336,15 +331,11 @@ static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
if (ccmp_replay_check(pn, key->rx_pn)) {
#ifdef CONFIG_LIB80211_DEBUG
- if (net_ratelimit()) {
- printk(KERN_DEBUG "CCMP: replay detected: STA=%pM "
- "previous PN %02x%02x%02x%02x%02x%02x "
- "received PN %02x%02x%02x%02x%02x%02x\n",
- hdr->addr2,
- key->rx_pn[0], key->rx_pn[1], key->rx_pn[2],
- key->rx_pn[3], key->rx_pn[4], key->rx_pn[5],
- pn[0], pn[1], pn[2], pn[3], pn[4], pn[5]);
- }
+ net_dbg_ratelimited("CCMP: replay detected: STA=%pM previous PN %02x%02x%02x%02x%02x%02x received PN %02x%02x%02x%02x%02x%02x\n",
+ hdr->addr2,
+ key->rx_pn[0], key->rx_pn[1], key->rx_pn[2],
+ key->rx_pn[3], key->rx_pn[4], key->rx_pn[5],
+ pn[0], pn[1], pn[2], pn[3], pn[4], pn[5]);
#endif
key->dot11RSNAStatsCCMPReplays++;
return -4;
@@ -370,10 +361,8 @@ static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
}
if (memcmp(mic, a, CCMP_MIC_LEN) != 0) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG "CCMP: decrypt failed: STA="
- "%pM\n", hdr->addr2);
- }
+ net_dbg_ratelimited("CCMP: decrypt failed: STA=%pM\n",
+ hdr->addr2);
key->dot11RSNAStatsCCMPDecryptErrors++;
return -5;
}
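[reviewer note] net_dbg_ratelimited() folds the recurring "if (net_ratelimit()) printk(KERN_DEBUG ...)" idiom into one call, which also lets the format strings stay on single greppable lines. A runnable userspace analogue of the rate-limiting behaviour (a sketch, not the kernel implementation):

    #include <stdarg.h>
    #include <stdio.h>
    #include <time.h>

    /* stand-in for net_dbg_ratelimited(): at most 'burst' lines per window */
    static void dbg_ratelimited(const char *fmt, ...)
    {
            static time_t window;
            static int printed;
            const int burst = 10, interval = 5;
            time_t now = time(NULL);
            va_list ap;

            if (now - window >= interval) {
                    window = now;
                    printed = 0;
            }
            if (printed++ >= burst)
                    return;         /* suppressed: over the rate limit */

            va_start(ap, fmt);
            vfprintf(stderr, fmt, ap);
            va_end(ap);
    }

    int main(void)
    {
            for (int i = 0; i < 100; i++)
                    dbg_ratelimited("CCMP: decrypt failed: STA=%02x\n", i);
            return 0;
    }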
diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c
index 38734846c19e..d475cfc8568f 100644
--- a/net/wireless/lib80211_crypt_tkip.c
+++ b/net/wireless/lib80211_crypt_tkip.c
@@ -360,12 +360,9 @@ static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
struct scatterlist sg;
if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) {
- if (net_ratelimit()) {
- struct ieee80211_hdr *hdr =
- (struct ieee80211_hdr *)skb->data;
- printk(KERN_DEBUG ": TKIP countermeasures: dropped "
- "TX packet to %pM\n", hdr->addr1);
- }
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ net_dbg_ratelimited("TKIP countermeasures: dropped TX packet to %pM\n",
+ hdr->addr1);
return -1;
}
@@ -420,10 +417,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
hdr = (struct ieee80211_hdr *)skb->data;
if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG ": TKIP countermeasures: dropped "
- "received packet from %pM\n", hdr->addr2);
- }
+ net_dbg_ratelimited("TKIP countermeasures: dropped received packet from %pM\n",
+ hdr->addr2);
return -1;
}
@@ -433,10 +428,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
pos = skb->data + hdr_len;
keyidx = pos[3];
if (!(keyidx & (1 << 5))) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG "TKIP: received packet without ExtIV"
- " flag from %pM\n", hdr->addr2);
- }
+ net_dbg_ratelimited("TKIP: received packet without ExtIV flag from %pM\n",
+ hdr->addr2);
return -2;
}
keyidx >>= 6;
@@ -446,11 +439,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
return -6;
}
if (!tkey->key_set) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG "TKIP: received packet from %pM"
- " with keyid=%d that does not have a configured"
- " key\n", hdr->addr2, keyidx);
- }
+ net_dbg_ratelimited("TKIP: received packet from %pM with keyid=%d that does not have a configured key\n",
+ hdr->addr2, keyidx);
return -3;
}
iv16 = (pos[0] << 8) | pos[2];
@@ -459,12 +449,9 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
if (tkip_replay_check(iv32, iv16, tkey->rx_iv32, tkey->rx_iv16)) {
#ifdef CONFIG_LIB80211_DEBUG
- if (net_ratelimit()) {
- printk(KERN_DEBUG "TKIP: replay detected: STA=%pM"
- " previous TSC %08x%04x received TSC "
- "%08x%04x\n", hdr->addr2,
- tkey->rx_iv32, tkey->rx_iv16, iv32, iv16);
- }
+ net_dbg_ratelimited("TKIP: replay detected: STA=%pM previous TSC %08x%04x received TSC %08x%04x\n",
+ hdr->addr2, tkey->rx_iv32, tkey->rx_iv16,
+ iv32, iv16);
#endif
tkey->dot11RSNAStatsTKIPReplays++;
return -4;
@@ -481,11 +468,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
sg_init_one(&sg, pos, plen + 4);
if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG ": TKIP: failed to decrypt "
- "received packet from %pM\n",
- hdr->addr2);
- }
+ net_dbg_ratelimited("TKIP: failed to decrypt received packet from %pM\n",
+ hdr->addr2);
return -7;
}
@@ -501,10 +485,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
tkey->rx_phase1_done = 0;
}
#ifdef CONFIG_LIB80211_DEBUG
- if (net_ratelimit()) {
- printk(KERN_DEBUG "TKIP: ICV error detected: STA="
- "%pM\n", hdr->addr2);
- }
+ net_dbg_ratelimited("TKIP: ICV error detected: STA=%pM\n",
+ hdr->addr2);
#endif
tkey->dot11RSNAStatsTKIPICVErrors++;
return -5;
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index ba21ab22187b..2749cb86b462 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -38,6 +38,7 @@
#define MESH_MAX_PREQ_RETRIES 4
+#define MESH_SYNC_NEIGHBOR_OFFSET_MAX 50
const struct mesh_config default_mesh_config = {
.dot11MeshRetryTimeout = MESH_RET_T,
@@ -48,6 +49,7 @@ const struct mesh_config default_mesh_config = {
.element_ttl = MESH_DEFAULT_ELEMENT_TTL,
.auto_open_plinks = true,
.dot11MeshMaxPeerLinks = MESH_MAX_ESTAB_PLINKS,
+ .dot11MeshNbrOffsetMaxNeighbor = MESH_SYNC_NEIGHBOR_OFFSET_MAX,
.dot11MeshHWMPactivePathTimeout = MESH_PATH_TIMEOUT,
.dot11MeshHWMPpreqMinInterval = MESH_PREQ_MIN_INT,
.dot11MeshHWMPperrMinInterval = MESH_PERR_MIN_INT,
@@ -59,9 +61,11 @@ const struct mesh_config default_mesh_config = {
.dot11MeshGateAnnouncementProtocol = false,
.dot11MeshForwarding = true,
.rssi_threshold = MESH_RSSI_THRESHOLD,
+ .ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED,
};
const struct mesh_setup default_mesh_setup = {
+ .sync_method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET,
.path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP,
.path_metric = IEEE80211_PATH_METRIC_AIRTIME,
.ie = NULL,
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index f5a7ac3a0939..eb90988bbd36 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/nl80211.h>
#include <linux/slab.h>
@@ -100,7 +101,7 @@ void __cfg80211_send_deauth(struct net_device *dev,
ASSERT_WDEV_LOCK(wdev);
if (wdev->current_bss &&
- memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) {
+ ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) {
cfg80211_unhold_bss(wdev->current_bss);
cfg80211_put_bss(&wdev->current_bss->pub);
wdev->current_bss = NULL;
@@ -115,7 +116,7 @@ void __cfg80211_send_deauth(struct net_device *dev,
reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
- from_ap = memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0;
+ from_ap = !ether_addr_equal(mgmt->sa, dev->dev_addr);
__cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap);
} else if (wdev->sme_state == CFG80211_SME_CONNECTING) {
__cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0,
@@ -154,7 +155,7 @@ void __cfg80211_send_disassoc(struct net_device *dev,
return;
if (wdev->current_bss &&
- memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) {
+ ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) {
cfg80211_sme_disassoc(dev, wdev->current_bss);
cfg80211_unhold_bss(wdev->current_bss);
cfg80211_put_bss(&wdev->current_bss->pub);
@@ -165,7 +166,7 @@ void __cfg80211_send_disassoc(struct net_device *dev,
reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
- from_ap = memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0;
+ from_ap = !ether_addr_equal(mgmt->sa, dev->dev_addr);
__cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap);
}
EXPORT_SYMBOL(__cfg80211_send_disassoc);
@@ -285,7 +286,7 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
return -EINVAL;
if (wdev->current_bss &&
- memcmp(bssid, wdev->current_bss->pub.bssid, ETH_ALEN) == 0)
+ ether_addr_equal(bssid, wdev->current_bss->pub.bssid))
return -EALREADY;
memset(&req, 0, sizeof(req));
@@ -362,7 +363,7 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
memset(&req, 0, sizeof(req));
if (wdev->current_bss && prev_bssid &&
- memcmp(wdev->current_bss->pub.bssid, prev_bssid, ETH_ALEN) == 0) {
+ ether_addr_equal(wdev->current_bss->pub.bssid, prev_bssid)) {
/*
* Trying to reassociate: Allow this to proceed and let the old
* association to be dropped when the new one is completed.
@@ -446,7 +447,7 @@ int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
if (local_state_change) {
if (wdev->current_bss &&
- memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) {
+ ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) {
cfg80211_unhold_bss(wdev->current_bss);
cfg80211_put_bss(&wdev->current_bss->pub);
wdev->current_bss = NULL;
@@ -495,7 +496,7 @@ static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
req.local_state_change = local_state_change;
req.ie = ie;
req.ie_len = ie_len;
- if (memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0)
+ if (ether_addr_equal(wdev->current_bss->pub.bssid, bssid))
req.bss = &wdev->current_bss->pub;
else
return -ENOTCONN;
@@ -758,8 +759,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
break;
}
- if (memcmp(wdev->current_bss->pub.bssid,
- mgmt->bssid, ETH_ALEN)) {
+ if (!ether_addr_equal(wdev->current_bss->pub.bssid,
+ mgmt->bssid)) {
err = -ENOTCONN;
break;
}
@@ -772,8 +773,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
break;
/* for station, check that DA is the AP */
- if (memcmp(wdev->current_bss->pub.bssid,
- mgmt->da, ETH_ALEN)) {
+ if (!ether_addr_equal(wdev->current_bss->pub.bssid,
+ mgmt->da)) {
err = -ENOTCONN;
break;
}
@@ -781,11 +782,11 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
case NL80211_IFTYPE_AP_VLAN:
- if (memcmp(mgmt->bssid, dev->dev_addr, ETH_ALEN))
+ if (!ether_addr_equal(mgmt->bssid, dev->dev_addr))
err = -EINVAL;
break;
case NL80211_IFTYPE_MESH_POINT:
- if (memcmp(mgmt->sa, mgmt->bssid, ETH_ALEN)) {
+ if (!ether_addr_equal(mgmt->sa, mgmt->bssid)) {
err = -EINVAL;
break;
}
@@ -804,7 +805,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
return err;
}
- if (memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0)
+ if (!ether_addr_equal(mgmt->sa, dev->dev_addr))
return -EINVAL;
/* Transmit the Action frame as requested by user space */
@@ -928,6 +929,33 @@ void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
}
EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify);
+void cfg80211_ch_switch_notify(struct net_device *dev, int freq,
+ enum nl80211_channel_type type)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+ struct ieee80211_channel *chan;
+
+ wdev_lock(wdev);
+
+ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
+ wdev->iftype != NL80211_IFTYPE_P2P_GO))
+ goto out;
+
+ chan = rdev_freq_to_chan(rdev, freq, type);
+ if (WARN_ON(!chan))
+ goto out;
+
+ wdev->channel = chan;
+
+ nl80211_ch_switch_notify(rdev, dev, freq, type, GFP_KERNEL);
+out:
+ wdev_unlock(wdev);
+ return;
+}
+EXPORT_SYMBOL(cfg80211_ch_switch_notify);
+
bool cfg80211_rx_spurious_frame(struct net_device *dev,
const u8 *addr, gfp_t gfp)
{
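[reviewer note] The mlme.c conversions are purely mechanical but invert the truth value: memcmp() returns 0 on a match, while ether_addr_equal() returns true. A runnable sketch of the mapping, using a userspace stand-in for the kernel helper:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6

    /* userspace stand-in for the kernel's ether_addr_equal() */
    static bool ether_addr_equal(const unsigned char *a, const unsigned char *b)
    {
            return memcmp(a, b, ETH_ALEN) == 0;
    }

    int main(void)
    {
            unsigned char ap[ETH_ALEN]  = {0, 1, 2, 3, 4, 5};
            unsigned char sta[ETH_ALEN] = {0, 1, 2, 3, 4, 9};

            /* old: memcmp(a, b, ETH_ALEN) == 0  ->  new: ether_addr_equal(a, b) */
            /* old: memcmp(a, b, ETH_ALEN)       ->  new: !ether_addr_equal(a, b) */
            printf("%d %d\n", ether_addr_equal(ap, ap),
                   !ether_addr_equal(ap, sta));
            return 0;
    }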
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index e49da2797022..206465dc0cab 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -356,20 +356,26 @@ static inline void *nl80211hdr_put(struct sk_buff *skb, u32 pid, u32 seq,
static int nl80211_msg_put_channel(struct sk_buff *msg,
struct ieee80211_channel *chan)
{
- NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_FREQ,
- chan->center_freq);
+ if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_FREQ,
+ chan->center_freq))
+ goto nla_put_failure;
- if (chan->flags & IEEE80211_CHAN_DISABLED)
- NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_DISABLED);
- if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)
- NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_PASSIVE_SCAN);
- if (chan->flags & IEEE80211_CHAN_NO_IBSS)
- NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_NO_IBSS);
- if (chan->flags & IEEE80211_CHAN_RADAR)
- NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_RADAR);
+ if ((chan->flags & IEEE80211_CHAN_DISABLED) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_DISABLED))
+ goto nla_put_failure;
+ if ((chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_PASSIVE_SCAN))
+ goto nla_put_failure;
+ if ((chan->flags & IEEE80211_CHAN_NO_IBSS) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS))
+ goto nla_put_failure;
+ if ((chan->flags & IEEE80211_CHAN_RADAR) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR))
+ goto nla_put_failure;
- NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
- DBM_TO_MBM(chan->max_power));
+ if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
+ DBM_TO_MBM(chan->max_power)))
+ goto nla_put_failure;
return 0;
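[reviewer note] From here on, nl80211.c replaces the NLA_PUT* macros with open-coded nla_put_* calls. The macros hid a jump to a local nla_put_failure label, which is why every converted site now checks the return value and branches there explicitly. The removed helper from <net/netlink.h> was essentially:

    #define NLA_PUT(skb, attrtype, attrlen, data)                              \
            do {                                                               \
                    if (unlikely(nla_put(skb, attrtype, attrlen, data) < 0))   \
                            goto nla_put_failure;                              \
            } while (0)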
@@ -621,8 +627,8 @@ static int nl80211_put_iftypes(struct sk_buff *msg, u32 attr, u16 ifmodes)
i = 0;
while (ifmodes) {
- if (ifmodes & 1)
- NLA_PUT_FLAG(msg, i);
+ if ((ifmodes & 1) && nla_put_flag(msg, i))
+ goto nla_put_failure;
ifmodes >>= 1;
i++;
}
@@ -665,8 +671,9 @@ static int nl80211_put_iface_combinations(struct wiphy *wiphy,
nl_limit = nla_nest_start(msg, j + 1);
if (!nl_limit)
goto nla_put_failure;
- NLA_PUT_U32(msg, NL80211_IFACE_LIMIT_MAX,
- c->limits[j].max);
+ if (nla_put_u32(msg, NL80211_IFACE_LIMIT_MAX,
+ c->limits[j].max))
+ goto nla_put_failure;
if (nl80211_put_iftypes(msg, NL80211_IFACE_LIMIT_TYPES,
c->limits[j].types))
goto nla_put_failure;
@@ -675,13 +682,14 @@ static int nl80211_put_iface_combinations(struct wiphy *wiphy,
nla_nest_end(msg, nl_limits);
- if (c->beacon_int_infra_match)
- NLA_PUT_FLAG(msg,
- NL80211_IFACE_COMB_STA_AP_BI_MATCH);
- NLA_PUT_U32(msg, NL80211_IFACE_COMB_NUM_CHANNELS,
- c->num_different_channels);
- NLA_PUT_U32(msg, NL80211_IFACE_COMB_MAXNUM,
- c->max_interfaces);
+ if (c->beacon_int_infra_match &&
+ nla_put_flag(msg, NL80211_IFACE_COMB_STA_AP_BI_MATCH))
+ goto nla_put_failure;
+ if (nla_put_u32(msg, NL80211_IFACE_COMB_NUM_CHANNELS,
+ c->num_different_channels) ||
+ nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM,
+ c->max_interfaces))
+ goto nla_put_failure;
nla_nest_end(msg, nl_combi);
}
@@ -712,64 +720,74 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
if (!hdr)
return -1;
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx);
- NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy));
-
- NLA_PUT_U32(msg, NL80211_ATTR_GENERATION,
- cfg80211_rdev_list_generation);
-
- NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT,
- dev->wiphy.retry_short);
- NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_LONG,
- dev->wiphy.retry_long);
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD,
- dev->wiphy.frag_threshold);
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD,
- dev->wiphy.rts_threshold);
- NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS,
- dev->wiphy.coverage_class);
- NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
- dev->wiphy.max_scan_ssids);
- NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS,
- dev->wiphy.max_sched_scan_ssids);
- NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN,
- dev->wiphy.max_scan_ie_len);
- NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN,
- dev->wiphy.max_sched_scan_ie_len);
- NLA_PUT_U8(msg, NL80211_ATTR_MAX_MATCH_SETS,
- dev->wiphy.max_match_sets);
-
- if (dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)
- NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_IBSS_RSN);
- if (dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH)
- NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_MESH_AUTH);
- if (dev->wiphy.flags & WIPHY_FLAG_AP_UAPSD)
- NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_AP_UAPSD);
- if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM)
- NLA_PUT_FLAG(msg, NL80211_ATTR_ROAM_SUPPORT);
- if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS)
- NLA_PUT_FLAG(msg, NL80211_ATTR_TDLS_SUPPORT);
- if (dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP)
- NLA_PUT_FLAG(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP);
-
- NLA_PUT(msg, NL80211_ATTR_CIPHER_SUITES,
- sizeof(u32) * dev->wiphy.n_cipher_suites,
- dev->wiphy.cipher_suites);
-
- NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_PMKIDS,
- dev->wiphy.max_num_pmkids);
-
- if (dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL)
- NLA_PUT_FLAG(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE);
-
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX,
- dev->wiphy.available_antennas_tx);
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX,
- dev->wiphy.available_antennas_rx);
-
- if (dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD)
- NLA_PUT_U32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD,
- dev->wiphy.probe_resp_offload);
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx) ||
+ nla_put_string(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)) ||
+ nla_put_u32(msg, NL80211_ATTR_GENERATION,
+ cfg80211_rdev_list_generation) ||
+ nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT,
+ dev->wiphy.retry_short) ||
+ nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_LONG,
+ dev->wiphy.retry_long) ||
+ nla_put_u32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD,
+ dev->wiphy.frag_threshold) ||
+ nla_put_u32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD,
+ dev->wiphy.rts_threshold) ||
+ nla_put_u8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS,
+ dev->wiphy.coverage_class) ||
+ nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
+ dev->wiphy.max_scan_ssids) ||
+ nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS,
+ dev->wiphy.max_sched_scan_ssids) ||
+ nla_put_u16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN,
+ dev->wiphy.max_scan_ie_len) ||
+ nla_put_u16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN,
+ dev->wiphy.max_sched_scan_ie_len) ||
+ nla_put_u8(msg, NL80211_ATTR_MAX_MATCH_SETS,
+ dev->wiphy.max_match_sets))
+ goto nla_put_failure;
+
+ if ((dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) &&
+ nla_put_flag(msg, NL80211_ATTR_SUPPORT_IBSS_RSN))
+ goto nla_put_failure;
+ if ((dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) &&
+ nla_put_flag(msg, NL80211_ATTR_SUPPORT_MESH_AUTH))
+ goto nla_put_failure;
+ if ((dev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) &&
+ nla_put_flag(msg, NL80211_ATTR_SUPPORT_AP_UAPSD))
+ goto nla_put_failure;
+ if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM) &&
+ nla_put_flag(msg, NL80211_ATTR_ROAM_SUPPORT))
+ goto nla_put_failure;
+ if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
+ nla_put_flag(msg, NL80211_ATTR_TDLS_SUPPORT))
+ goto nla_put_failure;
+ if ((dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) &&
+ nla_put_flag(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP))
+ goto nla_put_failure;
+
+ if (nla_put(msg, NL80211_ATTR_CIPHER_SUITES,
+ sizeof(u32) * dev->wiphy.n_cipher_suites,
+ dev->wiphy.cipher_suites))
+ goto nla_put_failure;
+
+ if (nla_put_u8(msg, NL80211_ATTR_MAX_NUM_PMKIDS,
+ dev->wiphy.max_num_pmkids))
+ goto nla_put_failure;
+
+ if ((dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) &&
+ nla_put_flag(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX,
+ dev->wiphy.available_antennas_tx) ||
+ nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX,
+ dev->wiphy.available_antennas_rx))
+ goto nla_put_failure;
+
+ if ((dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) &&
+ nla_put_u32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD,
+ dev->wiphy.probe_resp_offload))
+ goto nla_put_failure;
if ((dev->wiphy.available_antennas_tx ||
dev->wiphy.available_antennas_rx) && dev->ops->get_antenna) {
@@ -777,8 +795,11 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
int res;
res = dev->ops->get_antenna(&dev->wiphy, &tx_ant, &rx_ant);
if (!res) {
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX, tx_ant);
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_RX, rx_ant);
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX,
+ tx_ant) ||
+ nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_RX,
+ rx_ant))
+ goto nla_put_failure;
}
}
@@ -799,17 +820,17 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
goto nla_put_failure;
/* add HT info */
- if (dev->wiphy.bands[band]->ht_cap.ht_supported) {
- NLA_PUT(msg, NL80211_BAND_ATTR_HT_MCS_SET,
- sizeof(dev->wiphy.bands[band]->ht_cap.mcs),
- &dev->wiphy.bands[band]->ht_cap.mcs);
- NLA_PUT_U16(msg, NL80211_BAND_ATTR_HT_CAPA,
- dev->wiphy.bands[band]->ht_cap.cap);
- NLA_PUT_U8(msg, NL80211_BAND_ATTR_HT_AMPDU_FACTOR,
- dev->wiphy.bands[band]->ht_cap.ampdu_factor);
- NLA_PUT_U8(msg, NL80211_BAND_ATTR_HT_AMPDU_DENSITY,
- dev->wiphy.bands[band]->ht_cap.ampdu_density);
- }
+ if (dev->wiphy.bands[band]->ht_cap.ht_supported &&
+ (nla_put(msg, NL80211_BAND_ATTR_HT_MCS_SET,
+ sizeof(dev->wiphy.bands[band]->ht_cap.mcs),
+ &dev->wiphy.bands[band]->ht_cap.mcs) ||
+ nla_put_u16(msg, NL80211_BAND_ATTR_HT_CAPA,
+ dev->wiphy.bands[band]->ht_cap.cap) ||
+ nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_FACTOR,
+ dev->wiphy.bands[band]->ht_cap.ampdu_factor) ||
+ nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_DENSITY,
+ dev->wiphy.bands[band]->ht_cap.ampdu_density)))
+ goto nla_put_failure;
/* add frequencies */
nl_freqs = nla_nest_start(msg, NL80211_BAND_ATTR_FREQS);
@@ -842,11 +863,13 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
goto nla_put_failure;
rate = &dev->wiphy.bands[band]->bitrates[i];
- NLA_PUT_U32(msg, NL80211_BITRATE_ATTR_RATE,
- rate->bitrate);
- if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)
- NLA_PUT_FLAG(msg,
- NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE);
+ if (nla_put_u32(msg, NL80211_BITRATE_ATTR_RATE,
+ rate->bitrate))
+ goto nla_put_failure;
+ if ((rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) &&
+ nla_put_flag(msg,
+ NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE))
+ goto nla_put_failure;
nla_nest_end(msg, nl_rate);
}
@@ -866,7 +889,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
do { \
if (dev->ops->op) { \
i++; \
- NLA_PUT_U32(msg, i, NL80211_CMD_ ## n); \
+ if (nla_put_u32(msg, i, NL80211_CMD_ ## n)) \
+ goto nla_put_failure; \
} \
} while (0)
@@ -894,7 +918,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL);
if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
i++;
- NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS);
+ if (nla_put_u32(msg, i, NL80211_CMD_SET_WIPHY_NETNS))
+ goto nla_put_failure;
}
CMD(set_channel, SET_CHANNEL);
CMD(set_wds_peer, SET_WDS_PEER);
@@ -908,7 +933,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
CMD(set_noack_map, SET_NOACK_MAP);
if (dev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) {
i++;
- NLA_PUT_U32(msg, i, NL80211_CMD_REGISTER_BEACONS);
+ if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS))
+ goto nla_put_failure;
}
#ifdef CONFIG_NL80211_TESTMODE
@@ -919,23 +945,27 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
if (dev->ops->connect || dev->ops->auth) {
i++;
- NLA_PUT_U32(msg, i, NL80211_CMD_CONNECT);
+ if (nla_put_u32(msg, i, NL80211_CMD_CONNECT))
+ goto nla_put_failure;
}
if (dev->ops->disconnect || dev->ops->deauth) {
i++;
- NLA_PUT_U32(msg, i, NL80211_CMD_DISCONNECT);
+ if (nla_put_u32(msg, i, NL80211_CMD_DISCONNECT))
+ goto nla_put_failure;
}
nla_nest_end(msg, nl_cmds);
if (dev->ops->remain_on_channel &&
- dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)
- NLA_PUT_U32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION,
- dev->wiphy.max_remain_on_channel_duration);
+ (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) &&
+ nla_put_u32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION,
+ dev->wiphy.max_remain_on_channel_duration))
+ goto nla_put_failure;
- if (dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX)
- NLA_PUT_FLAG(msg, NL80211_ATTR_OFFCHANNEL_TX_OK);
+ if ((dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX) &&
+ nla_put_flag(msg, NL80211_ATTR_OFFCHANNEL_TX_OK))
+ goto nla_put_failure;
if (mgmt_stypes) {
u16 stypes;
@@ -953,9 +983,10 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
i = 0;
stypes = mgmt_stypes[ift].tx;
while (stypes) {
- if (stypes & 1)
- NLA_PUT_U16(msg, NL80211_ATTR_FRAME_TYPE,
- (i << 4) | IEEE80211_FTYPE_MGMT);
+ if ((stypes & 1) &&
+ nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE,
+ (i << 4) | IEEE80211_FTYPE_MGMT))
+ goto nla_put_failure;
stypes >>= 1;
i++;
}
@@ -975,9 +1006,10 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
i = 0;
stypes = mgmt_stypes[ift].rx;
while (stypes) {
- if (stypes & 1)
- NLA_PUT_U16(msg, NL80211_ATTR_FRAME_TYPE,
- (i << 4) | IEEE80211_FTYPE_MGMT);
+ if ((stypes & 1) &&
+ nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE,
+ (i << 4) | IEEE80211_FTYPE_MGMT))
+ goto nla_put_failure;
stypes >>= 1;
i++;
}
@@ -994,22 +1026,23 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
if (!nl_wowlan)
goto nla_put_failure;
- if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_ANY)
- NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_ANY);
- if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_DISCONNECT)
- NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_DISCONNECT);
- if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_MAGIC_PKT)
- NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT);
- if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY)
- NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED);
- if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE)
- NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE);
- if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ)
- NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST);
- if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_4WAY_HANDSHAKE)
- NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE);
- if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_RFKILL_RELEASE)
- NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE);
+ if (((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_ANY) &&
+ nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) ||
+ ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_DISCONNECT) &&
+ nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) ||
+ ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_MAGIC_PKT) &&
+ nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) ||
+ ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) &&
+ nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED)) ||
+ ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
+ nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) ||
+ ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) &&
+ nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) ||
+ ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) &&
+ nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) ||
+ ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_RFKILL_RELEASE) &&
+ nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE)))
+ goto nla_put_failure;
if (dev->wiphy.wowlan.n_patterns) {
struct nl80211_wowlan_pattern_support pat = {
.max_patterns = dev->wiphy.wowlan.n_patterns,
@@ -1018,8 +1051,9 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
.max_pattern_len =
dev->wiphy.wowlan.pattern_max_len,
};
- NLA_PUT(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN,
- sizeof(pat), &pat);
+ if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN,
+ sizeof(pat), &pat))
+ goto nla_put_failure;
}
nla_nest_end(msg, nl_wowlan);
@@ -1032,16 +1066,20 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
if (nl80211_put_iface_combinations(&dev->wiphy, msg))
goto nla_put_failure;
- if (dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME)
- NLA_PUT_U32(msg, NL80211_ATTR_DEVICE_AP_SME,
- dev->wiphy.ap_sme_capa);
+ if ((dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) &&
+ nla_put_u32(msg, NL80211_ATTR_DEVICE_AP_SME,
+ dev->wiphy.ap_sme_capa))
+ goto nla_put_failure;
- NLA_PUT_U32(msg, NL80211_ATTR_FEATURE_FLAGS, dev->wiphy.features);
+ if (nla_put_u32(msg, NL80211_ATTR_FEATURE_FLAGS,
+ dev->wiphy.features))
+ goto nla_put_failure;
- if (dev->wiphy.ht_capa_mod_mask)
- NLA_PUT(msg, NL80211_ATTR_HT_CAPABILITY_MASK,
- sizeof(*dev->wiphy.ht_capa_mod_mask),
- dev->wiphy.ht_capa_mod_mask);
+ if (dev->wiphy.ht_capa_mod_mask &&
+ nla_put(msg, NL80211_ATTR_HT_CAPABILITY_MASK,
+ sizeof(*dev->wiphy.ht_capa_mod_mask),
+ dev->wiphy.ht_capa_mod_mask))
+ goto nla_put_failure;
return genlmsg_end(msg, hdr);
@@ -1104,17 +1142,20 @@ static const struct nla_policy txq_params_policy[NL80211_TXQ_ATTR_MAX + 1] = {
static int parse_txq_params(struct nlattr *tb[],
struct ieee80211_txq_params *txq_params)
{
- if (!tb[NL80211_TXQ_ATTR_QUEUE] || !tb[NL80211_TXQ_ATTR_TXOP] ||
+ if (!tb[NL80211_TXQ_ATTR_AC] || !tb[NL80211_TXQ_ATTR_TXOP] ||
!tb[NL80211_TXQ_ATTR_CWMIN] || !tb[NL80211_TXQ_ATTR_CWMAX] ||
!tb[NL80211_TXQ_ATTR_AIFS])
return -EINVAL;
- txq_params->queue = nla_get_u8(tb[NL80211_TXQ_ATTR_QUEUE]);
+ txq_params->ac = nla_get_u8(tb[NL80211_TXQ_ATTR_AC]);
txq_params->txop = nla_get_u16(tb[NL80211_TXQ_ATTR_TXOP]);
txq_params->cwmin = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMIN]);
txq_params->cwmax = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMAX]);
txq_params->aifs = nla_get_u8(tb[NL80211_TXQ_ATTR_AIFS]);
+ if (txq_params->ac >= NL80211_NUM_ACS)
+ return -EINVAL;
+
return 0;
}
@@ -1138,6 +1179,27 @@ static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev)
wdev->iftype == NL80211_IFTYPE_P2P_GO;
}
+static bool nl80211_valid_channel_type(struct genl_info *info,
+ enum nl80211_channel_type *channel_type)
+{
+ enum nl80211_channel_type tmp;
+
+ if (!info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE])
+ return false;
+
+ tmp = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
+ if (tmp != NL80211_CHAN_NO_HT &&
+ tmp != NL80211_CHAN_HT20 &&
+ tmp != NL80211_CHAN_HT40PLUS &&
+ tmp != NL80211_CHAN_HT40MINUS)
+ return false;
+
+ if (channel_type)
+ *channel_type = tmp;
+
+ return true;
+}
+
static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev,
struct genl_info *info)
@@ -1152,15 +1214,9 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
if (!nl80211_can_set_dev_channel(wdev))
return -EOPNOTSUPP;
- if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
- channel_type = nla_get_u32(info->attrs[
- NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
- if (channel_type != NL80211_CHAN_NO_HT &&
- channel_type != NL80211_CHAN_HT20 &&
- channel_type != NL80211_CHAN_HT40PLUS &&
- channel_type != NL80211_CHAN_HT40MINUS)
- return -EINVAL;
- }
+ if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
+ !nl80211_valid_channel_type(info, &channel_type))
+ return -EINVAL;
freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
@@ -1294,6 +1350,11 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
goto bad_res;
}
+ if (!netif_running(netdev)) {
+ result = -ENETDOWN;
+ goto bad_res;
+ }
+
nla_for_each_nested(nl_txq_params,
info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS],
rem_txq_params) {
@@ -1484,14 +1545,28 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags,
if (!hdr)
return -1;
- NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
- NLA_PUT_STRING(msg, NL80211_ATTR_IFNAME, dev->name);
- NLA_PUT_U32(msg, NL80211_ATTR_IFTYPE, dev->ieee80211_ptr->iftype);
+ if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+ nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name) ||
+ nla_put_u32(msg, NL80211_ATTR_IFTYPE,
+ dev->ieee80211_ptr->iftype) ||
+ nla_put_u32(msg, NL80211_ATTR_GENERATION,
+ rdev->devlist_generation ^
+ (cfg80211_rdev_list_generation << 2)))
+ goto nla_put_failure;
+
+ if (rdev->ops->get_channel) {
+ struct ieee80211_channel *chan;
+ enum nl80211_channel_type channel_type;
- NLA_PUT_U32(msg, NL80211_ATTR_GENERATION,
- rdev->devlist_generation ^
- (cfg80211_rdev_list_generation << 2));
+ chan = rdev->ops->get_channel(&rdev->wiphy, &channel_type);
+ if (chan &&
+ (nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ,
+ chan->center_freq) ||
+ nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE,
+ channel_type)))
+ goto nla_put_failure;
+ }
return genlmsg_end(msg, hdr);
@@ -1789,35 +1864,34 @@ static void get_key_callback(void *c, struct key_params *params)
struct nlattr *key;
struct get_key_cookie *cookie = c;
- if (params->key)
- NLA_PUT(cookie->msg, NL80211_ATTR_KEY_DATA,
- params->key_len, params->key);
-
- if (params->seq)
- NLA_PUT(cookie->msg, NL80211_ATTR_KEY_SEQ,
- params->seq_len, params->seq);
-
- if (params->cipher)
- NLA_PUT_U32(cookie->msg, NL80211_ATTR_KEY_CIPHER,
- params->cipher);
+ if ((params->key &&
+ nla_put(cookie->msg, NL80211_ATTR_KEY_DATA,
+ params->key_len, params->key)) ||
+ (params->seq &&
+ nla_put(cookie->msg, NL80211_ATTR_KEY_SEQ,
+ params->seq_len, params->seq)) ||
+ (params->cipher &&
+ nla_put_u32(cookie->msg, NL80211_ATTR_KEY_CIPHER,
+ params->cipher)))
+ goto nla_put_failure;
key = nla_nest_start(cookie->msg, NL80211_ATTR_KEY);
if (!key)
goto nla_put_failure;
- if (params->key)
- NLA_PUT(cookie->msg, NL80211_KEY_DATA,
- params->key_len, params->key);
-
- if (params->seq)
- NLA_PUT(cookie->msg, NL80211_KEY_SEQ,
- params->seq_len, params->seq);
-
- if (params->cipher)
- NLA_PUT_U32(cookie->msg, NL80211_KEY_CIPHER,
- params->cipher);
+ if ((params->key &&
+ nla_put(cookie->msg, NL80211_KEY_DATA,
+ params->key_len, params->key)) ||
+ (params->seq &&
+ nla_put(cookie->msg, NL80211_KEY_SEQ,
+ params->seq_len, params->seq)) ||
+ (params->cipher &&
+ nla_put_u32(cookie->msg, NL80211_KEY_CIPHER,
+ params->cipher)))
+ goto nla_put_failure;
- NLA_PUT_U8(cookie->msg, NL80211_ATTR_KEY_IDX, cookie->idx);
+ if (nla_put_u8(cookie->msg, NL80211_ATTR_KEY_IDX, cookie->idx))
+ goto nla_put_failure;
nla_nest_end(cookie->msg, key);
@@ -1875,10 +1949,12 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
cookie.msg = msg;
cookie.idx = key_idx;
- NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
- NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_idx);
- if (mac_addr)
- NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);
+ if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+ nla_put_u8(msg, NL80211_ATTR_KEY_IDX, key_idx))
+ goto nla_put_failure;
+ if (mac_addr &&
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
+ goto nla_put_failure;
if (pairwise && mac_addr &&
!(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
@@ -2349,10 +2425,16 @@ static int parse_station_flags(struct genl_info *info,
return -EINVAL;
}
- for (flag = 1; flag <= NL80211_STA_FLAG_MAX; flag++)
- if (flags[flag])
+ for (flag = 1; flag <= NL80211_STA_FLAG_MAX; flag++) {
+ if (flags[flag]) {
params->sta_flags_set |= (1<<flag);
+ /* no longer support new API additions in old API */
+ if (flag > NL80211_STA_FLAG_MAX_OLD_API)
+ return -EINVAL;
+ }
+ }
+
return 0;
}
@@ -2368,15 +2450,15 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info,
/* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
bitrate = cfg80211_calculate_bitrate(info);
- if (bitrate > 0)
- NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate);
-
- if (info->flags & RATE_INFO_FLAGS_MCS)
- NLA_PUT_U8(msg, NL80211_RATE_INFO_MCS, info->mcs);
- if (info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH)
- NLA_PUT_FLAG(msg, NL80211_RATE_INFO_40_MHZ_WIDTH);
- if (info->flags & RATE_INFO_FLAGS_SHORT_GI)
- NLA_PUT_FLAG(msg, NL80211_RATE_INFO_SHORT_GI);
+ if ((bitrate > 0 &&
+ nla_put_u16(msg, NL80211_RATE_INFO_BITRATE, bitrate)) ||
+ ((info->flags & RATE_INFO_FLAGS_MCS) &&
+ nla_put_u8(msg, NL80211_RATE_INFO_MCS, info->mcs)) ||
+ ((info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) &&
+ nla_put_flag(msg, NL80211_RATE_INFO_40_MHZ_WIDTH)) ||
+ ((info->flags & RATE_INFO_FLAGS_SHORT_GI) &&
+ nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI)))
+ goto nla_put_failure;
nla_nest_end(msg, rate);
return true;
@@ -2398,43 +2480,50 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
if (!hdr)
return -1;
- NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
- NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);
-
- NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, sinfo->generation);
+ if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr) ||
+ nla_put_u32(msg, NL80211_ATTR_GENERATION, sinfo->generation))
+ goto nla_put_failure;
sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO);
if (!sinfoattr)
goto nla_put_failure;
- if (sinfo->filled & STATION_INFO_CONNECTED_TIME)
- NLA_PUT_U32(msg, NL80211_STA_INFO_CONNECTED_TIME,
- sinfo->connected_time);
- if (sinfo->filled & STATION_INFO_INACTIVE_TIME)
- NLA_PUT_U32(msg, NL80211_STA_INFO_INACTIVE_TIME,
- sinfo->inactive_time);
- if (sinfo->filled & STATION_INFO_RX_BYTES)
- NLA_PUT_U32(msg, NL80211_STA_INFO_RX_BYTES,
- sinfo->rx_bytes);
- if (sinfo->filled & STATION_INFO_TX_BYTES)
- NLA_PUT_U32(msg, NL80211_STA_INFO_TX_BYTES,
- sinfo->tx_bytes);
- if (sinfo->filled & STATION_INFO_LLID)
- NLA_PUT_U16(msg, NL80211_STA_INFO_LLID,
- sinfo->llid);
- if (sinfo->filled & STATION_INFO_PLID)
- NLA_PUT_U16(msg, NL80211_STA_INFO_PLID,
- sinfo->plid);
- if (sinfo->filled & STATION_INFO_PLINK_STATE)
- NLA_PUT_U8(msg, NL80211_STA_INFO_PLINK_STATE,
- sinfo->plink_state);
+ if ((sinfo->filled & STATION_INFO_CONNECTED_TIME) &&
+ nla_put_u32(msg, NL80211_STA_INFO_CONNECTED_TIME,
+ sinfo->connected_time))
+ goto nla_put_failure;
+ if ((sinfo->filled & STATION_INFO_INACTIVE_TIME) &&
+ nla_put_u32(msg, NL80211_STA_INFO_INACTIVE_TIME,
+ sinfo->inactive_time))
+ goto nla_put_failure;
+ if ((sinfo->filled & STATION_INFO_RX_BYTES) &&
+ nla_put_u32(msg, NL80211_STA_INFO_RX_BYTES,
+ sinfo->rx_bytes))
+ goto nla_put_failure;
+ if ((sinfo->filled & STATION_INFO_TX_BYTES) &&
+ nla_put_u32(msg, NL80211_STA_INFO_TX_BYTES,
+ sinfo->tx_bytes))
+ goto nla_put_failure;
+ if ((sinfo->filled & STATION_INFO_LLID) &&
+ nla_put_u16(msg, NL80211_STA_INFO_LLID, sinfo->llid))
+ goto nla_put_failure;
+ if ((sinfo->filled & STATION_INFO_PLID) &&
+ nla_put_u16(msg, NL80211_STA_INFO_PLID, sinfo->plid))
+ goto nla_put_failure;
+ if ((sinfo->filled & STATION_INFO_PLINK_STATE) &&
+ nla_put_u8(msg, NL80211_STA_INFO_PLINK_STATE,
+ sinfo->plink_state))
+ goto nla_put_failure;
switch (rdev->wiphy.signal_type) {
case CFG80211_SIGNAL_TYPE_MBM:
- if (sinfo->filled & STATION_INFO_SIGNAL)
- NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL,
- sinfo->signal);
- if (sinfo->filled & STATION_INFO_SIGNAL_AVG)
- NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL_AVG,
- sinfo->signal_avg);
+ if ((sinfo->filled & STATION_INFO_SIGNAL) &&
+ nla_put_u8(msg, NL80211_STA_INFO_SIGNAL,
+ sinfo->signal))
+ goto nla_put_failure;
+ if ((sinfo->filled & STATION_INFO_SIGNAL_AVG) &&
+ nla_put_u8(msg, NL80211_STA_INFO_SIGNAL_AVG,
+ sinfo->signal_avg))
+ goto nla_put_failure;
break;
default:
break;
@@ -2449,49 +2538,60 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
NL80211_STA_INFO_RX_BITRATE))
goto nla_put_failure;
}
- if (sinfo->filled & STATION_INFO_RX_PACKETS)
- NLA_PUT_U32(msg, NL80211_STA_INFO_RX_PACKETS,
- sinfo->rx_packets);
- if (sinfo->filled & STATION_INFO_TX_PACKETS)
- NLA_PUT_U32(msg, NL80211_STA_INFO_TX_PACKETS,
- sinfo->tx_packets);
- if (sinfo->filled & STATION_INFO_TX_RETRIES)
- NLA_PUT_U32(msg, NL80211_STA_INFO_TX_RETRIES,
- sinfo->tx_retries);
- if (sinfo->filled & STATION_INFO_TX_FAILED)
- NLA_PUT_U32(msg, NL80211_STA_INFO_TX_FAILED,
- sinfo->tx_failed);
- if (sinfo->filled & STATION_INFO_BEACON_LOSS_COUNT)
- NLA_PUT_U32(msg, NL80211_STA_INFO_BEACON_LOSS,
- sinfo->beacon_loss_count);
+ if ((sinfo->filled & STATION_INFO_RX_PACKETS) &&
+ nla_put_u32(msg, NL80211_STA_INFO_RX_PACKETS,
+ sinfo->rx_packets))
+ goto nla_put_failure;
+ if ((sinfo->filled & STATION_INFO_TX_PACKETS) &&
+ nla_put_u32(msg, NL80211_STA_INFO_TX_PACKETS,
+ sinfo->tx_packets))
+ goto nla_put_failure;
+ if ((sinfo->filled & STATION_INFO_TX_RETRIES) &&
+ nla_put_u32(msg, NL80211_STA_INFO_TX_RETRIES,
+ sinfo->tx_retries))
+ goto nla_put_failure;
+ if ((sinfo->filled & STATION_INFO_TX_FAILED) &&
+ nla_put_u32(msg, NL80211_STA_INFO_TX_FAILED,
+ sinfo->tx_failed))
+ goto nla_put_failure;
+ if ((sinfo->filled & STATION_INFO_BEACON_LOSS_COUNT) &&
+ nla_put_u32(msg, NL80211_STA_INFO_BEACON_LOSS,
+ sinfo->beacon_loss_count))
+ goto nla_put_failure;
if (sinfo->filled & STATION_INFO_BSS_PARAM) {
bss_param = nla_nest_start(msg, NL80211_STA_INFO_BSS_PARAM);
if (!bss_param)
goto nla_put_failure;
- if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_CTS_PROT)
- NLA_PUT_FLAG(msg, NL80211_STA_BSS_PARAM_CTS_PROT);
- if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_PREAMBLE)
- NLA_PUT_FLAG(msg, NL80211_STA_BSS_PARAM_SHORT_PREAMBLE);
- if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_SLOT_TIME)
- NLA_PUT_FLAG(msg,
- NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME);
- NLA_PUT_U8(msg, NL80211_STA_BSS_PARAM_DTIM_PERIOD,
- sinfo->bss_param.dtim_period);
- NLA_PUT_U16(msg, NL80211_STA_BSS_PARAM_BEACON_INTERVAL,
- sinfo->bss_param.beacon_interval);
+ if (((sinfo->bss_param.flags & BSS_PARAM_FLAGS_CTS_PROT) &&
+ nla_put_flag(msg, NL80211_STA_BSS_PARAM_CTS_PROT)) ||
+ ((sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_PREAMBLE) &&
+ nla_put_flag(msg, NL80211_STA_BSS_PARAM_SHORT_PREAMBLE)) ||
+ ((sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_SLOT_TIME) &&
+ nla_put_flag(msg, NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME)) ||
+ nla_put_u8(msg, NL80211_STA_BSS_PARAM_DTIM_PERIOD,
+ sinfo->bss_param.dtim_period) ||
+ nla_put_u16(msg, NL80211_STA_BSS_PARAM_BEACON_INTERVAL,
+ sinfo->bss_param.beacon_interval))
+ goto nla_put_failure;
nla_nest_end(msg, bss_param);
}
- if (sinfo->filled & STATION_INFO_STA_FLAGS)
- NLA_PUT(msg, NL80211_STA_INFO_STA_FLAGS,
- sizeof(struct nl80211_sta_flag_update),
- &sinfo->sta_flags);
+ if ((sinfo->filled & STATION_INFO_STA_FLAGS) &&
+ nla_put(msg, NL80211_STA_INFO_STA_FLAGS,
+ sizeof(struct nl80211_sta_flag_update),
+ &sinfo->sta_flags))
+ goto nla_put_failure;
+ if ((sinfo->filled & STATION_INFO_T_OFFSET) &&
+ nla_put_u64(msg, NL80211_STA_INFO_T_OFFSET,
+ sinfo->t_offset))
+ goto nla_put_failure;
nla_nest_end(msg, sinfoattr);
- if (sinfo->filled & STATION_INFO_ASSOC_REQ_IES)
- NLA_PUT(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len,
- sinfo->assoc_req_ies);
+ if ((sinfo->filled & STATION_INFO_ASSOC_REQ_IES) &&
+ nla_put(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len,
+ sinfo->assoc_req_ies))
+ goto nla_put_failure;
return genlmsg_end(msg, hdr);
@@ -2913,36 +3013,37 @@ static int nl80211_send_mpath(struct sk_buff *msg, u32 pid, u32 seq,
if (!hdr)
return -1;
- NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
- NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, dst);
- NLA_PUT(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop);
-
- NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, pinfo->generation);
+ if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, dst) ||
+ nla_put(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop) ||
+ nla_put_u32(msg, NL80211_ATTR_GENERATION, pinfo->generation))
+ goto nla_put_failure;
pinfoattr = nla_nest_start(msg, NL80211_ATTR_MPATH_INFO);
if (!pinfoattr)
goto nla_put_failure;
- if (pinfo->filled & MPATH_INFO_FRAME_QLEN)
- NLA_PUT_U32(msg, NL80211_MPATH_INFO_FRAME_QLEN,
- pinfo->frame_qlen);
- if (pinfo->filled & MPATH_INFO_SN)
- NLA_PUT_U32(msg, NL80211_MPATH_INFO_SN,
- pinfo->sn);
- if (pinfo->filled & MPATH_INFO_METRIC)
- NLA_PUT_U32(msg, NL80211_MPATH_INFO_METRIC,
- pinfo->metric);
- if (pinfo->filled & MPATH_INFO_EXPTIME)
- NLA_PUT_U32(msg, NL80211_MPATH_INFO_EXPTIME,
- pinfo->exptime);
- if (pinfo->filled & MPATH_INFO_FLAGS)
- NLA_PUT_U8(msg, NL80211_MPATH_INFO_FLAGS,
- pinfo->flags);
- if (pinfo->filled & MPATH_INFO_DISCOVERY_TIMEOUT)
- NLA_PUT_U32(msg, NL80211_MPATH_INFO_DISCOVERY_TIMEOUT,
- pinfo->discovery_timeout);
- if (pinfo->filled & MPATH_INFO_DISCOVERY_RETRIES)
- NLA_PUT_U8(msg, NL80211_MPATH_INFO_DISCOVERY_RETRIES,
- pinfo->discovery_retries);
+ if ((pinfo->filled & MPATH_INFO_FRAME_QLEN) &&
+ nla_put_u32(msg, NL80211_MPATH_INFO_FRAME_QLEN,
+ pinfo->frame_qlen))
+ goto nla_put_failure;
+ if (((pinfo->filled & MPATH_INFO_SN) &&
+ nla_put_u32(msg, NL80211_MPATH_INFO_SN, pinfo->sn)) ||
+ ((pinfo->filled & MPATH_INFO_METRIC) &&
+ nla_put_u32(msg, NL80211_MPATH_INFO_METRIC,
+ pinfo->metric)) ||
+ ((pinfo->filled & MPATH_INFO_EXPTIME) &&
+ nla_put_u32(msg, NL80211_MPATH_INFO_EXPTIME,
+ pinfo->exptime)) ||
+ ((pinfo->filled & MPATH_INFO_FLAGS) &&
+ nla_put_u8(msg, NL80211_MPATH_INFO_FLAGS,
+ pinfo->flags)) ||
+ ((pinfo->filled & MPATH_INFO_DISCOVERY_TIMEOUT) &&
+ nla_put_u32(msg, NL80211_MPATH_INFO_DISCOVERY_TIMEOUT,
+ pinfo->discovery_timeout)) ||
+ ((pinfo->filled & MPATH_INFO_DISCOVERY_RETRIES) &&
+ nla_put_u8(msg, NL80211_MPATH_INFO_DISCOVERY_RETRIES,
+ pinfo->discovery_retries)))
+ goto nla_put_failure;
nla_nest_end(msg, pinfoattr);
@@ -3268,47 +3369,52 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
pinfoattr = nla_nest_start(msg, NL80211_ATTR_MESH_CONFIG);
if (!pinfoattr)
goto nla_put_failure;
- NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
- NLA_PUT_U16(msg, NL80211_MESHCONF_RETRY_TIMEOUT,
- cur_params.dot11MeshRetryTimeout);
- NLA_PUT_U16(msg, NL80211_MESHCONF_CONFIRM_TIMEOUT,
- cur_params.dot11MeshConfirmTimeout);
- NLA_PUT_U16(msg, NL80211_MESHCONF_HOLDING_TIMEOUT,
- cur_params.dot11MeshHoldingTimeout);
- NLA_PUT_U16(msg, NL80211_MESHCONF_MAX_PEER_LINKS,
- cur_params.dot11MeshMaxPeerLinks);
- NLA_PUT_U8(msg, NL80211_MESHCONF_MAX_RETRIES,
- cur_params.dot11MeshMaxRetries);
- NLA_PUT_U8(msg, NL80211_MESHCONF_TTL,
- cur_params.dot11MeshTTL);
- NLA_PUT_U8(msg, NL80211_MESHCONF_ELEMENT_TTL,
- cur_params.element_ttl);
- NLA_PUT_U8(msg, NL80211_MESHCONF_AUTO_OPEN_PLINKS,
- cur_params.auto_open_plinks);
- NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
- cur_params.dot11MeshHWMPmaxPREQretries);
- NLA_PUT_U32(msg, NL80211_MESHCONF_PATH_REFRESH_TIME,
- cur_params.path_refresh_time);
- NLA_PUT_U16(msg, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
- cur_params.min_discovery_timeout);
- NLA_PUT_U32(msg, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
- cur_params.dot11MeshHWMPactivePathTimeout);
- NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
- cur_params.dot11MeshHWMPpreqMinInterval);
- NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
- cur_params.dot11MeshHWMPperrMinInterval);
- NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
- cur_params.dot11MeshHWMPnetDiameterTraversalTime);
- NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_ROOTMODE,
- cur_params.dot11MeshHWMPRootMode);
- NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_RANN_INTERVAL,
- cur_params.dot11MeshHWMPRannInterval);
- NLA_PUT_U8(msg, NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
- cur_params.dot11MeshGateAnnouncementProtocol);
- NLA_PUT_U8(msg, NL80211_MESHCONF_FORWARDING,
- cur_params.dot11MeshForwarding);
- NLA_PUT_U32(msg, NL80211_MESHCONF_RSSI_THRESHOLD,
- cur_params.rssi_threshold);
+ if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+ nla_put_u16(msg, NL80211_MESHCONF_RETRY_TIMEOUT,
+ cur_params.dot11MeshRetryTimeout) ||
+ nla_put_u16(msg, NL80211_MESHCONF_CONFIRM_TIMEOUT,
+ cur_params.dot11MeshConfirmTimeout) ||
+ nla_put_u16(msg, NL80211_MESHCONF_HOLDING_TIMEOUT,
+ cur_params.dot11MeshHoldingTimeout) ||
+ nla_put_u16(msg, NL80211_MESHCONF_MAX_PEER_LINKS,
+ cur_params.dot11MeshMaxPeerLinks) ||
+ nla_put_u8(msg, NL80211_MESHCONF_MAX_RETRIES,
+ cur_params.dot11MeshMaxRetries) ||
+ nla_put_u8(msg, NL80211_MESHCONF_TTL,
+ cur_params.dot11MeshTTL) ||
+ nla_put_u8(msg, NL80211_MESHCONF_ELEMENT_TTL,
+ cur_params.element_ttl) ||
+ nla_put_u8(msg, NL80211_MESHCONF_AUTO_OPEN_PLINKS,
+ cur_params.auto_open_plinks) ||
+ nla_put_u32(msg, NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR,
+ cur_params.dot11MeshNbrOffsetMaxNeighbor) ||
+ nla_put_u8(msg, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
+ cur_params.dot11MeshHWMPmaxPREQretries) ||
+ nla_put_u32(msg, NL80211_MESHCONF_PATH_REFRESH_TIME,
+ cur_params.path_refresh_time) ||
+ nla_put_u16(msg, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
+ cur_params.min_discovery_timeout) ||
+ nla_put_u32(msg, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
+ cur_params.dot11MeshHWMPactivePathTimeout) ||
+ nla_put_u16(msg, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
+ cur_params.dot11MeshHWMPpreqMinInterval) ||
+ nla_put_u16(msg, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
+ cur_params.dot11MeshHWMPperrMinInterval) ||
+ nla_put_u16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
+ cur_params.dot11MeshHWMPnetDiameterTraversalTime) ||
+ nla_put_u8(msg, NL80211_MESHCONF_HWMP_ROOTMODE,
+ cur_params.dot11MeshHWMPRootMode) ||
+ nla_put_u16(msg, NL80211_MESHCONF_HWMP_RANN_INTERVAL,
+ cur_params.dot11MeshHWMPRannInterval) ||
+ nla_put_u8(msg, NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
+ cur_params.dot11MeshGateAnnouncementProtocol) ||
+ nla_put_u8(msg, NL80211_MESHCONF_FORWARDING,
+ cur_params.dot11MeshForwarding) ||
+ nla_put_u32(msg, NL80211_MESHCONF_RSSI_THRESHOLD,
+ cur_params.rssi_threshold) ||
+ nla_put_u32(msg, NL80211_MESHCONF_HT_OPMODE,
+ cur_params.ht_opmode))
+ goto nla_put_failure;
nla_nest_end(msg, pinfoattr);
genlmsg_end(msg, hdr);
return genlmsg_reply(msg, info);
@@ -3329,6 +3435,7 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A
[NL80211_MESHCONF_TTL] = { .type = NLA_U8 },
[NL80211_MESHCONF_ELEMENT_TTL] = { .type = NLA_U8 },
[NL80211_MESHCONF_AUTO_OPEN_PLINKS] = { .type = NLA_U8 },
+ [NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR] = { .type = NLA_U32 },
[NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES] = { .type = NLA_U8 },
[NL80211_MESHCONF_PATH_REFRESH_TIME] = { .type = NLA_U32 },
@@ -3342,10 +3449,12 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A
[NL80211_MESHCONF_GATE_ANNOUNCEMENTS] = { .type = NLA_U8 },
[NL80211_MESHCONF_FORWARDING] = { .type = NLA_U8 },
 [NL80211_MESHCONF_RSSI_THRESHOLD] = { .type = NLA_U32 },
+ [NL80211_MESHCONF_HT_OPMODE] = { .type = NLA_U16 },
};
static const struct nla_policy
nl80211_mesh_setup_params_policy[NL80211_MESH_SETUP_ATTR_MAX+1] = {
+ [NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC] = { .type = NLA_U8 },
[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL] = { .type = NLA_U8 },
[NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 },
[NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG },
@@ -3398,6 +3507,9 @@ do {\
mask, NL80211_MESHCONF_ELEMENT_TTL, nla_get_u8);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks,
mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS, nla_get_u8);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNbrOffsetMaxNeighbor,
+ mask, NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR,
+ nla_get_u32);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries,
mask, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
nla_get_u8);
@@ -3435,6 +3547,8 @@ do {\
mask, NL80211_MESHCONF_FORWARDING, nla_get_u8);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold,
mask, NL80211_MESHCONF_RSSI_THRESHOLD, nla_get_u32);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, ht_opmode,
+ mask, NL80211_MESHCONF_HT_OPMODE, nla_get_u16);
if (mask_out)
*mask_out = mask;
@@ -3455,6 +3569,12 @@ static int nl80211_parse_mesh_setup(struct genl_info *info,
nl80211_mesh_setup_params_policy))
return -EINVAL;
+ if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC])
+ setup->sync_method =
+ (nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC])) ?
+ IEEE80211_SYNC_METHOD_VENDOR :
+ IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET;
+
if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL])
setup->path_sel_proto =
(nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL])) ?
@@ -3539,11 +3659,12 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
if (!hdr)
goto put_failure;
- NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2,
- cfg80211_regdomain->alpha2);
- if (cfg80211_regdomain->dfs_region)
- NLA_PUT_U8(msg, NL80211_ATTR_DFS_REGION,
- cfg80211_regdomain->dfs_region);
+ if (nla_put_string(msg, NL80211_ATTR_REG_ALPHA2,
+ cfg80211_regdomain->alpha2) ||
+ (cfg80211_regdomain->dfs_region &&
+ nla_put_u8(msg, NL80211_ATTR_DFS_REGION,
+ cfg80211_regdomain->dfs_region)))
+ goto nla_put_failure;
nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES);
if (!nl_reg_rules)
@@ -3563,18 +3684,19 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
if (!nl_reg_rule)
goto nla_put_failure;
- NLA_PUT_U32(msg, NL80211_ATTR_REG_RULE_FLAGS,
- reg_rule->flags);
- NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_START,
- freq_range->start_freq_khz);
- NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_END,
- freq_range->end_freq_khz);
- NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_MAX_BW,
- freq_range->max_bandwidth_khz);
- NLA_PUT_U32(msg, NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN,
- power_rule->max_antenna_gain);
- NLA_PUT_U32(msg, NL80211_ATTR_POWER_RULE_MAX_EIRP,
- power_rule->max_eirp);
+ if (nla_put_u32(msg, NL80211_ATTR_REG_RULE_FLAGS,
+ reg_rule->flags) ||
+ nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_START,
+ freq_range->start_freq_khz) ||
+ nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_END,
+ freq_range->end_freq_khz) ||
+ nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_MAX_BW,
+ freq_range->max_bandwidth_khz) ||
+ nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN,
+ power_rule->max_antenna_gain) ||
+ nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_EIRP,
+ power_rule->max_eirp))
+ goto nla_put_failure;
nla_nest_end(msg, nl_reg_rule);
}
@@ -4145,37 +4267,44 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
genl_dump_check_consistent(cb, hdr, &nl80211_fam);
- NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, rdev->bss_generation);
- NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex);
+ if (nla_put_u32(msg, NL80211_ATTR_GENERATION, rdev->bss_generation) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex))
+ goto nla_put_failure;
bss = nla_nest_start(msg, NL80211_ATTR_BSS);
if (!bss)
goto nla_put_failure;
- if (!is_zero_ether_addr(res->bssid))
- NLA_PUT(msg, NL80211_BSS_BSSID, ETH_ALEN, res->bssid);
- if (res->information_elements && res->len_information_elements)
- NLA_PUT(msg, NL80211_BSS_INFORMATION_ELEMENTS,
- res->len_information_elements,
- res->information_elements);
- if (res->beacon_ies && res->len_beacon_ies &&
- res->beacon_ies != res->information_elements)
- NLA_PUT(msg, NL80211_BSS_BEACON_IES,
- res->len_beacon_ies, res->beacon_ies);
- if (res->tsf)
- NLA_PUT_U64(msg, NL80211_BSS_TSF, res->tsf);
- if (res->beacon_interval)
- NLA_PUT_U16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval);
- NLA_PUT_U16(msg, NL80211_BSS_CAPABILITY, res->capability);
- NLA_PUT_U32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq);
- NLA_PUT_U32(msg, NL80211_BSS_SEEN_MS_AGO,
- jiffies_to_msecs(jiffies - intbss->ts));
+ if ((!is_zero_ether_addr(res->bssid) &&
+ nla_put(msg, NL80211_BSS_BSSID, ETH_ALEN, res->bssid)) ||
+ (res->information_elements && res->len_information_elements &&
+ nla_put(msg, NL80211_BSS_INFORMATION_ELEMENTS,
+ res->len_information_elements,
+ res->information_elements)) ||
+ (res->beacon_ies && res->len_beacon_ies &&
+ res->beacon_ies != res->information_elements &&
+ nla_put(msg, NL80211_BSS_BEACON_IES,
+ res->len_beacon_ies, res->beacon_ies)))
+ goto nla_put_failure;
+ if (res->tsf &&
+ nla_put_u64(msg, NL80211_BSS_TSF, res->tsf))
+ goto nla_put_failure;
+ if (res->beacon_interval &&
+ nla_put_u16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval))
+ goto nla_put_failure;
+ if (nla_put_u16(msg, NL80211_BSS_CAPABILITY, res->capability) ||
+ nla_put_u32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq) ||
+ nla_put_u32(msg, NL80211_BSS_SEEN_MS_AGO,
+ jiffies_to_msecs(jiffies - intbss->ts)))
+ goto nla_put_failure;
switch (rdev->wiphy.signal_type) {
case CFG80211_SIGNAL_TYPE_MBM:
- NLA_PUT_U32(msg, NL80211_BSS_SIGNAL_MBM, res->signal);
+ if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM, res->signal))
+ goto nla_put_failure;
break;
case CFG80211_SIGNAL_TYPE_UNSPEC:
- NLA_PUT_U8(msg, NL80211_BSS_SIGNAL_UNSPEC, res->signal);
+ if (nla_put_u8(msg, NL80211_BSS_SIGNAL_UNSPEC, res->signal))
+ goto nla_put_failure;
break;
default:
break;
@@ -4184,14 +4313,16 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
switch (wdev->iftype) {
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_STATION:
- if (intbss == wdev->current_bss)
- NLA_PUT_U32(msg, NL80211_BSS_STATUS,
- NL80211_BSS_STATUS_ASSOCIATED);
+ if (intbss == wdev->current_bss &&
+ nla_put_u32(msg, NL80211_BSS_STATUS,
+ NL80211_BSS_STATUS_ASSOCIATED))
+ goto nla_put_failure;
break;
case NL80211_IFTYPE_ADHOC:
- if (intbss == wdev->current_bss)
- NLA_PUT_U32(msg, NL80211_BSS_STATUS,
- NL80211_BSS_STATUS_IBSS_JOINED);
+ if (intbss == wdev->current_bss &&
+ nla_put_u32(msg, NL80211_BSS_STATUS,
+ NL80211_BSS_STATUS_IBSS_JOINED))
+ goto nla_put_failure;
break;
default:
break;
@@ -4260,34 +4391,43 @@ static int nl80211_send_survey(struct sk_buff *msg, u32 pid, u32 seq,
if (!hdr)
return -ENOMEM;
- NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
+ if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex))
+ goto nla_put_failure;
infoattr = nla_nest_start(msg, NL80211_ATTR_SURVEY_INFO);
if (!infoattr)
goto nla_put_failure;
- NLA_PUT_U32(msg, NL80211_SURVEY_INFO_FREQUENCY,
- survey->channel->center_freq);
- if (survey->filled & SURVEY_INFO_NOISE_DBM)
- NLA_PUT_U8(msg, NL80211_SURVEY_INFO_NOISE,
- survey->noise);
- if (survey->filled & SURVEY_INFO_IN_USE)
- NLA_PUT_FLAG(msg, NL80211_SURVEY_INFO_IN_USE);
- if (survey->filled & SURVEY_INFO_CHANNEL_TIME)
- NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME,
- survey->channel_time);
- if (survey->filled & SURVEY_INFO_CHANNEL_TIME_BUSY)
- NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY,
- survey->channel_time_busy);
- if (survey->filled & SURVEY_INFO_CHANNEL_TIME_EXT_BUSY)
- NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY,
- survey->channel_time_ext_busy);
- if (survey->filled & SURVEY_INFO_CHANNEL_TIME_RX)
- NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_RX,
- survey->channel_time_rx);
- if (survey->filled & SURVEY_INFO_CHANNEL_TIME_TX)
- NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_TX,
- survey->channel_time_tx);
+ if (nla_put_u32(msg, NL80211_SURVEY_INFO_FREQUENCY,
+ survey->channel->center_freq))
+ goto nla_put_failure;
+
+ if ((survey->filled & SURVEY_INFO_NOISE_DBM) &&
+ nla_put_u8(msg, NL80211_SURVEY_INFO_NOISE, survey->noise))
+ goto nla_put_failure;
+ if ((survey->filled & SURVEY_INFO_IN_USE) &&
+ nla_put_flag(msg, NL80211_SURVEY_INFO_IN_USE))
+ goto nla_put_failure;
+ if ((survey->filled & SURVEY_INFO_CHANNEL_TIME) &&
+ nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME,
+ survey->channel_time))
+ goto nla_put_failure;
+ if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_BUSY) &&
+ nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY,
+ survey->channel_time_busy))
+ goto nla_put_failure;
+ if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_EXT_BUSY) &&
+ nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY,
+ survey->channel_time_ext_busy))
+ goto nla_put_failure;
+ if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_RX) &&
+ nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_RX,
+ survey->channel_time_rx))
+ goto nla_put_failure;
+ if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_TX) &&
+ nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_TX,
+ survey->channel_time_tx))
+ goto nla_put_failure;
nla_nest_end(msg, infoattr);
@@ -4793,12 +4933,7 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
enum nl80211_channel_type channel_type;
- channel_type = nla_get_u32(
- info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
- if (channel_type != NL80211_CHAN_NO_HT &&
- channel_type != NL80211_CHAN_HT20 &&
- channel_type != NL80211_CHAN_HT40MINUS &&
- channel_type != NL80211_CHAN_HT40PLUS)
+ if (!nl80211_valid_channel_type(info, &channel_type))
return -EINVAL;
if (channel_type != NL80211_CHAN_NO_HT &&
@@ -4968,7 +5103,7 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
NL80211_CMD_TESTMODE);
struct nlattr *tmdata;
- if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx) < 0) {
+ if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) {
genlmsg_cancel(skb, hdr);
break;
}
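
Dropping the "< 0" here is safe because the nla_put helpers only ever return 0 or -EMSGSIZE, never a positive value, so truth-testing the result is equivalent. The relevant helper's shape in include/net/netlink.h of this era:

/* nla_put() returns 0 on success and -EMSGSIZE when the skb runs out of
 * tailroom, so "if (ret)" and "if (ret < 0)" behave identically here. */
static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
{
	return nla_put(skb, attrtype, sizeof(u32), &value);
}
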
@@ -5019,7 +5154,8 @@ __cfg80211_testmode_alloc_skb(struct cfg80211_registered_device *rdev,
return NULL;
}
- NLA_PUT_U32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+ if (nla_put_u32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx))
+ goto nla_put_failure;
data = nla_nest_start(skb, NL80211_ATTR_TESTDATA);
((void **)skb->cb)[0] = rdev;
@@ -5365,15 +5501,9 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
!(rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL))
return -EOPNOTSUPP;
- if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
- channel_type = nla_get_u32(
- info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
- if (channel_type != NL80211_CHAN_NO_HT &&
- channel_type != NL80211_CHAN_HT20 &&
- channel_type != NL80211_CHAN_HT40PLUS &&
- channel_type != NL80211_CHAN_HT40MINUS)
- return -EINVAL;
- }
+ if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
+ !nl80211_valid_channel_type(info, &channel_type))
+ return -EINVAL;
freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
chan = rdev_freq_to_chan(rdev, freq, channel_type);
@@ -5398,7 +5528,8 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
if (err)
goto free_msg;
- NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+ if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
+ goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -5540,6 +5671,9 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
sband,
nla_data(tb[NL80211_TXRATE_LEGACY]),
nla_len(tb[NL80211_TXRATE_LEGACY]));
+ if ((mask.control[band].legacy == 0) &&
+ nla_len(tb[NL80211_TXRATE_LEGACY]))
+ return -EINVAL;
}
if (tb[NL80211_TXRATE_MCS]) {
if (!ht_rateset_to_mask(
@@ -5640,12 +5774,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
}
if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
- channel_type = nla_get_u32(
- info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
- if (channel_type != NL80211_CHAN_NO_HT &&
- channel_type != NL80211_CHAN_HT20 &&
- channel_type != NL80211_CHAN_HT40PLUS &&
- channel_type != NL80211_CHAN_HT40MINUS)
+ if (!nl80211_valid_channel_type(info, &channel_type))
return -EINVAL;
channel_type_valid = true;
}
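
The nl80211_valid_channel_type() helper that replaces the three open-coded checks in this patch is not itself visible in the quoted hunks; a plausible definition, reconstructed from its call sites (the real one lives earlier in nl80211.c), would be:

static bool nl80211_valid_channel_type(struct genl_info *info,
				       enum nl80211_channel_type *channel_type)
{
	enum nl80211_channel_type tmp;

	if (!info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE])
		return false;

	tmp = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
	if (tmp != NL80211_CHAN_NO_HT &&
	    tmp != NL80211_CHAN_HT20 &&
	    tmp != NL80211_CHAN_HT40PLUS &&
	    tmp != NL80211_CHAN_HT40MINUS)
		return false;

	if (channel_type)
		*channel_type = tmp;

	return true;
}
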
@@ -5685,7 +5814,8 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
goto free_msg;
if (msg) {
- NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+ if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
+ goto nla_put_failure;
genlmsg_end(msg, hdr);
return genlmsg_reply(msg, info);
@@ -5790,7 +5920,8 @@ static int nl80211_get_power_save(struct sk_buff *skb, struct genl_info *info)
else
ps_state = NL80211_PS_DISABLED;
- NLA_PUT_U32(msg, NL80211_ATTR_PS_STATE, ps_state);
+ if (nla_put_u32(msg, NL80211_ATTR_PS_STATE, ps_state))
+ goto nla_put_failure;
genlmsg_end(msg, hdr);
return genlmsg_reply(msg, info);
@@ -5937,20 +6068,21 @@ static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info)
if (!nl_wowlan)
goto nla_put_failure;
- if (rdev->wowlan->any)
- NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_ANY);
- if (rdev->wowlan->disconnect)
- NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_DISCONNECT);
- if (rdev->wowlan->magic_pkt)
- NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT);
- if (rdev->wowlan->gtk_rekey_failure)
- NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE);
- if (rdev->wowlan->eap_identity_req)
- NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST);
- if (rdev->wowlan->four_way_handshake)
- NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE);
- if (rdev->wowlan->rfkill_release)
- NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE);
+ if ((rdev->wowlan->any &&
+ nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) ||
+ (rdev->wowlan->disconnect &&
+ nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) ||
+ (rdev->wowlan->magic_pkt &&
+ nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) ||
+ (rdev->wowlan->gtk_rekey_failure &&
+ nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) ||
+ (rdev->wowlan->eap_identity_req &&
+ nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) ||
+ (rdev->wowlan->four_way_handshake &&
+ nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) ||
+ (rdev->wowlan->rfkill_release &&
+ nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE)))
+ goto nla_put_failure;
if (rdev->wowlan->n_patterns) {
struct nlattr *nl_pats, *nl_pat;
int i, pat_len;
@@ -5965,12 +6097,13 @@ static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info)
if (!nl_pat)
goto nla_put_failure;
pat_len = rdev->wowlan->patterns[i].pattern_len;
- NLA_PUT(msg, NL80211_WOWLAN_PKTPAT_MASK,
- DIV_ROUND_UP(pat_len, 8),
- rdev->wowlan->patterns[i].mask);
- NLA_PUT(msg, NL80211_WOWLAN_PKTPAT_PATTERN,
- pat_len,
- rdev->wowlan->patterns[i].pattern);
+ if (nla_put(msg, NL80211_WOWLAN_PKTPAT_MASK,
+ DIV_ROUND_UP(pat_len, 8),
+ rdev->wowlan->patterns[i].mask) ||
+ nla_put(msg, NL80211_WOWLAN_PKTPAT_PATTERN,
+ pat_len,
+ rdev->wowlan->patterns[i].pattern))
+ goto nla_put_failure;
nla_nest_end(msg, nl_pat);
}
nla_nest_end(msg, nl_pats);
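
The DIV_ROUND_UP() in the mask attribute above reflects the WoWLAN pattern encoding of one mask bit per pattern byte, rounded up to whole bytes; with the kernel.h definition the arithmetic works out as:

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* An 11-byte pattern needs 11 mask bits:
 * DIV_ROUND_UP(11, 8) == 2 mask bytes. */
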
@@ -5995,6 +6128,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
struct cfg80211_wowlan new_triggers = {};
struct wiphy_wowlan_support *wowlan = &rdev->wiphy.wowlan;
int err, i;
+ bool prev_enabled = rdev->wowlan;
if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns)
return -EOPNOTSUPP;
@@ -6127,6 +6261,9 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
rdev->wowlan = NULL;
}
+ if (rdev->ops->set_wakeup && prev_enabled != !!rdev->wowlan)
+ rdev->ops->set_wakeup(&rdev->wiphy, rdev->wowlan);
+
return 0;
error:
for (i = 0; i < new_triggers.n_patterns; i++)
@@ -6243,7 +6380,8 @@ static int nl80211_probe_client(struct sk_buff *skb,
if (err)
goto free_msg;
- NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+ if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
+ goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -6384,7 +6522,7 @@ static struct genl_ops nl80211_ops[] = {
.doit = nl80211_get_key,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
@@ -6416,7 +6554,7 @@ static struct genl_ops nl80211_ops[] = {
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
.doit = nl80211_set_beacon,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
@@ -6424,7 +6562,7 @@ static struct genl_ops nl80211_ops[] = {
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
.doit = nl80211_start_ap,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
@@ -6432,7 +6570,7 @@ static struct genl_ops nl80211_ops[] = {
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
.doit = nl80211_stop_ap,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
@@ -6448,7 +6586,7 @@ static struct genl_ops nl80211_ops[] = {
.doit = nl80211_set_station,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
@@ -6464,7 +6602,7 @@ static struct genl_ops nl80211_ops[] = {
.doit = nl80211_del_station,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
@@ -6497,7 +6635,7 @@ static struct genl_ops nl80211_ops[] = {
.doit = nl80211_del_mpath,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
@@ -6505,7 +6643,7 @@ static struct genl_ops nl80211_ops[] = {
.doit = nl80211_set_bss,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
@@ -6531,7 +6669,7 @@ static struct genl_ops nl80211_ops[] = {
.doit = nl80211_get_mesh_config,
.policy = nl80211_policy,
/* can be retrieved by unprivileged users */
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
@@ -6664,7 +6802,7 @@ static struct genl_ops nl80211_ops[] = {
.doit = nl80211_setdel_pmksa,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
@@ -6672,7 +6810,7 @@ static struct genl_ops nl80211_ops[] = {
.doit = nl80211_setdel_pmksa,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
@@ -6680,7 +6818,7 @@ static struct genl_ops nl80211_ops[] = {
.doit = nl80211_flush_pmksa,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
@@ -6840,7 +6978,7 @@ static struct genl_ops nl80211_ops[] = {
.doit = nl80211_probe_client,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
@@ -6911,19 +7049,24 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
nest = nla_nest_start(msg, NL80211_ATTR_SCAN_SSIDS);
if (!nest)
goto nla_put_failure;
- for (i = 0; i < req->n_ssids; i++)
- NLA_PUT(msg, i, req->ssids[i].ssid_len, req->ssids[i].ssid);
+ for (i = 0; i < req->n_ssids; i++) {
+ if (nla_put(msg, i, req->ssids[i].ssid_len, req->ssids[i].ssid))
+ goto nla_put_failure;
+ }
nla_nest_end(msg, nest);
nest = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES);
if (!nest)
goto nla_put_failure;
- for (i = 0; i < req->n_channels; i++)
- NLA_PUT_U32(msg, i, req->channels[i]->center_freq);
+ for (i = 0; i < req->n_channels; i++) {
+ if (nla_put_u32(msg, i, req->channels[i]->center_freq))
+ goto nla_put_failure;
+ }
nla_nest_end(msg, nest);
- if (req->ie)
- NLA_PUT(msg, NL80211_ATTR_IE, req->ie_len, req->ie);
+ if (req->ie &&
+ nla_put(msg, NL80211_ATTR_IE, req->ie_len, req->ie))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -6942,8 +7085,9 @@ static int nl80211_send_scan_msg(struct sk_buff *msg,
if (!hdr)
return -1;
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
- NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
+ goto nla_put_failure;
/* ignore errors and send incomplete event anyway */
nl80211_add_scan_req(msg, rdev);
@@ -6967,8 +7111,9 @@ nl80211_send_sched_scan_msg(struct sk_buff *msg,
if (!hdr)
return -1;
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
- NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
+ goto nla_put_failure;
return genlmsg_end(msg, hdr);
@@ -7091,26 +7236,33 @@ void nl80211_send_reg_change_event(struct regulatory_request *request)
}
 /* Userspace can always count on this attribute being set */
- NLA_PUT_U8(msg, NL80211_ATTR_REG_INITIATOR, request->initiator);
-
- if (request->alpha2[0] == '0' && request->alpha2[1] == '0')
- NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE,
- NL80211_REGDOM_TYPE_WORLD);
- else if (request->alpha2[0] == '9' && request->alpha2[1] == '9')
- NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE,
- NL80211_REGDOM_TYPE_CUSTOM_WORLD);
- else if ((request->alpha2[0] == '9' && request->alpha2[1] == '8') ||
- request->intersect)
- NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE,
- NL80211_REGDOM_TYPE_INTERSECTION);
- else {
- NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE,
- NL80211_REGDOM_TYPE_COUNTRY);
- NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2, request->alpha2);
- }
-
- if (wiphy_idx_valid(request->wiphy_idx))
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx);
+ if (nla_put_u8(msg, NL80211_ATTR_REG_INITIATOR, request->initiator))
+ goto nla_put_failure;
+
+ if (request->alpha2[0] == '0' && request->alpha2[1] == '0') {
+ if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE,
+ NL80211_REGDOM_TYPE_WORLD))
+ goto nla_put_failure;
+ } else if (request->alpha2[0] == '9' && request->alpha2[1] == '9') {
+ if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE,
+ NL80211_REGDOM_TYPE_CUSTOM_WORLD))
+ goto nla_put_failure;
+ } else if ((request->alpha2[0] == '9' && request->alpha2[1] == '8') ||
+ request->intersect) {
+ if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE,
+ NL80211_REGDOM_TYPE_INTERSECTION))
+ goto nla_put_failure;
+ } else {
+ if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE,
+ NL80211_REGDOM_TYPE_COUNTRY) ||
+ nla_put_string(msg, NL80211_ATTR_REG_ALPHA2,
+ request->alpha2))
+ goto nla_put_failure;
+ }
+
+ if (wiphy_idx_valid(request->wiphy_idx) &&
+ nla_put_u32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx))
+ goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -7144,9 +7296,10 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
return;
}
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
- NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
- NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+ nla_put(msg, NL80211_ATTR_FRAME, len, buf))
+ goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -7224,10 +7377,11 @@ static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev,
return;
}
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
- NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
- NLA_PUT_FLAG(msg, NL80211_ATTR_TIMED_OUT);
- NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+ nla_put_flag(msg, NL80211_ATTR_TIMED_OUT) ||
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr))
+ goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -7275,15 +7429,15 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
return;
}
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
- NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
- if (bssid)
- NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
- NLA_PUT_U16(msg, NL80211_ATTR_STATUS_CODE, status);
- if (req_ie)
- NLA_PUT(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie);
- if (resp_ie)
- NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie);
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+ (bssid && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) ||
+ nla_put_u16(msg, NL80211_ATTR_STATUS_CODE, status) ||
+ (req_ie &&
+ nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) ||
+ (resp_ie &&
+ nla_put(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie)))
+ goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -7315,13 +7469,14 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
return;
}
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
- NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
- NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
- if (req_ie)
- NLA_PUT(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie);
- if (resp_ie)
- NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie);
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid) ||
+ (req_ie &&
+ nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) ||
+ (resp_ie &&
+ nla_put(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie)))
+ goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -7352,14 +7507,14 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
return;
}
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
- NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
- if (from_ap && reason)
- NLA_PUT_U16(msg, NL80211_ATTR_REASON_CODE, reason);
- if (from_ap)
- NLA_PUT_FLAG(msg, NL80211_ATTR_DISCONNECTED_BY_AP);
- if (ie)
- NLA_PUT(msg, NL80211_ATTR_IE, ie_len, ie);
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+ (from_ap && reason &&
+ nla_put_u16(msg, NL80211_ATTR_REASON_CODE, reason)) ||
+ (from_ap &&
+ nla_put_flag(msg, NL80211_ATTR_DISCONNECTED_BY_AP)) ||
+ (ie && nla_put(msg, NL80211_ATTR_IE, ie_len, ie)))
+ goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -7390,9 +7545,10 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
return;
}
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
- NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
- NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid))
+ goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -7423,11 +7579,12 @@ void nl80211_send_new_peer_candidate(struct cfg80211_registered_device *rdev,
return;
}
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
- NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
- NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, macaddr);
- if (ie_len && ie)
- NLA_PUT(msg, NL80211_ATTR_IE, ie_len , ie);
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, macaddr) ||
+ (ie_len && ie &&
+ nla_put(msg, NL80211_ATTR_IE, ie_len, ie)))
+ goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -7458,15 +7615,14 @@ void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
return;
}
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
- NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
- if (addr)
- NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
- NLA_PUT_U32(msg, NL80211_ATTR_KEY_TYPE, key_type);
- if (key_id != -1)
- NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_id);
- if (tsc)
- NLA_PUT(msg, NL80211_ATTR_KEY_SEQ, 6, tsc);
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+ (addr && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr)) ||
+ nla_put_u32(msg, NL80211_ATTR_KEY_TYPE, key_type) ||
+ (key_id != -1 &&
+ nla_put_u8(msg, NL80211_ATTR_KEY_IDX, key_id)) ||
+ (tsc && nla_put(msg, NL80211_ATTR_KEY_SEQ, 6, tsc)))
+ goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -7501,7 +7657,8 @@ void nl80211_send_beacon_hint_event(struct wiphy *wiphy,
* Since we are applying the beacon hint to a wiphy we know its
* wiphy_idx is valid
*/
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy));
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy)))
+ goto nla_put_failure;
/* Before */
nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_BEFORE);
@@ -7553,14 +7710,16 @@ static void nl80211_send_remain_on_chan_event(
return;
}
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
- NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq);
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type);
- NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+ nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq) ||
+ nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type) ||
+ nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
+ goto nla_put_failure;
- if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL)
- NLA_PUT_U32(msg, NL80211_ATTR_DURATION, duration);
+ if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL &&
+ nla_put_u32(msg, NL80211_ATTR_DURATION, duration))
+ goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -7631,8 +7790,9 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
return;
}
- NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
- NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);
+ if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
+ goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -7668,9 +7828,10 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
return true;
}
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
- NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
- NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr))
+ goto nla_put_failure;
err = genlmsg_end(msg, hdr);
if (err < 0) {
@@ -7719,12 +7880,13 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
return -ENOMEM;
}
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
- NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq);
- if (sig_dbm)
- NLA_PUT_U32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm);
- NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+ nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) ||
+ (sig_dbm &&
+ nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) ||
+ nla_put(msg, NL80211_ATTR_FRAME, len, buf))
+ goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -7754,12 +7916,12 @@ void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
return;
}
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
- NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
- NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
- NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
- if (ack)
- NLA_PUT_FLAG(msg, NL80211_ATTR_ACK);
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+ nla_put(msg, NL80211_ATTR_FRAME, len, buf) ||
+ nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) ||
+ (ack && nla_put_flag(msg, NL80211_ATTR_ACK)))
+ goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -7791,15 +7953,17 @@ nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
return;
}
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
- NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
+ goto nla_put_failure;
pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM);
if (!pinfoattr)
goto nla_put_failure;
- NLA_PUT_U32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT,
- rssi_event);
+ if (nla_put_u32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT,
+ rssi_event))
+ goto nla_put_failure;
nla_nest_end(msg, pinfoattr);
@@ -7832,16 +7996,18 @@ void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
return;
}
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
- NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
- NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid))
+ goto nla_put_failure;
rekey_attr = nla_nest_start(msg, NL80211_ATTR_REKEY_DATA);
if (!rekey_attr)
goto nla_put_failure;
- NLA_PUT(msg, NL80211_REKEY_DATA_REPLAY_CTR,
- NL80211_REPLAY_CTR_LEN, replay_ctr);
+ if (nla_put(msg, NL80211_REKEY_DATA_REPLAY_CTR,
+ NL80211_REPLAY_CTR_LEN, replay_ctr))
+ goto nla_put_failure;
nla_nest_end(msg, rekey_attr);
@@ -7874,17 +8040,19 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
return;
}
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
- NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
+ goto nla_put_failure;
attr = nla_nest_start(msg, NL80211_ATTR_PMKSA_CANDIDATE);
if (!attr)
goto nla_put_failure;
- NLA_PUT_U32(msg, NL80211_PMKSA_CANDIDATE_INDEX, index);
- NLA_PUT(msg, NL80211_PMKSA_CANDIDATE_BSSID, ETH_ALEN, bssid);
- if (preauth)
- NLA_PUT_FLAG(msg, NL80211_PMKSA_CANDIDATE_PREAUTH);
+ if (nla_put_u32(msg, NL80211_PMKSA_CANDIDATE_INDEX, index) ||
+ nla_put(msg, NL80211_PMKSA_CANDIDATE_BSSID, ETH_ALEN, bssid) ||
+ (preauth &&
+ nla_put_flag(msg, NL80211_PMKSA_CANDIDATE_PREAUTH)))
+ goto nla_put_failure;
nla_nest_end(msg, attr);
@@ -7899,6 +8067,39 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
nlmsg_free(msg);
}
+void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, int freq,
+ enum nl80211_channel_type type, gfp_t gfp)
+{
+ struct sk_buff *msg;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_CH_SWITCH_NOTIFY);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+ nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) ||
+ nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, type))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+ nl80211_mlme_mcgrp.id, gfp);
+ return;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ nlmsg_free(msg);
+}
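
Drivers do not call nl80211_ch_switch_notify() directly; the same series exports a thin cfg80211 wrapper for them. Its mlme.c counterpart is not quoted in this diff, but a sketch consistent with the header declaration added to nl80211.h below would be:

void cfg80211_ch_switch_notify(struct net_device *dev, int freq,
			       enum nl80211_channel_type type)
{
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);

	/* Forward the driver notification to the nl80211 multicast group. */
	nl80211_ch_switch_notify(rdev, dev, freq, type, GFP_KERNEL);
}
EXPORT_SYMBOL(cfg80211_ch_switch_notify);
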
+
void
nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *peer,
@@ -7918,15 +8119,17 @@ nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
return;
}
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
- NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
- NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, peer);
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer))
+ goto nla_put_failure;
pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM);
if (!pinfoattr)
goto nla_put_failure;
- NLA_PUT_U32(msg, NL80211_ATTR_CQM_PKT_LOSS_EVENT, num_packets);
+ if (nla_put_u32(msg, NL80211_ATTR_CQM_PKT_LOSS_EVENT, num_packets))
+ goto nla_put_failure;
nla_nest_end(msg, pinfoattr);
@@ -7960,12 +8163,12 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
return;
}
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
- NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
- NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
- NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
- if (acked)
- NLA_PUT_FLAG(msg, NL80211_ATTR_ACK);
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) ||
+ nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) ||
+ (acked && nla_put_flag(msg, NL80211_ATTR_ACK)))
+ goto nla_put_failure;
err = genlmsg_end(msg, hdr);
if (err < 0) {
@@ -8005,12 +8208,13 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy,
return;
}
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
- if (freq)
- NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq);
- if (sig_dbm)
- NLA_PUT_U32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm);
- NLA_PUT(msg, NL80211_ATTR_FRAME, len, frame);
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ (freq &&
+ nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq)) ||
+ (sig_dbm &&
+ nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) ||
+ nla_put(msg, NL80211_ATTR_FRAME, len, frame))
+ goto nla_put_failure;
genlmsg_end(msg, hdr);
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 4ffe50df9f31..01a1122c3b33 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -118,6 +118,10 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
struct net_device *netdev, int index,
const u8 *bssid, bool preauth, gfp_t gfp);
+void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, int freq,
+ enum nl80211_channel_type type, gfp_t gfp);
+
bool nl80211_unexpected_frame(struct net_device *dev,
const u8 *addr, gfp_t gfp);
bool nl80211_unexpected_4addr_frame(struct net_device *dev,
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index e9a0ac83b84c..15f347477a99 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -388,7 +388,15 @@ static void reg_regdb_query(const char *alpha2)
schedule_work(&reg_regdb_work);
}
+
+/* Feel free to add any other sanity checks here */
+static void reg_regdb_size_check(void)
+{
+ /* We should ideally BUILD_BUG_ON() but then random builds would fail */
+ WARN_ONCE(!reg_regdb_size, "db.txt is empty, you should update it...");
+}
#else
+static inline void reg_regdb_size_check(void) {}
static inline void reg_regdb_query(const char *alpha2) {}
#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
@@ -2322,6 +2330,8 @@ int __init regulatory_init(void)
spin_lock_init(&reg_requests_lock);
spin_lock_init(&reg_pending_beacons_lock);
+ reg_regdb_size_check();
+
cfg80211_regdomain = cfg80211_world_regdom;
user_alpha2[0] = '9';
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 70faadf16a32..af2b1caa37fa 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -18,7 +18,7 @@
#include "nl80211.h"
#include "wext-compat.h"
-#define IEEE80211_SCAN_RESULT_EXPIRE (15 * HZ)
+#define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ)
void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak)
{
@@ -281,7 +281,7 @@ static bool is_bss(struct cfg80211_bss *a,
{
const u8 *ssidie;
- if (bssid && compare_ether_addr(a->bssid, bssid))
+ if (bssid && !ether_addr_equal(a->bssid, bssid))
return false;
if (!ssid)
@@ -378,7 +378,11 @@ static int cmp_bss_core(struct cfg80211_bss *a,
b->len_information_elements);
}
- return memcmp(a->bssid, b->bssid, ETH_ALEN);
+ /*
+ * We can't use compare_ether_addr() here since we need an ordered
+ * (<, =, >) result; its binary equal/not-equal return value isn't enough.
+ */
+ return memcmp(a->bssid, b->bssid, sizeof(a->bssid));
}
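
The compare_ether_addr() to ether_addr_equal() conversions scattered through this patch flip the sense of each test: compare_ether_addr() returns 0 for equal addresses, memcmp-style, while ether_addr_equal() returns true. Their definitions in include/linux/etherdevice.h of this era (the new helper is literally the negation of the old one):

static inline unsigned compare_ether_addr(const u8 *addr1, const u8 *addr2)
{
	const u16 *a = (const u16 *)addr1;
	const u16 *b = (const u16 *)addr2;

	BUILD_BUG_ON(ETH_ALEN != 6);
	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
}

static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
{
	return !compare_ether_addr(addr1, addr2);
}
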
static int cmp_bss(struct cfg80211_bss *a,
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 1b7a08df933c..55d99466babb 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -370,7 +370,7 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
iftype != NL80211_IFTYPE_P2P_CLIENT &&
iftype != NL80211_IFTYPE_MESH_POINT) ||
(is_multicast_ether_addr(dst) &&
- !compare_ether_addr(src, addr)))
+ ether_addr_equal(src, addr)))
return -1;
if (iftype == NL80211_IFTYPE_MESH_POINT) {
struct ieee80211s_hdr *meshdr =
@@ -398,9 +398,9 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
payload = skb->data + hdrlen;
ethertype = (payload[6] << 8) | payload[7];
- if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
+ if (likely((ether_addr_equal(payload, rfc1042_header) &&
ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
- compare_ether_addr(payload, bridge_tunnel_header) == 0)) {
+ ether_addr_equal(payload, bridge_tunnel_header))) {
/* remove RFC1042 or Bridge-Tunnel encapsulation and
* replace EtherType */
skb_pull(skb, hdrlen + 6);
@@ -609,10 +609,9 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
payload = frame->data;
ethertype = (payload[6] << 8) | payload[7];
- if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
+ if (likely((ether_addr_equal(payload, rfc1042_header) &&
ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
- compare_ether_addr(payload,
- bridge_tunnel_header) == 0)) {
+ ether_addr_equal(payload, bridge_tunnel_header))) {
/* remove RFC1042 or Bridge-Tunnel
* encapsulation and replace EtherType */
skb_pull(frame, 6);
@@ -880,7 +879,7 @@ u16 cfg80211_calculate_bitrate(struct rate_info *rate)
return rate->legacy;
 /* the formula below only works for MCS values smaller than 32 */
- if (rate->mcs >= 32)
+ if (WARN_ON_ONCE(rate->mcs >= 32))
return 0;
modulation = rate->mcs & 7;
@@ -946,13 +945,6 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
if (rdev->wiphy.software_iftypes & BIT(iftype))
return 0;
- /*
- * Drivers will gradually all set this flag, until all
- * have it we only enforce for those that set it.
- */
- if (!(rdev->wiphy.flags & WIPHY_FLAG_ENFORCE_COMBINATIONS))
- return 0;
-
memset(num, 0, sizeof(num));
num[iftype] = 1;
@@ -972,6 +964,9 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
}
mutex_unlock(&rdev->devlist_mtx);
+ if (total == 1)
+ return 0;
+
for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
const struct ieee80211_iface_combination *c;
struct ieee80211_iface_limit *limits;
@@ -989,7 +984,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
if (rdev->wiphy.software_iftypes & BIT(iftype))
continue;
for (j = 0; j < c->n_limits; j++) {
- if (!(limits[j].types & iftype))
+ if (!(limits[j].types & BIT(iftype)))
continue;
if (limits[j].max < num[iftype])
goto cont;
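
The BIT(iftype) fix above corrects a real type confusion: ieee80211_iface_limit.types is a bitmap of BIT(NL80211_IFTYPE_*) values, so masking it with the raw enum matched the wrong limits. A worked example, with BIT() as defined in linux/bitops.h:

#define BIT(nr) (1UL << (nr))

/* Example: checking a station (NL80211_IFTYPE_STATION == 2) against a
 * limit that only covers ad-hoc, .types = BIT(NL80211_IFTYPE_ADHOC) == 0x2:
 *
 *   buggy: types & iftype      == 0x2 & 2   -> nonzero, spurious match
 *   fixed: types & BIT(iftype) == 0x2 & 0x4 -> zero, correctly skipped
 */
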
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 3c24eb97e9d7..6a6181a673ca 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -821,6 +821,7 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
struct ieee80211_channel *chan;
+ enum nl80211_channel_type channel_type;
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
@@ -831,7 +832,7 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
if (!rdev->ops->get_channel)
return -EINVAL;
- chan = rdev->ops->get_channel(wdev->wiphy);
+ chan = rdev->ops->get_channel(wdev->wiphy, &channel_type);
if (!chan)
return -EINVAL;
freq->m = chan->center_freq;
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 0af7f54e4f61..b0eb7aa49b60 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -256,7 +256,7 @@ static const struct iw_ioctl_description standard_ioctl[] = {
.max_tokens = sizeof(struct iw_pmksa),
},
};
-static const unsigned standard_ioctl_num = ARRAY_SIZE(standard_ioctl);
+static const unsigned int standard_ioctl_num = ARRAY_SIZE(standard_ioctl);
/*
* Meta-data about all the additional standard Wireless Extension events
@@ -306,7 +306,7 @@ static const struct iw_ioctl_description standard_event[] = {
.max_tokens = sizeof(struct iw_pmkid_cand),
},
};
-static const unsigned standard_event_num = ARRAY_SIZE(standard_event);
+static const unsigned int standard_event_num = ARRAY_SIZE(standard_event);
/* Size (in bytes) of various events */
static const int event_type_size[] = {
@@ -402,7 +402,8 @@ static struct nlmsghdr *rtnetlink_ifinfo_prep(struct net_device *dev,
r->ifi_flags = dev_get_flags(dev);
r->ifi_change = 0; /* Wireless changes don't affect those flags */
- NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
+ if (nla_put_string(skb, IFLA_IFNAME, dev->name))
+ goto nla_put_failure;
return nlh;
nla_put_failure:
@@ -428,7 +429,7 @@ void wireless_send_event(struct net_device * dev,
int hdr_len; /* Size of the event header */
int wrqu_off = 0; /* Offset in wrqu */
/* Don't "optimise" the following variable, it will crash */
- unsigned cmd_index; /* *MUST* be unsigned */
+ unsigned int cmd_index; /* *MUST* be unsigned */
struct sk_buff *skb;
struct nlmsghdr *nlh;
struct nlattr *nla;
@@ -780,8 +781,10 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
if (cmd == SIOCSIWENCODEEXT) {
struct iw_encode_ext *ee = (void *) extra;
- if (iwp->length < sizeof(*ee) + ee->key_len)
- return -EFAULT;
+ if (iwp->length < sizeof(*ee) + ee->key_len) {
+ err = -EFAULT;
+ goto out;
+ }
}
}
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 7c01c2f3b6cf..7decbd357d51 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -276,7 +276,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev,
/* fixed already - and no change */
if (wdev->wext.connect.bssid && bssid &&
- compare_ether_addr(bssid, wdev->wext.connect.bssid) == 0)
+ ether_addr_equal(bssid, wdev->wext.connect.bssid))
goto out;
err = __cfg80211_disconnect(rdev, dev,
diff --git a/net/wireless/wext-spy.c b/net/wireless/wext-spy.c
index 5d643a548feb..33bef22e44e9 100644
--- a/net/wireless/wext-spy.c
+++ b/net/wireless/wext-spy.c
@@ -203,7 +203,7 @@ void wireless_spy_update(struct net_device * dev,
/* Update all records that match */
for (i = 0; i < spydata->spy_number; i++)
- if (!compare_ether_addr(address, spydata->spy_address[i])) {
+ if (ether_addr_equal(address, spydata->spy_address[i])) {
memcpy(&(spydata->spy_stat[i]), wstats,
sizeof(struct iw_quality));
match = i;
diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c
index d2efd29f434e..43239527a205 100644
--- a/net/x25/sysctl_net_x25.c
+++ b/net/x25/sysctl_net_x25.c
@@ -73,18 +73,12 @@ static struct ctl_table x25_table[] = {
{ 0, },
};
-static struct ctl_path x25_path[] = {
- { .procname = "net", },
- { .procname = "x25", },
- { }
-};
-
void __init x25_register_sysctl(void)
{
- x25_table_header = register_sysctl_paths(x25_path, x25_table);
+ x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table);
}
void x25_unregister_sysctl(void)
{
- unregister_sysctl_table(x25_table_header);
+ unregister_net_sysctl_table(x25_table_header);
}
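
register_net_sysctl(), which this hunk switches to, encodes the directory hierarchy as a plain path string instead of a ctl_path array, which is why x25_path can go away. Its declaration in the net sysctl headers of this era:

/* One call replaces the ctl_path array + register_sysctl_paths() pair;
 * the table is registered in the given network namespace. */
struct ctl_table_header *register_net_sysctl(struct net *net,
					     const char *path,
					     struct ctl_table *table);
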
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index f0ce862d1f46..a8a236338e61 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -58,7 +58,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
if (!sock_owned_by_user(sk)) {
queued = x25_process_rx_frame(sk, skb);
} else {
- queued = !sk_add_backlog(sk, skb);
+ queued = !sk_add_backlog(sk, skb, sk->sk_rcvbuf);
}
bh_unlock_sock(sk);
sock_put(sk);
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index 36384a1fa9f2..66c638730c7a 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -231,7 +231,7 @@ int x25_create_facilities(unsigned char *buffer,
}
if (dte_facs->calling_len && (facil_mask & X25_MASK_CALLING_AE)) {
- unsigned bytecount = (dte_facs->calling_len + 1) >> 1;
+ unsigned int bytecount = (dte_facs->calling_len + 1) >> 1;
*p++ = X25_FAC_CALLING_AE;
*p++ = 1 + bytecount;
*p++ = dte_facs->calling_len;
@@ -240,7 +240,7 @@ int x25_create_facilities(unsigned char *buffer,
}
if (dte_facs->called_len && (facil_mask & X25_MASK_CALLED_AE)) {
- unsigned bytecount = (dte_facs->called_len % 2) ?
+ unsigned int bytecount = (dte_facs->called_len % 2) ?
dte_facs->called_len / 2 + 1 :
dte_facs->called_len / 2;
*p++ = X25_FAC_CALLED_AE;
diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig
index 6d081674515f..ce90b8d92365 100644
--- a/net/xfrm/Kconfig
+++ b/net/xfrm/Kconfig
@@ -3,12 +3,17 @@
#
config XFRM
bool
- select CRYPTO
depends on NET
+config XFRM_ALGO
+ tristate
+ select XFRM
+ select CRYPTO
+
config XFRM_USER
tristate "Transformation user configuration interface"
- depends on INET && XFRM
+ depends on INET
+ select XFRM_ALGO
---help---
 Support for Transformation (XFRM) user configuration interface
like IPsec used by native Linux tools.
@@ -48,13 +53,13 @@ config XFRM_STATISTICS
config XFRM_IPCOMP
tristate
- select XFRM
+ select XFRM_ALGO
select CRYPTO
select CRYPTO_DEFLATE
config NET_KEY
tristate "PF_KEY sockets"
- select XFRM
+ select XFRM_ALGO
---help---
PF_KEYv2 socket family, compatible to KAME ones.
They are required if you are going to use IPsec tools ported
diff --git a/net/xfrm/Makefile b/net/xfrm/Makefile
index aa429eefe919..c0e961983f17 100644
--- a/net/xfrm/Makefile
+++ b/net/xfrm/Makefile
@@ -3,8 +3,9 @@
#
obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \
- xfrm_input.o xfrm_output.o xfrm_algo.o \
+ xfrm_input.o xfrm_output.o \
xfrm_sysctl.o xfrm_replay.o
obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o
+obj-$(CONFIG_XFRM_ALGO) += xfrm_algo.o
obj-$(CONFIG_XFRM_USER) += xfrm_user.o
obj-$(CONFIG_XFRM_IPCOMP) += xfrm_ipcomp.o
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 791ab2e77f3f..4ce2d93162c1 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -15,9 +15,6 @@
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <net/xfrm.h>
-#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE)
-#include <net/ah.h>
-#endif
#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
#include <net/esp.h>
#endif
@@ -752,3 +749,5 @@ void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
}
EXPORT_SYMBOL_GPL(pskb_put);
#endif
+
+MODULE_LICENSE("GPL");
diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h
index 7199d78b2aa1..716502ada53b 100644
--- a/net/xfrm/xfrm_hash.h
+++ b/net/xfrm/xfrm_hash.h
@@ -45,10 +45,10 @@ static inline unsigned int __xfrm_dst_hash(const xfrm_address_t *daddr,
return (h ^ (h >> 16)) & hmask;
}
-static inline unsigned __xfrm_src_hash(const xfrm_address_t *daddr,
- const xfrm_address_t *saddr,
- unsigned short family,
- unsigned int hmask)
+static inline unsigned int __xfrm_src_hash(const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr,
+ unsigned short family,
+ unsigned int hmask)
{
unsigned int h = family;
switch (family) {
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 7661576b6f45..ccfbd328a69d 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -26,6 +26,7 @@
#include <linux/cache.h>
#include <linux/audit.h>
#include <net/dst.h>
+#include <net/flow.h>
#include <net/xfrm.h>
#include <net/ip.h>
#ifdef CONFIG_XFRM_STATISTICS
@@ -56,7 +57,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
int dir);
-static inline int
+static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
const struct flowi4 *fl4 = &fl->u.ip4;
@@ -69,7 +70,7 @@ __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}
-static inline int
+static inline bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
const struct flowi6 *fl6 = &fl->u.ip6;
@@ -82,8 +83,8 @@ __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}
-int xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
- unsigned short family)
+bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
+ unsigned short family)
{
switch (family) {
case AF_INET:
@@ -91,7 +92,7 @@ int xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
case AF_INET6:
return __xfrm6_selector_match(sel, fl);
}
- return 0;
+ return false;
}
static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
@@ -877,7 +878,8 @@ static int xfrm_policy_match(const struct xfrm_policy *pol,
u8 type, u16 family, int dir)
{
const struct xfrm_selector *sel = &pol->selector;
- int match, ret = -ESRCH;
+ int ret = -ESRCH;
+ bool match;
if (pol->family != family ||
(fl->flowi_mark & pol->mark.m) != pol->mark.v ||
@@ -1006,8 +1008,8 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
read_lock_bh(&xfrm_policy_lock);
if ((pol = sk->sk_policy[dir]) != NULL) {
- int match = xfrm_selector_match(&pol->selector, fl,
- sk->sk_family);
+ bool match = xfrm_selector_match(&pol->selector, fl,
+ sk->sk_family);
int err = 0;
if (match) {
@@ -1919,6 +1921,9 @@ no_transform:
}
ok:
xfrm_pols_put(pols, drop_pols);
+ if (dst && dst->xfrm &&
+ dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
+ dst->flags |= DST_XFRM_TUNNEL;
return dst;
nopol:
@@ -2767,8 +2772,8 @@ EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif
#ifdef CONFIG_XFRM_MIGRATE
-static int xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
- const struct xfrm_selector *sel_tgt)
+static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
+ const struct xfrm_selector *sel_tgt)
{
if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
if (sel_tgt->family == sel_cmp->family &&
@@ -2778,14 +2783,14 @@ static int xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
sel_cmp->family) == 0 &&
sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
- return 1;
+ return true;
}
} else {
if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
- return 1;
+ return true;
}
}
- return 0;
+ return false;
}
static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector *sel,
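
Two threads run through the xfrm_policy.c hunks. First, the selector-match helpers (__xfrm4_selector_match(), __xfrm6_selector_match(), xfrm_selector_match() and xfrm_migrate_selector_match()) are converted from int to bool, and callers such as xfrm_policy_match() and xfrm_sk_policy_lookup() store the result in a bool accordingly. Second, the success path of the bundle lookup now stamps DST_XFRM_TUNNEL (a dst_entry flag from <net/dst.h>) on the returned route when the first transform runs in tunnel mode. A hedged caller sketch — the function names here are hypothetical, the identifiers they use are the real ones from the hunks above:

#include <linux/types.h>
#include <net/dst.h>
#include <net/xfrm.h>

/* The match helpers now read as plain predicates at the call site. */
static bool policy_applies(const struct xfrm_policy *pol,
			   const struct flowi *fl, unsigned short family)
{
	return xfrm_selector_match(&pol->selector, fl, family);
}

/* And tunnel-mode bundles can be recognized from the dst flags alone,
 * without walking the dst->xfrm state chain. */
static bool route_is_xfrm_tunnel(const struct dst_entry *dst)
{
	return dst->flags & DST_XFRM_TUNNEL;
}
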
diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c
index 05640bc9594b..380976f74c4c 100644
--- a/net/xfrm/xfrm_sysctl.c
+++ b/net/xfrm/xfrm_sysctl.c
@@ -54,7 +54,7 @@ int __net_init xfrm_sysctl_init(struct net *net)
table[2].data = &net->xfrm.sysctl_larval_drop;
table[3].data = &net->xfrm.sysctl_acq_expires;
- net->xfrm.sysctl_hdr = register_net_sysctl_table(net, net_core_path, table);
+ net->xfrm.sysctl_hdr = register_net_sysctl(net, "net/core", table);
if (!net->xfrm.sysctl_hdr)
goto out_register;
return 0;
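
The sysctl hunk is part of the tree-wide move from register_net_sysctl_table(), which took a struct ctl_path array, to register_net_sysctl(), which takes the path as a plain string ("net/core" here). A per-namespace registration sketch under the new API, assuming a hypothetical table with a single integer knob:

#include <linux/errno.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

static int demo_value;
static struct ctl_table_header *demo_hdr;

static struct ctl_table demo_table[] = {
	{
		.procname	= "demo_value",
		.data		= &demo_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }	/* sentinel */
};

static int __net_init demo_sysctl_init(struct net *net)
{
	/* The path is now a plain string; no ctl_path array to build. */
	demo_hdr = register_net_sysctl(net, "net/demo", demo_table);
	return demo_hdr ? 0 : -ENOMEM;
}

A real per-net user would point .data at per-namespace storage before registering, exactly as the hunk above does with table[2].data and table[3].data.
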
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 7128dde0fe1a..44293b3fd6a1 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -756,40 +756,50 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
{
copy_to_user_state(x, p);
- if (x->coaddr)
- NLA_PUT(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
+ if (x->coaddr &&
+ nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr))
+ goto nla_put_failure;
- if (x->lastused)
- NLA_PUT_U64(skb, XFRMA_LASTUSED, x->lastused);
+ if (x->lastused &&
+ nla_put_u64(skb, XFRMA_LASTUSED, x->lastused))
+ goto nla_put_failure;
- if (x->aead)
- NLA_PUT(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
- if (x->aalg) {
- if (copy_to_user_auth(x->aalg, skb))
- goto nla_put_failure;
+ if (x->aead &&
+ nla_put(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead))
+ goto nla_put_failure;
- NLA_PUT(skb, XFRMA_ALG_AUTH_TRUNC,
- xfrm_alg_auth_len(x->aalg), x->aalg);
- }
- if (x->ealg)
- NLA_PUT(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg);
- if (x->calg)
- NLA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
+ if (x->aalg &&
+ (copy_to_user_auth(x->aalg, skb) ||
+ nla_put(skb, XFRMA_ALG_AUTH_TRUNC,
+ xfrm_alg_auth_len(x->aalg), x->aalg)))
+ goto nla_put_failure;
- if (x->encap)
- NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
+ if (x->ealg &&
+ nla_put(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg))
+ goto nla_put_failure;
- if (x->tfcpad)
- NLA_PUT_U32(skb, XFRMA_TFCPAD, x->tfcpad);
+ if (x->calg &&
+ nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg))
+ goto nla_put_failure;
+
+ if (x->encap &&
+ nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap))
+ goto nla_put_failure;
+
+ if (x->tfcpad &&
+ nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad))
+ goto nla_put_failure;
if (xfrm_mark_put(skb, &x->mark))
goto nla_put_failure;
- if (x->replay_esn)
- NLA_PUT(skb, XFRMA_REPLAY_ESN_VAL,
- xfrm_replay_state_esn_len(x->replay_esn), x->replay_esn);
+ if (x->replay_esn &&
+ nla_put(skb, XFRMA_REPLAY_ESN_VAL,
+ xfrm_replay_state_esn_len(x->replay_esn),
+ x->replay_esn))
+ goto nla_put_failure;
- if (x->security && copy_sec_ctx(x->security, skb) < 0)
+ if (x->security && copy_sec_ctx(x->security, skb))
goto nla_put_failure;
return 0;
@@ -912,8 +922,9 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net,
sph.spdhcnt = si.spdhcnt;
sph.spdhmcnt = si.spdhmcnt;
- NLA_PUT(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
- NLA_PUT(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
+ if (nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc) ||
+ nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph))
+ goto nla_put_failure;
return nlmsg_end(skb, nlh);
@@ -967,8 +978,9 @@ static int build_sadinfo(struct sk_buff *skb, struct net *net,
sh.sadhmcnt = si.sadhmcnt;
sh.sadhcnt = si.sadhcnt;
- NLA_PUT_U32(skb, XFRMA_SAD_CNT, si.sadcnt);
- NLA_PUT(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
+ if (nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt) ||
+ nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh))
+ goto nla_put_failure;
return nlmsg_end(skb, nlh);
@@ -1690,21 +1702,27 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct
id->reqid = x->props.reqid;
id->flags = c->data.aevent;
- if (x->replay_esn)
- NLA_PUT(skb, XFRMA_REPLAY_ESN_VAL,
- xfrm_replay_state_esn_len(x->replay_esn),
- x->replay_esn);
- else
- NLA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay);
-
- NLA_PUT(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);
+ if (x->replay_esn) {
+ if (nla_put(skb, XFRMA_REPLAY_ESN_VAL,
+ xfrm_replay_state_esn_len(x->replay_esn),
+ x->replay_esn))
+ goto nla_put_failure;
+ } else {
+ if (nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
+ &x->replay))
+ goto nla_put_failure;
+ }
+ if (nla_put(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft))
+ goto nla_put_failure;
- if (id->flags & XFRM_AE_RTHR)
- NLA_PUT_U32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
+ if ((id->flags & XFRM_AE_RTHR) &&
+ nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff))
+ goto nla_put_failure;
- if (id->flags & XFRM_AE_ETHR)
- NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH,
- x->replay_maxage * 10 / HZ);
+ if ((id->flags & XFRM_AE_ETHR) &&
+ nla_put_u32(skb, XFRMA_ETIMER_THRESH,
+ x->replay_maxage * 10 / HZ))
+ goto nla_put_failure;
if (xfrm_mark_put(skb, &x->mark))
goto nla_put_failure;
@@ -2835,8 +2853,9 @@ static int build_report(struct sk_buff *skb, u8 proto,
ur->proto = proto;
memcpy(&ur->sel, sel, sizeof(ur->sel));
- if (addr)
- NLA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr);
+ if (addr &&
+ nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr))
+ goto nla_put_failure;
return nlmsg_end(skb, nlh);
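
Every xfrm_user.c hunk above is the same mechanical conversion: the NLA_PUT()/NLA_PUT_U32()/NLA_PUT_U64() macros, which hid a goto nla_put_failure inside their expansion, are replaced by explicit calls to nla_put() and friends, whose nonzero return (the attribute did not fit in the skb's tailroom) is handled visibly at each call site. A condensed sketch of the resulting shape — demo_fill_coaddr() is hypothetical, while XFRMA_COADDR and xfrm_address_t are the real identifiers used above:

#include <linux/errno.h>
#include <net/netlink.h>
#include <net/xfrm.h>

static int demo_fill_coaddr(struct sk_buff *skb, const xfrm_address_t *addr)
{
	/* Old form: NLA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr);
	 * which jumped to nla_put_failure behind the caller's back. */
	if (addr && nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

Where one object needs several attributes (x->aalg above needs both copy_to_user_auth() and the XFRMA_ALG_AUTH_TRUNC attribute), the converted code simply ORs the calls together ahead of the single goto.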