| author    | David S. Miller <davem@davemloft.net> | 2018-12-15 10:58:32 -0800 |
|-----------|---------------------------------------|---------------------------|
| committer | David S. Miller <davem@davemloft.net> | 2018-12-15 10:58:32 -0800 |
| commit    | 10589a568f2ec531975504c98c1bed88c233a63d (patch) | |
| tree      | f2e986932c85bdbb5c1531fdd890b9f05664cf50 /tools/testing/selftests/bpf/bpf_flow.c | |
| parent    | 143ece654f9f5b37bedea252a990be37e48ae3a5 (diff) | |
| parent    | 7640ead939247e91e84b7ec6ec001f30193cc7df (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Alexei Starovoitov says:
====================
pull-request: bpf 2018-12-15
The following pull-request contains BPF updates for your *net* tree.
The main changes are:
1) fix liveness propagation of callee saved registers, from Jakub.
2) fix overflow in bpf_jit_limit knob, from Daniel.
3) bpf_flow_dissector api fix, from Stanislav.
4) bpf_perf_event api fix on powerpc, from Sandipan.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'tools/testing/selftests/bpf/bpf_flow.c')
| -rw-r--r-- | tools/testing/selftests/bpf/bpf_flow.c | 36 |
|------------|----------------------------------------|----|

1 file changed, 17 insertions, 19 deletions
diff --git a/tools/testing/selftests/bpf/bpf_flow.c b/tools/testing/selftests/bpf/bpf_flow.c
index 107350a7821d..df9d32fd2055 100644
--- a/tools/testing/selftests/bpf/bpf_flow.c
+++ b/tools/testing/selftests/bpf/bpf_flow.c
@@ -70,18 +70,18 @@ static __always_inline void *bpf_flow_dissect_get_header(struct __sk_buff *skb,
 {
 	void *data_end = (void *)(long)skb->data_end;
 	void *data = (void *)(long)skb->data;
-	__u16 nhoff = skb->flow_keys->nhoff;
+	__u16 thoff = skb->flow_keys->thoff;
 	__u8 *hdr;
 
 	/* Verifies this variable offset does not overflow */
-	if (nhoff > (USHRT_MAX - hdr_size))
+	if (thoff > (USHRT_MAX - hdr_size))
 		return NULL;
 
-	hdr = data + nhoff;
+	hdr = data + thoff;
 	if (hdr + hdr_size <= data_end)
 		return hdr;
 
-	if (bpf_skb_load_bytes(skb, nhoff, buffer, hdr_size))
+	if (bpf_skb_load_bytes(skb, thoff, buffer, hdr_size))
 		return NULL;
 
 	return buffer;
@@ -158,13 +158,13 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
 			/* Only inspect standard GRE packets with version 0 */
 			return BPF_OK;
 
-		keys->nhoff += sizeof(*gre); /* Step over GRE Flags and Proto */
+		keys->thoff += sizeof(*gre); /* Step over GRE Flags and Proto */
 		if (GRE_IS_CSUM(gre->flags))
-			keys->nhoff += 4; /* Step over chksum and Padding */
+			keys->thoff += 4; /* Step over chksum and Padding */
 		if (GRE_IS_KEY(gre->flags))
-			keys->nhoff += 4; /* Step over key */
+			keys->thoff += 4; /* Step over key */
 		if (GRE_IS_SEQ(gre->flags))
-			keys->nhoff += 4; /* Step over sequence number */
+			keys->thoff += 4; /* Step over sequence number */
 
 		keys->is_encap = true;
 
@@ -174,7 +174,7 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
 			if (!eth)
 				return BPF_DROP;
 
-			keys->nhoff += sizeof(*eth);
+			keys->thoff += sizeof(*eth);
 
 			return parse_eth_proto(skb, eth->h_proto);
 		} else {
@@ -191,7 +191,6 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
 		if ((__u8 *)tcp + (tcp->doff << 2) > data_end)
 			return BPF_DROP;
 
-		keys->thoff = keys->nhoff;
 		keys->sport = tcp->source;
 		keys->dport = tcp->dest;
 		return BPF_OK;
@@ -201,7 +200,6 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
 		if (!udp)
 			return BPF_DROP;
 
-		keys->thoff = keys->nhoff;
 		keys->sport = udp->source;
 		keys->dport = udp->dest;
 		return BPF_OK;
@@ -252,8 +250,8 @@ PROG(IP)(struct __sk_buff *skb)
 	keys->ipv4_src = iph->saddr;
 	keys->ipv4_dst = iph->daddr;
 
-	keys->nhoff += iph->ihl << 2;
-	if (data + keys->nhoff > data_end)
+	keys->thoff += iph->ihl << 2;
+	if (data + keys->thoff > data_end)
 		return BPF_DROP;
 
 	if (iph->frag_off & bpf_htons(IP_MF | IP_OFFSET)) {
@@ -285,7 +283,7 @@ PROG(IPV6)(struct __sk_buff *skb)
 	keys->addr_proto = ETH_P_IPV6;
 	memcpy(&keys->ipv6_src, &ip6h->saddr, 2*sizeof(ip6h->saddr));
 
-	keys->nhoff += sizeof(struct ipv6hdr);
+	keys->thoff += sizeof(struct ipv6hdr);
 
 	return parse_ipv6_proto(skb, ip6h->nexthdr);
 }
@@ -301,7 +299,7 @@ PROG(IPV6OP)(struct __sk_buff *skb)
 	/* hlen is in 8-octets and does not include the first 8 bytes
 	 * of the header
 	 */
-	skb->flow_keys->nhoff += (1 + ip6h->hdrlen) << 3;
+	skb->flow_keys->thoff += (1 + ip6h->hdrlen) << 3;
 
 	return parse_ipv6_proto(skb, ip6h->nexthdr);
 }
@@ -315,7 +313,7 @@ PROG(IPV6FR)(struct __sk_buff *skb)
 	if (!fragh)
 		return BPF_DROP;
 
-	keys->nhoff += sizeof(*fragh);
+	keys->thoff += sizeof(*fragh);
 	keys->is_frag = true;
 	if (!(fragh->frag_off & bpf_htons(IP6_OFFSET)))
 		keys->is_first_frag = true;
@@ -341,7 +339,7 @@ PROG(VLAN)(struct __sk_buff *skb)
 	__be16 proto;
 
 	/* Peek back to see if single or double-tagging */
-	if (bpf_skb_load_bytes(skb, keys->nhoff - sizeof(proto), &proto,
+	if (bpf_skb_load_bytes(skb, keys->thoff - sizeof(proto), &proto,
 			       sizeof(proto)))
 		return BPF_DROP;
 
@@ -354,14 +352,14 @@ PROG(VLAN)(struct __sk_buff *skb)
 		if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q))
 			return BPF_DROP;
 
-		keys->nhoff += sizeof(*vlan);
+		keys->thoff += sizeof(*vlan);
 	}
 
 	vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
 	if (!vlan)
 		return BPF_DROP;
 
-	keys->nhoff += sizeof(*vlan);
+	keys->thoff += sizeof(*vlan);
 	/* Only allow 8021AD + 8021Q double tagging and no triple tagging.*/
 	if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) ||
 	    vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q))
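The diff above is the flow dissector part of item 3 in the pull request: the selftest now keeps its running parse offset in flow_keys->thoff instead of nhoff, advancing it past every header it consumes (GRE options, inner Ethernet, IPv4/IPv6 headers, VLAN tags), which is why the explicit keys->thoff = keys->nhoff assignments in the TCP and UDP cases can be dropped. The sketch below is a hypothetical userspace mock, not kernel or selftest code: the names mock_skb, mock_keys, and mock_get_header are invented for illustration, and the bpf_skb_load_bytes() fallback for non-linear data is omitted; it only shows how the overflow check and the thoff bookkeeping from the diff fit together.

```c
/* Hypothetical userspace mock of the thoff bookkeeping in bpf_flow.c.
 * Struct and function names are invented for illustration; only the
 * overflow check and the "advance thoff past each parsed header"
 * pattern mirror the selftest above.
 */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

struct mock_keys {
	uint16_t thoff;		/* running offset, ends at the transport header */
};

struct mock_skb {
	const uint8_t *data;
	const uint8_t *data_end;
	struct mock_keys *keys;
};

/* Same shape as bpf_flow_dissect_get_header(): refuse offsets that would
 * overflow a __u16, then bounds-check against data_end (the
 * bpf_skb_load_bytes() fallback for non-linear data is left out here).
 */
static const void *mock_get_header(const struct mock_skb *skb, size_t hdr_size)
{
	uint16_t thoff = skb->keys->thoff;

	if (thoff > (USHRT_MAX - hdr_size))
		return NULL;
	if (skb->data + thoff + hdr_size > skb->data_end)
		return NULL;
	return skb->data + thoff;
}

int main(void)
{
	uint8_t pkt[64] = {0};
	struct mock_keys keys = { .thoff = 0 };
	struct mock_skb skb = {
		.data = pkt,
		.data_end = pkt + sizeof(pkt),
		.keys = &keys,
	};

	/* Pretend we parsed a 14-byte Ethernet header and a 20-byte IPv4
	 * header; each successful parse advances thoff, just like the
	 * keys->thoff += ... statements in the diff.
	 */
	if (mock_get_header(&skb, 14))
		keys.thoff += 14;
	if (mock_get_header(&skb, 20))
		keys.thoff += 20;

	printf("transport header offset: %u\n", (unsigned)keys.thoff);
	return 0;
}
```

Built with a plain C compiler, the driver prints a transport offset of 34 (14-byte Ethernet plus 20-byte IPv4), which is where a real flow dissector program would start reading the TCP/UDP ports.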
