author		Daniel Xu <dxu@dxuuu.xyz>			2024-09-05 19:22:44 -0600
committer	Daniel Borkmann <daniel@iogearbox.net>		2024-09-11 16:32:11 +0200
commit		23dc9867329c72b48e5039ac93fbf50d9099cdb3 (patch)
tree		9389d70ff28cdf36db750e3c15d703417742a01d /kernel/bpf
parent		d41905b3bb890a32c87b65228b241daad8837fb0 (diff)
bpf, cpumap: Move xdp:xdp_cpumap_kthread tracepoint before rcv
cpumap takes RX processing out of softirq and onto a separate kthread. Since the
kthread needs to be scheduled in order to run (unlike softirq, which does not),
we can theoretically experience extra latency if the system is under load and
the scheduler is being unfair to us.

Moving the tracepoint to before passing the skb list up the stack allows users
to more accurately measure the enqueue/dequeue latency introduced by cpumap via
the xdp:xdp_cpumap_enqueue and xdp:xdp_cpumap_kthread tracepoints.

Commit f9419f7bd7a5 ("bpf: cpumap add tracepoints"), which added the
tracepoints, states that the intent behind them was general observability and a
feedback loop to see whether the queues are being overwhelmed. This change does
not interfere with either of those use cases but rather adds a third one.

Signed-off-by: Daniel Xu <dxu@dxuuu.xyz>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
Link: https://lore.kernel.org/bpf/47615d5b5e302e4bd30220473779e98b492d47cd.1725585718.git.dxu@dxuuu.xyz
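As a rough illustration of that third use case (not part of this patch), the
sketch below pairs the two tracepoints from a libbpf-style BPF program to
approximate cpumap enqueue-to-dequeue latency per target CPU. It assumes a
bpftool-generated vmlinux.h; the context struct and field names
(trace_event_raw_xdp_cpumap_enqueue, to_cpu, and so on) are assumptions that
should be checked against the format files under
/sys/kernel/tracing/events/xdp/.

/* Hypothetical latency probe, not part of this patch: records the last
 * xdp:xdp_cpumap_enqueue timestamp per target CPU and prints the delta
 * when xdp:xdp_cpumap_kthread fires on that CPU. Struct/field names are
 * assumptions; verify against the tracepoint format files.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 256);
	__type(key, u32);	/* target CPU of the cpumap entry */
	__type(value, u64);	/* last enqueue timestamp, ns */
} enq_ts SEC(".maps");

SEC("tracepoint/xdp/xdp_cpumap_enqueue")
int on_enqueue(struct trace_event_raw_xdp_cpumap_enqueue *ctx)
{
	u32 cpu = ctx->to_cpu;		/* CPU whose kthread will dequeue */
	u64 now = bpf_ktime_get_ns();

	bpf_map_update_elem(&enq_ts, &cpu, &now, BPF_ANY);
	return 0;
}

SEC("tracepoint/xdp/xdp_cpumap_kthread")
int on_kthread(struct trace_event_raw_xdp_cpumap_kthread *ctx)
{
	u32 cpu = bpf_get_smp_processor_id();
	u64 *enq = bpf_map_lookup_elem(&enq_ts, &cpu);

	if (enq)
		bpf_printk("cpumap cpu %u latency %llu ns",
			   cpu, bpf_ktime_get_ns() - *enq);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";

Because enqueues from several RX CPUs can overwrite the same slot before the
kthread runs, this only gives a coarse estimate; with the tracepoint now
emitted before netif_receive_skb_list(), the delta no longer includes the time
spent passing the skbs up the stack.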
Diffstat (limited to 'kernel/bpf')
-rw-r--r--	kernel/bpf/cpumap.c	6
1 file changed, 4 insertions, 2 deletions
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index fbdf5a1aabfe..a2f46785ac3b 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -354,12 +354,14 @@ static int cpu_map_kthread_run(void *data)
 			list_add_tail(&skb->list, &list);
 		}
-		netif_receive_skb_list(&list);
-		/* Feedback loop via tracepoint */
+		/* Feedback loop via tracepoint.
+		 * NB: keep before recv to allow measuring enqueue/dequeue latency.
+		 */
 		trace_xdp_cpumap_kthread(rcpu->map_id, n, kmem_alloc_drops,
 					 sched, &stats);
+		netif_receive_skb_list(&list);
 		local_bh_enable(); /* resched point, may call do_softirq() */
 	}
 	__set_current_state(TASK_RUNNING);