author     Jason Gunthorpe <jgg@nvidia.com>  2024-11-12 13:47:28 -0400
committer  Jason Gunthorpe <jgg@nvidia.com>  2024-11-12 13:47:28 -0400
commit     4e6bd13aa33c78346973f01c7303b4909d79ec86 (patch)
tree       6205937f65a4d859dcecaf48da8801a1e4103ce7 /tools/sched_ext/scx_qmap.bpf.c
parent     b047c0644f4e0aafd202e9519a22e17651fbee70 (diff)
parent     f6681abd413919472d8a142420b639a3a8d84673 (diff)
Merge branch 'iommufd/arm-smmuv3-nested' of iommu/linux into iommufd for-next
Common SMMUv3 patches for the following patches adding nesting, shared
branch with the iommu tree.

* 'iommufd/arm-smmuv3-nested' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/iommu/linux:
  iommu/arm-smmu-v3: Expose the arm_smmu_attach interface
  iommu/arm-smmu-v3: Implement IOMMU_HWPT_ALLOC_NEST_PARENT
  iommu/arm-smmu-v3: Support IOMMU_GET_HW_INFO via struct arm_smmu_hw_info
  iommu/arm-smmu-v3: Report IOMMU_CAP_ENFORCE_CACHE_COHERENCY for CANWBS
  ACPI/IORT: Support CANWBS memory access flag
  ACPICA: IORT: Update for revision E.f
  vfio: Remove VFIO_TYPE1_NESTING_IOMMU
  ...

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Diffstat (limited to 'tools/sched_ext/scx_qmap.bpf.c')
-rw-r--r--  tools/sched_ext/scx_qmap.bpf.c  16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/tools/sched_ext/scx_qmap.bpf.c b/tools/sched_ext/scx_qmap.bpf.c
index 83c8f54c1e31..5d1f880d1149 100644
--- a/tools/sched_ext/scx_qmap.bpf.c
+++ b/tools/sched_ext/scx_qmap.bpf.c
@@ -230,8 +230,8 @@ void BPF_STRUCT_OPS(qmap_enqueue, struct task_struct *p, u64 enq_flags)
return;
}
- /* if !WAKEUP, select_cpu() wasn't called, try direct dispatch */
- if (!(enq_flags & SCX_ENQ_WAKEUP) &&
+ /* if select_cpu() wasn't called, try direct dispatch */
+ if (!(enq_flags & SCX_ENQ_CPU_SELECTED) &&
(cpu = pick_direct_dispatch_cpu(p, scx_bpf_task_cpu(p))) >= 0) {
__sync_fetch_and_add(&nr_ddsp_from_enq, 1);
scx_bpf_dispatch(p, SCX_DSQ_LOCAL_ON | cpu, slice_ns, enq_flags);
@@ -318,11 +318,11 @@ static bool dispatch_highpri(bool from_timer)
if (tctx->highpri) {
/* exercise the set_*() and vtime interface too */
- scx_bpf_dispatch_from_dsq_set_slice(
+ __COMPAT_scx_bpf_dispatch_from_dsq_set_slice(
BPF_FOR_EACH_ITER, slice_ns * 2);
- scx_bpf_dispatch_from_dsq_set_vtime(
+ __COMPAT_scx_bpf_dispatch_from_dsq_set_vtime(
BPF_FOR_EACH_ITER, highpri_seq++);
- scx_bpf_dispatch_vtime_from_dsq(
+ __COMPAT_scx_bpf_dispatch_vtime_from_dsq(
BPF_FOR_EACH_ITER, p, HIGHPRI_DSQ, 0);
}
}
@@ -340,9 +340,9 @@ static bool dispatch_highpri(bool from_timer)
else
cpu = scx_bpf_pick_any_cpu(p->cpus_ptr, 0);
- if (scx_bpf_dispatch_from_dsq(BPF_FOR_EACH_ITER, p,
- SCX_DSQ_LOCAL_ON | cpu,
- SCX_ENQ_PREEMPT)) {
+ if (__COMPAT_scx_bpf_dispatch_from_dsq(BPF_FOR_EACH_ITER, p,
+ SCX_DSQ_LOCAL_ON | cpu,
+ SCX_ENQ_PREEMPT)) {
if (cpu == this_cpu) {
dispatched = true;
__sync_fetch_and_add(&nr_expedited_local, 1);