path: root/arch/x86/kernel/kvm.c
author		Jason Gunthorpe <jgg@nvidia.com>	2020-10-09 12:56:02 -0300
committer	Jason Gunthorpe <jgg@nvidia.com>	2020-10-16 12:40:58 -0300
commit		16e7483e6f02973972f832b18042fd6c45fe26c0
tree		205122d1996a983619ccd55c572c34f037a76718 /arch/x86/kernel/kvm.c
parent		bf6a47644ea0928b2a6589ba9fb1221116d8bfaf
parent		0c16d9635e3a51377e5815b9f8e14f497a4dbb42
Merge branch 'dynamic_sg' into rdma.git for-next
From Maor Gottlieb:

====================

This series extends __sg_alloc_table_from_pages() to allow chaining new pages to an already initialized SG table. This lets drivers use the optimization of merging contiguous pages without having to preallocate all the pages and hold them in a very large temporary buffer before calling SG table initialization.

The last patch changes the InfiniBand core to use the new API. It removes duplicated functionality from the code and benefits from the optimization of allocating a dynamic SG table from pages. On a system backed by 2MB huge pages, the SG table would otherwise contain 512x as many SG entries (a 2MB huge page covers 512 contiguous 4KB pages, so merging them into a single SG entry shrinks the table by that factor).

====================

* branch 'dynamic_sg':
  RDMA/umem: Move to allocate SG table from pages
  lib/scatterlist: Add support in dynamic allocation of SG table from pages
  tools/testing/scatterlist: Show errors in human readable form
  tools/testing/scatterlist: Rejuvenate bit-rotten test
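As a rough sketch of the chained calls this enables (illustrative only, not code from the series: __sg_alloc_table_from_pages() and SCATTERLIST_MAX_SEGMENT follow the lib/scatterlist patch, while build_chained_sgt() and its parameters are invented names):

#include <linux/err.h>
#include <linux/scatterlist.h>

/*
 * Build one SG table from two batches of pages.  The first call passes
 * prv = NULL and left_pages = n_second to say more pages will follow;
 * the returned scatterlist is fed back as prv so the second batch is
 * appended (and merged with contiguous entries where possible), and
 * left_pages = 0 on the final call terminates the table.
 */
static int build_chained_sgt(struct sg_table *sgt,
			     struct page **first, unsigned int n_first,
			     struct page **second, unsigned int n_second)
{
	struct scatterlist *sg;

	sg = __sg_alloc_table_from_pages(sgt, first, n_first, 0,
					 (unsigned long)n_first << PAGE_SHIFT,
					 SCATTERLIST_MAX_SEGMENT, NULL,
					 n_second, GFP_KERNEL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	sg = __sg_alloc_table_from_pages(sgt, second, n_second, 0,
					 (unsigned long)n_second << PAGE_SHIFT,
					 SCATTERLIST_MAX_SEGMENT, sg,
					 0, GFP_KERNEL);
	if (IS_ERR(sg)) {
		sg_free_table(sgt);
		return PTR_ERR(sg);
	}
	return 0;
}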
Diffstat (limited to 'arch/x86/kernel/kvm.c')
-rw-r--r--	arch/x86/kernel/kvm.c	22
1 file changed, 3 insertions(+), 19 deletions(-)
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 1b51b727b140..9663ba31347c 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -652,6 +652,7 @@ static void __init kvm_guest_init(void)
 	}
 
 	if (pv_tlb_flush_supported()) {
+		pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
 		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
 		pr_info("KVM setup pv remote TLB flush\n");
 	}
@@ -764,14 +765,6 @@ static __init int activate_jump_labels(void)
 }
 arch_initcall(activate_jump_labels);
 
-static void kvm_free_pv_cpu_mask(void)
-{
-	unsigned int cpu;
-
-	for_each_possible_cpu(cpu)
-		free_cpumask_var(per_cpu(__pv_cpu_mask, cpu));
-}
-
 static __init int kvm_alloc_cpumask(void)
 {
 	int cpu;
@@ -790,20 +783,11 @@ static __init int kvm_alloc_cpumask(void)
 
 	if (alloc)
 		for_each_possible_cpu(cpu) {
-			if (!zalloc_cpumask_var_node(
-				per_cpu_ptr(&__pv_cpu_mask, cpu),
-				GFP_KERNEL, cpu_to_node(cpu))) {
-				goto zalloc_cpumask_fail;
-			}
+			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
+						GFP_KERNEL, cpu_to_node(cpu));
 		}
 
-	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
-	pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
 	return 0;
-
-zalloc_cpumask_fail:
-	kvm_free_pv_cpu_mask();
-	return -ENOMEM;
 }
 arch_initcall(kvm_alloc_cpumask);
 
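As an aside on the kvm.c resolution: the allocation error path can go away presumably because zalloc_cpumask_var_node() can only fail when CONFIG_CPUMASK_OFFSTACK=y; with that option off, cpumask_var_t is an embedded array and the call merely clears it. A minimal sketch of the same per-CPU allocation pattern (example_mask and example_alloc_cpumasks() are invented names; the rationale above is an editorial assumption, not stated by the commit):

#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/topology.h>

/* One mask per possible CPU, mirroring __pv_cpu_mask in kvm.c. */
static DEFINE_PER_CPU(cpumask_var_t, example_mask);

static __init int example_alloc_cpumasks(void)
{
	int cpu;

	/*
	 * Allocate each mask on its CPU's NUMA node.  With
	 * CONFIG_CPUMASK_OFFSTACK=n this cannot fail, so the return
	 * value is deliberately ignored, as in the resolved code.
	 */
	for_each_possible_cpu(cpu)
		zalloc_cpumask_var_node(per_cpu_ptr(&example_mask, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
	return 0;
}
arch_initcall(example_alloc_cpumasks);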