Diffstat (limited to 'arch/mips/kvm')
-rw-r--r--  arch/mips/kvm/emulate.c   | 133
-rw-r--r--  arch/mips/kvm/interrupt.h |   1
-rw-r--r--  arch/mips/kvm/locore.S    |  95
-rw-r--r--  arch/mips/kvm/mips.c      |  28
-rw-r--r--  arch/mips/kvm/tlb.c       |  61
-rw-r--r--  arch/mips/kvm/trap_emul.c |   7
6 files changed, 165 insertions, 160 deletions
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index b37954cc880d..645c8a1982a7 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -302,12 +302,31 @@ static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
*/
static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
- ktime_t expires;
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ ktime_t expires, threshold;
+ uint32_t count, compare;
int running;
- /* Is the hrtimer pending? */
+ /* Calculate the biased and scaled guest CP0_Count */
+ count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
+ compare = kvm_read_c0_guest_compare(cop0);
+
+ /*
+ * Find whether CP0_Count has reached the closest timer interrupt. If
+ * not, we shouldn't inject it.
+ */
+ if ((int32_t)(count - compare) < 0)
+ return count;
+
+ /*
+ * The CP0_Count we're going to return has already reached the closest
+ * timer interrupt. Quickly check if it really is a new interrupt by
+ * looking at whether the interval until the hrtimer expiry time is
+ * less than 1/4 of the timer period.
+ */
expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
- if (ktime_compare(now, expires) >= 0) {
+ threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
+ if (ktime_before(expires, threshold)) {
/*
* Cancel it while we handle it so there's no chance of
* interference with the timeout handler.
@@ -329,8 +348,7 @@ static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
}
}
- /* Return the biased and scaled guest CP0_Count */
- return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
+ return count;
}
/**
@@ -420,32 +438,6 @@ static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
}
/**
- * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
- * @vcpu: Virtual CPU.
- *
- * Recalculates and updates the expiry time of the hrtimer. This can be used
- * after timer parameters have been altered which do not depend on the time that
- * the change occurs (in those cases kvm_mips_freeze_hrtimer() and
- * kvm_mips_resume_hrtimer() are used directly).
- *
- * It is guaranteed that no timer interrupts will be lost in the process.
- *
- * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
- */
-static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
-{
- ktime_t now;
- uint32_t count;
-
- /*
- * freeze_hrtimer takes care of a timer interrupts <= count, and
- * resume_hrtimer the hrtimer takes care of a timer interrupts > count.
- */
- now = kvm_mips_freeze_hrtimer(vcpu, &count);
- kvm_mips_resume_hrtimer(vcpu, now, count);
-}
-
-/**
* kvm_mips_write_count() - Modify the count and update timer.
* @vcpu: Virtual CPU.
* @count: Guest CP0_Count value to set.
@@ -540,23 +532,42 @@ int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
* kvm_mips_write_compare() - Modify compare and update timer.
* @vcpu: Virtual CPU.
* @compare: New CP0_Compare value.
+ * @ack: Whether to acknowledge timer interrupt.
*
* Update CP0_Compare to a new value and update the timeout.
+ * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
+ * any pending timer interrupt is preserved.
*/
-void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
+void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
+ int dc;
+ u32 old_compare = kvm_read_c0_guest_compare(cop0);
+ ktime_t now;
+ uint32_t count;
/* if unchanged, must just be an ack */
- if (kvm_read_c0_guest_compare(cop0) == compare)
+ if (old_compare == compare) {
+ if (!ack)
+ return;
+ kvm_mips_callbacks->dequeue_timer_int(vcpu);
+ kvm_write_c0_guest_compare(cop0, compare);
return;
+ }
+
+ /* freeze_hrtimer() takes care of timer interrupts <= count */
+ dc = kvm_mips_count_disabled(vcpu);
+ if (!dc)
+ now = kvm_mips_freeze_hrtimer(vcpu, &count);
+
+ if (ack)
+ kvm_mips_callbacks->dequeue_timer_int(vcpu);
- /* Update compare */
kvm_write_c0_guest_compare(cop0, compare);
- /* Update timeout if count enabled */
- if (!kvm_mips_count_disabled(vcpu))
- kvm_mips_update_hrtimer(vcpu);
+ /* resume_hrtimer() takes care of timer interrupts > count */
+ if (!dc)
+ kvm_mips_resume_hrtimer(vcpu, now, count);
}
/**
@@ -1068,15 +1079,15 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
kvm_read_c0_guest_ebase(cop0));
} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
uint32_t nasid =
- vcpu->arch.gprs[rt] & ASID_MASK;
+ vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
((kvm_read_c0_guest_entryhi(cop0) &
- ASID_MASK) != nasid)) {
+ KVM_ENTRYHI_ASID) != nasid)) {
kvm_debug("MTCz, change ASID from %#lx to %#lx\n",
kvm_read_c0_guest_entryhi(cop0)
- & ASID_MASK,
+ & KVM_ENTRYHI_ASID,
vcpu->arch.gprs[rt]
- & ASID_MASK);
+ & KVM_ENTRYHI_ASID);
/* Blow away the shadow host TLBs */
kvm_mips_flush_host_tlb(1);
@@ -1095,9 +1106,9 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
/* If we are writing to COMPARE */
/* Clear pending timer interrupt, if any */
- kvm_mips_callbacks->dequeue_timer_int(vcpu);
kvm_mips_write_compare(vcpu,
- vcpu->arch.gprs[rt]);
+ vcpu->arch.gprs[rt],
+ true);
} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
unsigned int old_val, val, change;
@@ -1620,11 +1631,12 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
*/
index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
(kvm_read_c0_guest_entryhi
- (cop0) & ASID_MASK));
+ (cop0) & KVM_ENTRYHI_ASID));
if (index < 0) {
vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
vcpu->arch.host_cp0_badvaddr = va;
+ vcpu->arch.pc = curr_pc;
er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
vcpu);
preempt_enable();
@@ -1636,6 +1648,8 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
* invalid exception to the guest
*/
if (!TLB_IS_VALID(*tlb, va)) {
+ vcpu->arch.host_cp0_badvaddr = va;
+ vcpu->arch.pc = curr_pc;
er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
run, vcpu);
preempt_enable();
@@ -1655,7 +1669,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
cache, op, base, arch->gprs[base], offset);
er = EMULATE_FAIL;
preempt_enable();
- goto dont_update_pc;
+ goto done;
}
@@ -1683,16 +1697,20 @@ skip_fault:
kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
cache, op, base, arch->gprs[base], offset);
er = EMULATE_FAIL;
- preempt_enable();
- goto dont_update_pc;
}
preempt_enable();
+done:
+ /* Rollback PC only if emulation was unsuccessful */
+ if (er == EMULATE_FAIL)
+ vcpu->arch.pc = curr_pc;
dont_update_pc:
- /* Rollback PC */
- vcpu->arch.pc = curr_pc;
-done:
+ /*
+ * This is for exceptions whose emulation updates the PC, so do not
+ * overwrite the PC under any circumstances
+ */
+
return er;
}
@@ -1786,7 +1804,7 @@ enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch;
unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+ (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
/* save old pc */
@@ -1833,7 +1851,7 @@ enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
struct kvm_vcpu_arch *arch = &vcpu->arch;
unsigned long entryhi =
(vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+ (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
/* save old pc */
@@ -1878,7 +1896,7 @@ enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch;
unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+ (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
/* save old pc */
@@ -1922,7 +1940,7 @@ enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch;
unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+ (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
/* save old pc */
@@ -1967,7 +1985,7 @@ enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
#ifdef DEBUG
struct mips_coproc *cop0 = vcpu->arch.cop0;
unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+ (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
int index;
/* If address not in the guest TLB, then we are in trouble */
@@ -1994,7 +2012,7 @@ enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+ (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
struct kvm_vcpu_arch *arch = &vcpu->arch;
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
@@ -2569,7 +2587,8 @@ enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
*/
index = kvm_mips_guest_tlb_lookup(vcpu,
(va & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & ASID_MASK));
+ (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
+ KVM_ENTRYHI_ASID));
if (index < 0) {
if (exccode == EXCCODE_TLBL) {
er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
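
A note on the new early-out in kvm_mips_read_count_running(): the test
(int32_t)(count - compare) < 0 is the usual wraparound-safe way to ask
whether a free-running 32-bit counter has reached its compare point. The
following minimal userspace sketch demonstrates the idiom (the helper name
is illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/*
 * Wraparound-safe "has Count reached Compare?" check: the unsigned
 * subtraction is taken modulo 2^32, so interpreting the difference as
 * signed stays correct even after the counter wraps past 0xffffffff,
 * provided the two values are within half the counter range.
 */
static int count_reached(uint32_t count, uint32_t compare)
{
	return (int32_t)(count - compare) >= 0;
}

int main(void)
{
	printf("%d\n", count_reached(100, 200));	/* 0: not yet */
	printf("%d\n", count_reached(200, 200));	/* 1: exactly at Compare */
	printf("%d\n", count_reached(5, 0xfffffff0u));	/* 1: reached, across the wrap */
	return 0;
}
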
diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h
index 4ab4bdfad703..2143884709e4 100644
--- a/arch/mips/kvm/interrupt.h
+++ b/arch/mips/kvm/interrupt.h
@@ -28,6 +28,7 @@
#define MIPS_EXC_MAX 12
/* XXXSL More to follow */
+extern char __kvm_mips_vcpu_run_end[];
extern char mips32_exception[], mips32_exceptionEnd[];
extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
index 81687ab1b523..828fcfc1cd7f 100644
--- a/arch/mips/kvm/locore.S
+++ b/arch/mips/kvm/locore.S
@@ -32,7 +32,6 @@
EXPORT(x);
/* Overload, Danger Will Robinson!! */
-#define PT_HOST_ASID PT_BVADDR
#define PT_HOST_USERLOCAL PT_EPC
#define CP0_DDATA_LO $28,3
@@ -49,45 +48,18 @@
* a1: vcpu
*/
.set noreorder
- .set noat
FEXPORT(__kvm_mips_vcpu_run)
/* k0/k1 not being used in host kernel context */
INT_ADDIU k1, sp, -PT_SIZE
- LONG_S $0, PT_R0(k1)
- LONG_S $1, PT_R1(k1)
- LONG_S $2, PT_R2(k1)
- LONG_S $3, PT_R3(k1)
-
- LONG_S $4, PT_R4(k1)
- LONG_S $5, PT_R5(k1)
- LONG_S $6, PT_R6(k1)
- LONG_S $7, PT_R7(k1)
-
- LONG_S $8, PT_R8(k1)
- LONG_S $9, PT_R9(k1)
- LONG_S $10, PT_R10(k1)
- LONG_S $11, PT_R11(k1)
- LONG_S $12, PT_R12(k1)
- LONG_S $13, PT_R13(k1)
- LONG_S $14, PT_R14(k1)
- LONG_S $15, PT_R15(k1)
LONG_S $16, PT_R16(k1)
LONG_S $17, PT_R17(k1)
-
LONG_S $18, PT_R18(k1)
LONG_S $19, PT_R19(k1)
LONG_S $20, PT_R20(k1)
LONG_S $21, PT_R21(k1)
LONG_S $22, PT_R22(k1)
LONG_S $23, PT_R23(k1)
- LONG_S $24, PT_R24(k1)
- LONG_S $25, PT_R25(k1)
-
- /*
- * XXXKYMA k0/k1 not saved, not being used if we got here through
- * an ioctl()
- */
LONG_S $28, PT_R28(k1)
LONG_S $29, PT_R29(k1)
@@ -104,11 +76,6 @@ FEXPORT(__kvm_mips_vcpu_run)
mfc0 v0, CP0_STATUS
LONG_S v0, PT_STATUS(k1)
- /* Save host ASID, shove it into the BVADDR location */
- mfc0 v1, CP0_ENTRYHI
- andi v1, 0xff
- LONG_S v1, PT_HOST_ASID(k1)
-
/* Save DDATA_LO, will be used to store pointer to vcpu */
mfc0 v1, CP0_DDATA_LO
LONG_S v1, PT_HOST_USERLOCAL(k1)
@@ -170,13 +137,21 @@ FEXPORT(__kvm_mips_load_asid)
INT_SLL t2, t2, 2 /* x4 */
REG_ADDU t3, t1, t2
LONG_L k0, (t3)
- andi k0, k0, 0xff
+#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
+ li t3, CPUINFO_SIZE/4
+ mul t2, t2, t3 /* x sizeof(struct cpuinfo_mips)/4 */
+ LONG_L t2, (cpu_data + CPUINFO_ASID_MASK)(t2)
+ and k0, k0, t2
+#else
+ andi k0, k0, MIPS_ENTRYHI_ASID
+#endif
mtc0 k0, CP0_ENTRYHI
ehb
/* Disable RDHWR access */
mtc0 zero, CP0_HWRENA
+ .set noat
/* Now load up the Guest Context from VCPU */
LONG_L $1, VCPU_R1(k1)
LONG_L $2, VCPU_R2(k1)
@@ -227,6 +202,7 @@ FEXPORT(__kvm_mips_load_k0k1)
/* Jump to guest */
eret
+EXPORT(__kvm_mips_vcpu_run_end)
VECTOR(MIPSX(exception), unknown)
/* Find out what mode we came from and jump to the proper handler. */
@@ -288,6 +264,8 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
LONG_S $30, VCPU_R30(k1)
LONG_S $31, VCPU_R31(k1)
+ .set at
+
/* We need to save hi/lo and restore them on the way out */
mfhi t0
LONG_S t0, VCPU_HI(k1)
@@ -339,9 +317,7 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
/* load up the host EBASE */
mfc0 v0, CP0_STATUS
- .set at
or k0, v0, ST0_BEV
- .set noat
mtc0 k0, CP0_STATUS
ehb
@@ -353,7 +329,6 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
* If FPU is enabled, save FCR31 and clear it so that later ctc1's don't
* trigger FPE for pending exceptions.
*/
- .set at
and v1, v0, ST0_CU1
beqz v1, 1f
nop
@@ -363,7 +338,6 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
sw t0, VCPU_FCR31(k1)
ctc1 zero,fcr31
.set pop
- .set noat
1:
#ifdef CONFIG_CPU_HAS_MSA
@@ -386,10 +360,8 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
#endif
/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
- .set at
and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
or v0, v0, ST0_CU0
- .set noat
mtc0 v0, CP0_STATUS
ehb
@@ -456,18 +428,14 @@ __kvm_mips_return_to_guest:
/* Switch EBASE back to the one used by KVM */
mfc0 v1, CP0_STATUS
- .set at
or k0, v1, ST0_BEV
- .set noat
mtc0 k0, CP0_STATUS
ehb
mtc0 t0, CP0_EBASE
/* Setup status register for running guest in UM */
- .set at
or v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
and v1, v1, ~(ST0_CU0 | ST0_MX)
- .set noat
mtc0 v1, CP0_STATUS
ehb
@@ -489,13 +457,21 @@ __kvm_mips_return_to_guest:
INT_SLL t2, t2, 2 /* x4 */
REG_ADDU t3, t1, t2
LONG_L k0, (t3)
- andi k0, k0, 0xff
+#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
+ li t3, CPUINFO_SIZE/4
+ mul t2, t2, t3 /* x sizeof(struct cpuinfo_mips)/4 */
+ LONG_L t2, (cpu_data + CPUINFO_ASID_MASK)(t2)
+ and k0, k0, t2
+#else
+ andi k0, k0, MIPS_ENTRYHI_ASID
+#endif
mtc0 k0, CP0_ENTRYHI
ehb
/* Disable RDHWR access */
mtc0 zero, CP0_HWRENA
+ .set noat
/* load the guest context from VCPU and return */
LONG_L $0, VCPU_R0(k1)
LONG_L $1, VCPU_R1(k1)
@@ -541,6 +517,7 @@ FEXPORT(__kvm_mips_skip_guest_restore)
LONG_L k1, VCPU_R27(k1)
eret
+ .set at
__kvm_mips_return_to_host:
/* EBASE is already pointing to Linux */
@@ -551,16 +528,6 @@ __kvm_mips_return_to_host:
LONG_L k0, PT_HOST_USERLOCAL(k1)
mtc0 k0, CP0_DDATA_LO
- /* Restore host ASID */
- LONG_L k0, PT_HOST_ASID(sp)
- andi k0, 0xff
- mtc0 k0,CP0_ENTRYHI
- ehb
-
- /* Load context saved on the host stack */
- LONG_L $0, PT_R0(k1)
- LONG_L $1, PT_R1(k1)
-
/*
* r2/v0 is the return code, shift it down by 2 (arithmetic)
* to recover the err code
@@ -568,19 +535,7 @@ __kvm_mips_return_to_host:
INT_SRA k0, v0, 2
move $2, k0
- LONG_L $3, PT_R3(k1)
- LONG_L $4, PT_R4(k1)
- LONG_L $5, PT_R5(k1)
- LONG_L $6, PT_R6(k1)
- LONG_L $7, PT_R7(k1)
- LONG_L $8, PT_R8(k1)
- LONG_L $9, PT_R9(k1)
- LONG_L $10, PT_R10(k1)
- LONG_L $11, PT_R11(k1)
- LONG_L $12, PT_R12(k1)
- LONG_L $13, PT_R13(k1)
- LONG_L $14, PT_R14(k1)
- LONG_L $15, PT_R15(k1)
+ /* Load context saved on the host stack */
LONG_L $16, PT_R16(k1)
LONG_L $17, PT_R17(k1)
LONG_L $18, PT_R18(k1)
@@ -589,10 +544,6 @@ __kvm_mips_return_to_host:
LONG_L $21, PT_R21(k1)
LONG_L $22, PT_R22(k1)
LONG_L $23, PT_R23(k1)
- LONG_L $24, PT_R24(k1)
- LONG_L $25, PT_R25(k1)
-
- /* Host k0/k1 were not saved */
LONG_L $28, PT_R28(k1)
LONG_L $29, PT_R29(k1)
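
For reference, the CONFIG_MIPS_ASID_BITS_VARIABLE branches above replace the
hard-coded andi with 0xff by a mask loaded from the current CPU's cpu_data
entry (the earlier x4 shift multiplied by CPUINFO_SIZE/4 yields
cpu * sizeof(struct cpuinfo_mips) as the byte offset). A rough C equivalent,
with illustrative stand-ins for struct cpuinfo_mips and its asid_mask field
rather than the kernel's definitions:

#include <stdio.h>

struct cpuinfo {
	unsigned long asid_mask;	/* stand-in for cpuinfo_mips asid_mask */
};

static struct cpuinfo cpu_data[2] = {
	{ .asid_mask = 0xff  },		/* classic 8-bit ASID */
	{ .asid_mask = 0x3ff },		/* e.g. a 10-bit ASID */
};

/* Mask an EntryHi ASID with the mask of the CPU it will be used on. */
static unsigned long entryhi_asid(unsigned long asid, int cpu)
{
	return asid & cpu_data[cpu].asid_mask;
}

int main(void)
{
	printf("%#lx\n", entryhi_asid(0x1a5, 0));	/* 0xa5: high bits dropped */
	printf("%#lx\n", entryhi_asid(0x1a5, 1));	/* 0x1a5: fits in 10 bits */
	return 0;
}
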
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 3110447ab1e9..44da5259f390 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -56,6 +56,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
+ { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU },
{ "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
{NULL}
};
@@ -314,6 +315,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
memcpy(gebase + offset, mips32_GuestException,
mips32_GuestExceptionEnd - mips32_GuestException);
+#ifdef MODULE
+ offset += mips32_GuestExceptionEnd - mips32_GuestException;
+ memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
+ __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
+ vcpu->arch.vcpu_run = gebase + offset;
+#else
+ vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
+#endif
+
/* Invalidate the icache for these ranges */
local_flush_icache_range((unsigned long)gebase,
(unsigned long)gebase + ALIGN(size, PAGE_SIZE));
@@ -403,7 +413,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
/* Disable hardware page table walking while in guest */
htw_stop();
- r = __kvm_mips_vcpu_run(run, vcpu);
+ r = vcpu->arch.vcpu_run(run, vcpu);
/* Re-enable HTW before enabling interrupts */
htw_start();
@@ -445,8 +455,8 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
dvcpu->arch.wait = 0;
- if (waitqueue_active(&dvcpu->wq))
- wake_up_interruptible(&dvcpu->wq);
+ if (swait_active(&dvcpu->wq))
+ swake_up(&dvcpu->wq);
return 0;
}
@@ -1079,7 +1089,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
break;
case KVM_CAP_MIPS_FPU:
- r = !!cpu_has_fpu;
+ /* We don't handle systems with inconsistent cpu_has_fpu */
+ r = !!raw_cpu_has_fpu;
break;
case KVM_CAP_MIPS_MSA:
/*
@@ -1174,8 +1185,8 @@ static void kvm_mips_comparecount_func(unsigned long data)
kvm_mips_callbacks->queue_timer_int(vcpu);
vcpu->arch.wait = 0;
- if (waitqueue_active(&vcpu->wq))
- wake_up_interruptible(&vcpu->wq);
+ if (swait_active(&vcpu->wq))
+ swake_up(&vcpu->wq);
}
/* low level hrtimer wake routine */
@@ -1555,8 +1566,10 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
/* Disable MSA & FPU */
disable_msa();
- if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
+ if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
clear_c0_status(ST0_CU1 | ST0_FR);
+ disable_fpu_hazard();
+ }
vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA);
} else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
set_c0_status(ST0_CU1);
@@ -1567,6 +1580,7 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
/* Disable FPU */
clear_c0_status(ST0_CU1 | ST0_FR);
+ disable_fpu_hazard();
}
preempt_enable();
}
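
The MODULE hunk above exists because, with KVM built as a module, the
__kvm_mips_vcpu_run entry point lives in module space, which is TLB-mapped
and can therefore take TLB refills at points where that is fatal (e.g. with
the guest ASID loaded); copying it into the unmapped gebase area, delimited
by the new __kvm_mips_vcpu_run_end symbol and invoked through
vcpu->arch.vcpu_run, avoids this. A toy sketch of the copy pattern, with
byte arrays standing in for the real handler blobs:

#include <stdio.h>
#include <string.h>

/* Stand-ins for code blobs delimited by exported start/end symbols. */
static const unsigned char guest_exception[] = { 0x01, 0x02, 0x03, 0x04 };
static const unsigned char vcpu_run[]        = { 0x05, 0x06, 0x07, 0x08 };

int main(void)
{
	unsigned char gebase[32];
	size_t offset = 0;

	/* Copy each blob in turn, advancing the running offset... */
	memcpy(gebase + offset, guest_exception, sizeof(guest_exception));
	offset += sizeof(guest_exception);

	/*
	 * ...so the relocated entry point is gebase + offset, the value
	 * the patch stores in vcpu->arch.vcpu_run (the kernel then
	 * flushes the icache over the whole range).
	 */
	memcpy(gebase + offset, vcpu_run, sizeof(vcpu_run));
	const unsigned char *entry = gebase + offset;

	printf("relocated entry at offset %zu, first byte %#x\n",
	       offset, entry[0]);
	return 0;
}
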
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index a08c43946247..ed021ae7867a 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -49,12 +49,18 @@ EXPORT_SYMBOL_GPL(kvm_mips_is_error_pfn);
uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
+ int cpu = smp_processor_id();
+
+ return vcpu->arch.guest_kernel_asid[cpu] &
+ cpu_asid_mask(&cpu_data[cpu]);
}
uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
+ int cpu = smp_processor_id();
+
+ return vcpu->arch.guest_user_asid[cpu] &
+ cpu_asid_mask(&cpu_data[cpu]);
}
inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
@@ -78,7 +84,8 @@ void kvm_mips_dump_host_tlbs(void)
old_pagemask = read_c0_pagemask();
kvm_info("HOST TLBs:\n");
- kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
+ kvm_info("ASID: %#lx\n", read_c0_entryhi() &
+ cpu_asid_mask(&current_cpu_data));
for (i = 0; i < current_cpu_data.tlbsize; i++) {
write_c0_index(i);
@@ -268,6 +275,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
int even;
struct kvm *kvm = vcpu->kvm;
const int flush_dcache_mask = 0;
+ int ret;
if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
@@ -299,14 +307,18 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
pfn1 = kvm->arch.guest_pmap[gfn];
}
- entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
(1 << 2) | (0x1 << 1);
entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
(1 << 2) | (0x1 << 1);
- return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
- flush_dcache_mask);
+ preempt_disable();
+ entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
+ ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+ flush_dcache_mask);
+ preempt_enable();
+
+ return ret;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_kseg0_tlb_fault);
@@ -361,6 +373,7 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
struct kvm *kvm = vcpu->kvm;
kvm_pfn_t pfn0, pfn1;
+ int ret;
if ((tlb->tlb_hi & VPN2_MASK) == 0) {
pfn0 = 0;
@@ -387,9 +400,6 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
*hpa1 = pfn1 << PAGE_SHIFT;
/* Get attributes from the Guest TLB */
- entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
- kvm_mips_get_kernel_asid(vcpu) :
- kvm_mips_get_user_asid(vcpu));
entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
(tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
@@ -398,8 +408,15 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
tlb->tlb_lo0, tlb->tlb_lo1);
- return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
- tlb->tlb_mask);
+ preempt_disable();
+ entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
+ kvm_mips_get_kernel_asid(vcpu) :
+ kvm_mips_get_user_asid(vcpu));
+ ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+ tlb->tlb_mask);
+ preempt_enable();
+
+ return ret;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_mapped_seg_tlb_fault);
@@ -564,15 +581,15 @@ void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
{
unsigned long asid = asid_cache(cpu);
- asid += ASID_INC;
- if (!(asid & ASID_MASK)) {
+ asid += cpu_asid_inc();
+ if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
if (cpu_has_vtag_icache)
flush_icache_all();
kvm_local_flush_tlb_all(); /* start new asid cycle */
if (!asid) /* fix version if needed */
- asid = ASID_FIRST_VERSION;
+ asid = asid_first_version(cpu);
}
cpu_context(cpu, mm) = asid_cache(cpu) = asid;
@@ -627,17 +644,18 @@ static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
+ unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
unsigned long flags;
int newasid = 0;
kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
- /* Alocate new kernel and user ASIDs if needed */
+ /* Allocate new kernel and user ASIDs if needed */
local_irq_save(flags);
if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
- ASID_VERSION_MASK) {
+ asid_version_mask(cpu)) {
kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
vcpu->arch.guest_kernel_asid[cpu] =
vcpu->arch.guest_kernel_mm.context.asid[cpu];
@@ -672,7 +690,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
*/
if (current->flags & PF_VCPU) {
write_c0_entryhi(vcpu->arch.
- preempt_entryhi & ASID_MASK);
+ preempt_entryhi & asid_mask);
ehb();
}
} else {
@@ -687,11 +705,11 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (KVM_GUEST_KERNEL_MODE(vcpu))
write_c0_entryhi(vcpu->arch.
guest_kernel_asid[cpu] &
- ASID_MASK);
+ asid_mask);
else
write_c0_entryhi(vcpu->arch.
guest_user_asid[cpu] &
- ASID_MASK);
+ asid_mask);
ehb();
}
}
@@ -721,7 +739,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
kvm_mips_callbacks->vcpu_get_regs(vcpu);
if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
- ASID_VERSION_MASK)) {
+ asid_version_mask(cpu))) {
kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
cpu_context(cpu, current->mm));
drop_mmu_context(current->mm, cpu);
@@ -748,7 +766,8 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
inst = *(opc);
} else {
vpn2 = (unsigned long) opc & VPN2_MASK;
- asid = kvm_read_c0_guest_entryhi(cop0) & ASID_MASK;
+ asid = kvm_read_c0_guest_entryhi(cop0) &
+ KVM_ENTRYHI_ASID;
index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
if (index < 0) {
kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
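
Two themes run through the tlb.c changes: the fixed ASID_MASK and
ASID_VERSION_MASK constants give way to per-CPU cpu_asid_mask() and
asid_version_mask() lookups, and preempt_disable()/preempt_enable() pairs
now bracket the EntryHi computation so the ASID is read and the TLB written
on the same CPU. The version check itself splits each context value into a
hardware ASID in the low bits and a generation counter above it; below is a
small demo of that check, using illustrative fixed constants rather than the
kernel's per-CPU values:

#include <stdio.h>

#define ASID_MASK	0xffUL		/* illustrative: 8 ASID bits */
#define VERSION_MASK	(~ASID_MASK)	/* generation lives above them */

int main(void)
{
	unsigned long asid_cache = 0x300UL | 0x42; /* generation 3, ASID 0x42 */
	unsigned long ctx_ok     = 0x300UL | 0x17; /* same generation: still valid */
	unsigned long ctx_stale  = 0x200UL | 0x17; /* older generation: reallocate */

	printf("ok:    %#lx\n", (ctx_ok    ^ asid_cache) & VERSION_MASK); /* 0 */
	printf("stale: %#lx\n", (ctx_stale ^ asid_cache) & VERSION_MASK); /* nonzero */
	return 0;
}
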
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index ad988000563f..6ba0fafcecbc 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -500,12 +500,13 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
/*
- * Setup IntCtl defaults, compatibilty mode for timer interrupts (HW5)
+ * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
*/
kvm_write_c0_guest_intctl(cop0, 0xFC000000);
/* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
- kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));
+ kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
+ (vcpu_id & MIPS_EBASE_CPUNUM));
return 0;
}
@@ -546,7 +547,7 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
kvm_mips_write_count(vcpu, v);
break;
case KVM_REG_MIPS_CP0_COMPARE:
- kvm_mips_write_compare(vcpu, v);
+ kvm_mips_write_compare(vcpu, v, false);
break;
case KVM_REG_MIPS_CP0_CAUSE:
/*
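
The new ack argument distinguishes the two writers of CP0_Compare: a guest
MTC0 acknowledges any pending timer interrupt (ack=true, the emulate.c
hunk), while a userspace register restore must preserve it (ack=false here),
e.g. when Compare is written back during migration. A hedged sketch of the
userspace side that reaches this path; the register-ID encoding follows the
KVM one-reg documentation, the constant is defined locally in case the uapi
headers in use lack a named one, and error handling and VM/vCPU setup are
omitted:

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/*
 * MIPS CP0 register IDs are encoded as
 * KVM_REG_MIPS | size | 0x10000 | (8 * reg + sel);
 * CP0_Compare is register 11, select 0.
 */
#define REG_MIPS_CP0_COMPARE \
	(KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * 11 + 0))

/*
 * Restore CP0_Compare without acknowledging a pending timer interrupt:
 * in the kernel this lands in kvm_mips_write_compare(vcpu, v, false).
 */
static int restore_compare(int vcpu_fd, uint32_t compare)
{
	struct kvm_one_reg reg = {
		.id   = REG_MIPS_CP0_COMPARE,
		.addr = (uintptr_t)&compare,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}
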