
kvm/ppc: IRQ disabling cleanup

Simplify the handling of lazy EE by going directly from fully-enabled
to hard-disabled.  This replaces the lazy_irq_pending() check
(including its misplaced kvm_guest_exit() call).

As suggested by Tiejun Chen, move the interrupt disabling into
kvmppc_prepare_to_enter() rather than have each caller do it.  Also
move the IRQ enabling on heavyweight exit into
kvmppc_prepare_to_enter().

Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
Author: Scott Wood, 2014-01-09 19:18:40 -06:00
Committer: Alexander Graf
commit 6c85f52b10 (parent 70713fe315)
4 changed files with 26 additions and 32 deletions
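
In caller terms, the change reduces to the pattern below (an illustrative
sketch distilled from the hunks that follow; variable names match the
book3s_pr.c caller):

	/* Before: every caller bracketed the call with IRQ flips. */
	local_irq_disable();
	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		local_irq_enable();	/* heavyweight exit to host */
		r = s;
	} else {
		kvmppc_fix_ee_before_entry();
	}

	/* After: kvmppc_prepare_to_enter() hard-disables interrupts itself
	 * and re-enables them when it returns s <= 0. */
	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0)
		r = s;
	else {
		/* interrupts now hard-disabled */
		kvmppc_fix_ee_before_entry();
	}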

arch/powerpc/include/asm/kvm_ppc.h

@@ -456,6 +456,12 @@ static inline void kvmppc_fix_ee_before_entry(void)
 	trace_hardirqs_on();
 #ifdef CONFIG_PPC64
+	/*
+	 * To avoid races, the caller must have gone directly from having
+	 * interrupts fully-enabled to hard-disabled.
+	 */
+	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);
+
 	/* Only need to enable IRQs by hard enabling them after this */
 	local_paca->irq_happened = 0;
 	local_paca->soft_enabled = 1;
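
The new WARN_ON leans on ppc64's lazy-EE bookkeeping: hard_irq_disable()
clears MSR[EE] and records that fact in the PACA. A simplified sketch of
that bookkeeping (hypothetical helper; the real arch/powerpc macro has
more detail, e.g. hardirq tracing):

	/* Sketch only, not the actual arch/powerpc implementation. */
	static inline void sketch_hard_irq_disable(void)
	{
		__hard_irq_disable();			/* clear MSR[EE] */
		local_paca->soft_enabled = 0;		/* soft-disable too */
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
	}

Going straight from fully-enabled (irq_happened == 0) to hard-disabled
therefore leaves exactly PACA_IRQ_HARD_DIS set, so the two assignments
below the WARN_ON cannot throw away a pending interrupt.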

arch/powerpc/kvm/book3s_pr.c

@@ -999,14 +999,14 @@ program_interrupt:
 		 * and if we really did time things so badly, then we just exit
 		 * again due to a host external interrupt.
 		 */
-		local_irq_disable();
 		s = kvmppc_prepare_to_enter(vcpu);
-		if (s <= 0) {
-			local_irq_enable();
+		if (s <= 0)
 			r = s;
-		} else {
+		else {
+			/* interrupts now hard-disabled */
 			kvmppc_fix_ee_before_entry();
 		}
 
 		kvmppc_handle_lost_ext(vcpu);
 	}
@@ -1219,12 +1219,10 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	 * really did time things so badly, then we just exit again due to
 	 * a host external interrupt.
 	 */
-	local_irq_disable();
 	ret = kvmppc_prepare_to_enter(vcpu);
-	if (ret <= 0) {
-		local_irq_enable();
+	if (ret <= 0)
 		goto out;
-	}
+	/* interrupts now hard-disabled */
 
 	/* Save FPU state in thread_struct */
 	if (current->thread.regs->msr & MSR_FP)

arch/powerpc/kvm/booke.c

@@ -643,7 +643,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 		local_irq_enable();
 		kvm_vcpu_block(vcpu);
 		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
-		local_irq_disable();
+		hard_irq_disable();
 
 		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
 		r = 1;
@@ -688,13 +688,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		return -EINVAL;
 	}
 
-	local_irq_disable();
 	s = kvmppc_prepare_to_enter(vcpu);
 	if (s <= 0) {
-		local_irq_enable();
 		ret = s;
 		goto out;
 	}
+	/* interrupts now hard-disabled */
 
 #ifdef CONFIG_PPC_FPU
 	/* Save userspace FPU state in stack */
@@ -1187,12 +1186,11 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	 * aren't already exiting to userspace for some other reason.
 	 */
 	if (!(r & RESUME_HOST)) {
-		local_irq_disable();
 		s = kvmppc_prepare_to_enter(vcpu);
-		if (s <= 0) {
-			local_irq_enable();
+		if (s <= 0)
 			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
-		} else {
+		else {
+			/* interrupts now hard-disabled */
 			kvmppc_fix_ee_before_entry();
 		}
 	}

arch/powerpc/kvm/powerpc.c

@@ -68,14 +68,16 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
  */
 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
 {
-	int r = 1;
+	int r;
 
-	WARN_ON_ONCE(!irqs_disabled());
+	WARN_ON(irqs_disabled());
+	hard_irq_disable();
+
 	while (true) {
 		if (need_resched()) {
 			local_irq_enable();
 			cond_resched();
-			local_irq_disable();
+			hard_irq_disable();
 			continue;
 		}
@@ -101,7 +103,7 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
 			local_irq_enable();
 			trace_kvm_check_requests(vcpu);
 			r = kvmppc_core_check_requests(vcpu);
-			local_irq_disable();
+			hard_irq_disable();
 			if (r > 0)
 				continue;
 			break;
@@ -113,22 +115,12 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
 			continue;
 		}
 
-#ifdef CONFIG_PPC64
-		/* lazy EE magic */
-		hard_irq_disable();
-		if (lazy_irq_pending()) {
-			/* Got an interrupt in between, try again */
-			local_irq_enable();
-			local_irq_disable();
-			kvm_guest_exit();
-			continue;
-		}
-#endif
 		kvm_guest_enter();
-		break;
+		return 1;
 	}
 
-	/* return to host */
 	local_irq_enable();
 	return r;
 }
 EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
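
Assembled from the hunks above, kvmppc_prepare_to_enter() now reads
roughly as follows (a sketch; the signal and request checks in the middle
of the loop are elided to a placeholder):

	int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
	{
		int r;

		WARN_ON(irqs_disabled());
		hard_irq_disable();	/* fully-enabled -> hard-disabled */

		while (true) {
			if (need_resched()) {
				local_irq_enable();
				cond_resched();
				hard_irq_disable();
				continue;
			}

			if (/* elided: pending signal or failed request */ 0) {
				r = -EINTR;	/* or r <= 0 from the checks */
				break;
			}

			kvm_guest_enter();
			return 1;	/* enter guest, IRQs still hard-disabled */
		}

		/* heavyweight exit: back to the host with IRQs on */
		local_irq_enable();
		return r;
	}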