Merge branch 'for-upstream' of git://github.com/agraf/linux-2.6 into next

PPC updates from Alex.

* 'for-upstream' of git://github.com/agraf/linux-2.6:
  KVM: PPC: Emulator: clean up SPR reads and writes
  KVM: PPC: Emulator: clean up instruction parsing
  kvm/powerpc: Add new ioctl to retrieve server MMU info
  kvm/book3s: Make kernel emulated H_PUT_TCE available for "PR" KVM
  KVM: PPC: bookehv: Fix r8/r13 storing in level exception handler
  KVM: PPC: Book3S: Enable IRQs during exit handling
  KVM: PPC: Fix PR KVM on POWER7 bare metal
  KVM: PPC: Fix stbux emulation
  KVM: PPC: bookehv: Use lwz/stw instead of PPC_LL/PPC_STL for 32-bit fields
  KVM: PPC: Book3S: PR: No isync in slbie path
  KVM: PPC: Book3S: PR: Optimize entry path
  KVM: PPC: booke(hv): Fix save/restore of guest accessible SPRGs.
  KVM: PPC: Restrict PPC_[L|ST]D macro to asm code
  KVM: PPC: bookehv: Use a Macro for saving/restoring guest registers to/from their 64 bit copies.
  KVM: PPC: Use clockevent multiplier and shifter for decrementer
  KVM: Use minimum and maximum address mapped by TLB1

Signed-off-by: Avi Kivity <avi@redhat.com>
commit f2569053e0
Author: Avi Kivity
Date:   2012-05-08 17:00:13 +03:00

27 changed files with 782 additions and 462 deletions

View File

@ -1860,6 +1860,76 @@ See KVM_GET_PIT2 for details on struct kvm_pit_state2.
This IOCTL replaces the obsolete KVM_SET_PIT.
4.74 KVM_PPC_GET_SMMU_INFO
Capability: KVM_CAP_PPC_GET_SMMU_INFO
Architectures: powerpc
Type: vm ioctl
Parameters: None
Returns: 0 on success, -1 on error
This populates and returns a structure describing the features of
the "Server" class MMU emulation supported by KVM.
This can in turn be used by userspace to generate the appropriate
device-tree properties for the guest operating system.
The structure contains some global information, followed by an
array of supported segment page sizes:
struct kvm_ppc_smmu_info {
__u64 flags;
__u32 slb_size;
__u32 pad;
struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
};
The supported flags are:
- KVM_PPC_PAGE_SIZES_REAL:
When that flag is set, guest page sizes must "fit" the backing
store page sizes. When it is not set, any page size in the list can
be used regardless of how it is backed by userspace.
- KVM_PPC_1T_SEGMENTS:
The emulated MMU supports 1T segments in addition to the
standard 256M ones.
The "slb_size" field indicates how many SLB entries are supported
The "sps" array contains 8 entries indicating the supported base
page sizes for a segment in increasing order. Each entry is defined
as follow:
struct kvm_ppc_one_seg_page_size {
__u32 page_shift; /* Base page shift of segment (or 0) */
__u32 slb_enc; /* SLB encoding for BookS */
struct kvm_ppc_one_page_size enc[KVM_PPC_PAGE_SIZES_MAX_SZ];
};
An entry with a "page_shift" of 0 is unused. Because the array is
organized in increasing order, a lookup can stop when encountering
such an entry.
The "slb_enc" field provides the encoding to use in the SLB for the
page size. The bits are in positions such as the value can directly
be OR'ed into the "vsid" argument of the slbmte instruction.
The "enc" array is a list which for each of those segment base page
size provides the list of supported actual page sizes (which can be
only larger or equal to the base page size), along with the
corresponding encoding in the hash PTE. Similarily, the array is
8 entries sorted by increasing sizes and an entry with a "0" shift
is an empty entry and a terminator:
struct kvm_ppc_one_page_size {
__u32 page_shift; /* Page shift (or 0) */
__u32 pte_enc; /* Encoding in the HPTE (>>12) */
};
The "pte_enc" field provides a value that can OR'ed into the hash
PTE's RPN field (ie, it needs to be shifted left by 12 to OR it
into the hash PTE second double word).
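As an illustration, here is a minimal userspace sketch (illustrative
only; it assumes a powerpc host whose <linux/kvm.h> carries the
definitions above, and it elides most error handling) that queries and
prints the supported segment page sizes:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
        struct kvm_ppc_smmu_info info;
        int i, kvm, vm;

        kvm = open("/dev/kvm", O_RDWR);
        vm = ioctl(kvm, KVM_CREATE_VM, 0);
        if (ioctl(vm, KVM_PPC_GET_SMMU_INFO, &info) < 0) {
            perror("KVM_PPC_GET_SMMU_INFO");
            return 1;
        }
        printf("slb_size: %u, 1T segments: %s\n", info.slb_size,
               (info.flags & KVM_PPC_1T_SEGMENTS) ? "yes" : "no");
        /* sps[] is sorted by increasing size; page_shift == 0 terminates */
        for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
            if (!info.sps[i].page_shift)
                break;
            printf("base page shift %u, slb_enc 0x%x\n",
                   info.sps[i].page_shift, info.sps[i].slb_enc);
        }
        return 0;
    }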
5. The kvm_run structure
------------------------

View File

@ -20,6 +20,16 @@
#ifndef __POWERPC_KVM_ASM_H__
#define __POWERPC_KVM_ASM_H__
#ifdef __ASSEMBLY__
#ifdef CONFIG_64BIT
#define PPC_STD(sreg, offset, areg) std sreg, (offset)(areg)
#define PPC_LD(treg, offset, areg) ld treg, (offset)(areg)
#else
#define PPC_STD(sreg, offset, areg) stw sreg, (offset+4)(areg)
#define PPC_LD(treg, offset, areg) lwz treg, (offset+4)(areg)
#endif
#endif
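/* Illustration: on 32-bit (big-endian) kernels only the low word of a
 * 64-bit shared-area field is live, at (offset + 4); for example,
 * PPC_LD(r9, VCPU_SHARED_MSR, r11) expands to
 * "lwz r9, (VCPU_SHARED_MSR + 4)(r11)" there. */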
/* IVPR must be 64KiB-aligned. */
#define VCPU_SIZE_ORDER 4
#define VCPU_SIZE_LOG (VCPU_SIZE_ORDER + 12)

View File

@ -237,7 +237,6 @@ struct kvm_arch {
unsigned long vrma_slb_v;
int rma_setup_done;
int using_mmu_notifiers;
struct list_head spapr_tce_tables;
spinlock_t slot_phys_lock;
unsigned long *slot_phys[KVM_MEM_SLOTS_NUM];
int slot_npages[KVM_MEM_SLOTS_NUM];
@ -245,6 +244,9 @@ struct kvm_arch {
struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
struct kvmppc_linear_info *hpt_li;
#endif /* CONFIG_KVM_BOOK3S_64_HV */
#ifdef CONFIG_PPC_BOOK3S_64
struct list_head spapr_tce_tables;
#endif
};
/*

View File

@ -107,8 +107,10 @@ extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int op, int *advance);
extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn,
ulong val);
extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn,
ulong *val);
extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);
@ -126,6 +128,8 @@ extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvm_create_spapr_tce *args);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba, unsigned long tce);
extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
struct kvm_allocate_rma *rma);
extern struct kvmppc_linear_info *kvm_alloc_rma(void);
@ -138,6 +142,8 @@ extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
struct kvm_ppc_smmu_info *info);
extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

View File

@ -23,6 +23,7 @@
extern unsigned long tb_ticks_per_jiffy;
extern unsigned long tb_ticks_per_usec;
extern unsigned long tb_ticks_per_sec;
extern struct clock_event_device decrementer_clockevent;
struct rtc_time;
extern void to_tm(int tim, struct rtc_time * tm);

View File

@ -190,3 +190,7 @@ EXPORT_SYMBOL(__arch_hweight16);
EXPORT_SYMBOL(__arch_hweight32);
EXPORT_SYMBOL(__arch_hweight64);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
EXPORT_SYMBOL_GPL(mmu_psize_defs);
#endif

View File

@ -100,7 +100,7 @@ static int decrementer_set_next_event(unsigned long evt,
static void decrementer_set_mode(enum clock_event_mode mode,
struct clock_event_device *dev);
static struct clock_event_device decrementer_clockevent = {
struct clock_event_device decrementer_clockevent = {
.name = "decrementer",
.rating = 200,
.irq = 0,
@ -108,6 +108,7 @@ static struct clock_event_device decrementer_clockevent = {
.set_mode = decrementer_set_mode,
.features = CLOCK_EVT_FEAT_ONESHOT,
};
EXPORT_SYMBOL(decrementer_clockevent);
DEFINE_PER_CPU(u64, decrementers_next_tb);
static DEFINE_PER_CPU(struct clock_event_device, decrementers);

View File

@ -37,22 +37,19 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
int dcrn;
int ra;
int rb;
int rc;
int rs;
int rt;
int ws;
int dcrn = get_dcrn(inst);
int ra = get_ra(inst);
int rb = get_rb(inst);
int rc = get_rc(inst);
int rs = get_rs(inst);
int rt = get_rt(inst);
int ws = get_ws(inst);
switch (get_op(inst)) {
case 31:
switch (get_xop(inst)) {
case XOP_MFDCR:
dcrn = get_dcrn(inst);
rt = get_rt(inst);
/* The guest may access CPR0 registers to determine the timebase
* frequency, and it must know the real host frequency because it
* can directly access the timebase registers.
@ -88,9 +85,6 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case XOP_MTDCR:
dcrn = get_dcrn(inst);
rs = get_rs(inst);
/* emulate some access in kernel */
switch (dcrn) {
case DCRN_CPR0_CONFIG_ADDR:
@ -108,17 +102,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case XOP_TLBWE:
ra = get_ra(inst);
rs = get_rs(inst);
ws = get_ws(inst);
emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws);
break;
case XOP_TLBSX:
rt = get_rt(inst);
ra = get_ra(inst);
rb = get_rb(inst);
rc = get_rc(inst);
emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc);
break;
@ -141,41 +128,41 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated;
}
int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
int emulated = EMULATE_DONE;
switch (sprn) {
case SPRN_PID:
kvmppc_set_pid(vcpu, kvmppc_get_gpr(vcpu, rs)); break;
kvmppc_set_pid(vcpu, spr_val); break;
case SPRN_MMUCR:
vcpu->arch.mmucr = kvmppc_get_gpr(vcpu, rs); break;
vcpu->arch.mmucr = spr_val; break;
case SPRN_CCR0:
vcpu->arch.ccr0 = kvmppc_get_gpr(vcpu, rs); break;
vcpu->arch.ccr0 = spr_val; break;
case SPRN_CCR1:
vcpu->arch.ccr1 = kvmppc_get_gpr(vcpu, rs); break;
vcpu->arch.ccr1 = spr_val; break;
default:
emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs);
emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
}
return emulated;
}
int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
int emulated = EMULATE_DONE;
switch (sprn) {
case SPRN_PID:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.pid); break;
*spr_val = vcpu->arch.pid; break;
case SPRN_MMUCR:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.mmucr); break;
*spr_val = vcpu->arch.mmucr; break;
case SPRN_CCR0:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr0); break;
*spr_val = vcpu->arch.ccr0; break;
case SPRN_CCR1:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr1); break;
*spr_val = vcpu->arch.ccr1; break;
default:
emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
}
return emulated;

View File

@ -54,6 +54,7 @@ kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
book3s_paired_singles.o \
book3s_pr.o \
book3s_pr_papr.o \
book3s_64_vio_hv.o \
book3s_emulate.o \
book3s_interrupts.o \
book3s_mmu_hpte.o \
@ -78,6 +79,7 @@ kvm-book3s_64-module-objs := \
powerpc.o \
emulate.o \
book3s.o \
book3s_64_vio.o \
$(kvm-book3s_64-objs-y)
kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-module-objs)

View File

@ -90,8 +90,6 @@ slb_exit_skip_ ## num:
or r10, r10, r12
slbie r10
isync
/* Fill SLB with our shadow */
lbz r12, SVCPU_SLB_MAX(r3)

View File

@ -0,0 +1,150 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
* Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
*/
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64))
static long kvmppc_stt_npages(unsigned long window_size)
{
return ALIGN((window_size >> SPAPR_TCE_SHIFT)
* sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}
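/* Worked sizing example (illustrative numbers, assuming SPAPR_TCE_SHIFT
 * is 12, i.e. one TCE per 4 KiB page): a 1 GiB DMA window holds
 * (1 GiB >> 12) = 262144 TCEs; at sizeof(u64) = 8 bytes each that is
 * 2 MiB of table, which the function above turns into 512 pages. */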
static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
{
struct kvm *kvm = stt->kvm;
int i;
mutex_lock(&kvm->lock);
list_del(&stt->list);
for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
__free_page(stt->pages[i]);
kfree(stt);
mutex_unlock(&kvm->lock);
kvm_put_kvm(kvm);
}
static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
struct page *page;
if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
return VM_FAULT_SIGBUS;
page = stt->pages[vmf->pgoff];
get_page(page);
vmf->page = page;
return 0;
}
static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
.fault = kvm_spapr_tce_fault,
};
static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
vma->vm_ops = &kvm_spapr_tce_vm_ops;
return 0;
}
static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
struct kvmppc_spapr_tce_table *stt = filp->private_data;
release_spapr_tce_table(stt);
return 0;
}
static struct file_operations kvm_spapr_tce_fops = {
.mmap = kvm_spapr_tce_mmap,
.release = kvm_spapr_tce_release,
};
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvm_create_spapr_tce *args)
{
struct kvmppc_spapr_tce_table *stt = NULL;
long npages;
int ret = -ENOMEM;
int i;
/* Check this LIOBN hasn't been previously allocated */
list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
if (stt->liobn == args->liobn)
return -EBUSY;
}
npages = kvmppc_stt_npages(args->window_size);
stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
GFP_KERNEL);
if (!stt)
goto fail;
stt->liobn = args->liobn;
stt->window_size = args->window_size;
stt->kvm = kvm;
for (i = 0; i < npages; i++) {
stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!stt->pages[i])
goto fail;
}
kvm_get_kvm(kvm);
mutex_lock(&kvm->lock);
list_add(&stt->list, &kvm->arch.spapr_tce_tables);
mutex_unlock(&kvm->lock);
return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
stt, O_RDWR);
fail:
if (stt) {
for (i = 0; i < npages; i++)
if (stt->pages[i])
__free_page(stt->pages[i]);
kfree(stt);
}
return ret;
}
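/* Illustrative flow (not part of this commit): userspace issues the
 * KVM_CREATE_SPAPR_TCE vm ioctl with a liobn and window_size, then
 * mmap()s the returned fd to get a direct view of the TCE table that
 * kvmppc_h_put_tce() updates on guest H_PUT_TCE hcalls. */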

View File

@ -38,6 +38,9 @@
#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64))
/* WARNING: This will be called in real-mode on HV KVM and virtual
* mode on PR KVM
*/
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba, unsigned long tce)
{

View File

@ -87,6 +87,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
int rt = get_rt(inst);
int rs = get_rs(inst);
int ra = get_ra(inst);
int rb = get_rb(inst);
switch (get_op(inst)) {
case 19:
@ -106,21 +110,22 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
case 31:
switch (get_xop(inst)) {
case OP_31_XOP_MFMSR:
kvmppc_set_gpr(vcpu, get_rt(inst),
vcpu->arch.shared->msr);
kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
break;
case OP_31_XOP_MTMSRD:
{
ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst));
ulong rs_val = kvmppc_get_gpr(vcpu, rs);
if (inst & 0x10000) {
vcpu->arch.shared->msr &= ~(MSR_RI | MSR_EE);
vcpu->arch.shared->msr |= rs & (MSR_RI | MSR_EE);
ulong new_msr = vcpu->arch.shared->msr;
new_msr &= ~(MSR_RI | MSR_EE);
new_msr |= rs_val & (MSR_RI | MSR_EE);
vcpu->arch.shared->msr = new_msr;
} else
kvmppc_set_msr(vcpu, rs);
kvmppc_set_msr(vcpu, rs_val);
break;
}
case OP_31_XOP_MTMSR:
kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst)));
kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
break;
case OP_31_XOP_MFSR:
{
@ -130,7 +135,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (vcpu->arch.mmu.mfsrin) {
u32 sr;
sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
kvmppc_set_gpr(vcpu, get_rt(inst), sr);
kvmppc_set_gpr(vcpu, rt, sr);
}
break;
}
@ -138,29 +143,29 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
{
int srnum;
srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf;
srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
if (vcpu->arch.mmu.mfsrin) {
u32 sr;
sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
kvmppc_set_gpr(vcpu, get_rt(inst), sr);
kvmppc_set_gpr(vcpu, rt, sr);
}
break;
}
case OP_31_XOP_MTSR:
vcpu->arch.mmu.mtsrin(vcpu,
(inst >> 16) & 0xf,
kvmppc_get_gpr(vcpu, get_rs(inst)));
kvmppc_get_gpr(vcpu, rs));
break;
case OP_31_XOP_MTSRIN:
vcpu->arch.mmu.mtsrin(vcpu,
(kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf,
kvmppc_get_gpr(vcpu, get_rs(inst)));
(kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
kvmppc_get_gpr(vcpu, rs));
break;
case OP_31_XOP_TLBIE:
case OP_31_XOP_TLBIEL:
{
bool large = (inst & 0x00200000) ? true : false;
ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst));
ulong addr = kvmppc_get_gpr(vcpu, rb);
vcpu->arch.mmu.tlbie(vcpu, addr, large);
break;
}
@ -171,15 +176,15 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
return EMULATE_FAIL;
vcpu->arch.mmu.slbmte(vcpu,
kvmppc_get_gpr(vcpu, get_rs(inst)),
kvmppc_get_gpr(vcpu, get_rb(inst)));
kvmppc_get_gpr(vcpu, rs),
kvmppc_get_gpr(vcpu, rb));
break;
case OP_31_XOP_SLBIE:
if (!vcpu->arch.mmu.slbie)
return EMULATE_FAIL;
vcpu->arch.mmu.slbie(vcpu,
kvmppc_get_gpr(vcpu, get_rb(inst)));
kvmppc_get_gpr(vcpu, rb));
break;
case OP_31_XOP_SLBIA:
if (!vcpu->arch.mmu.slbia)
@ -191,22 +196,22 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (!vcpu->arch.mmu.slbmfee) {
emulated = EMULATE_FAIL;
} else {
ulong t, rb;
ulong t, rb_val;
rb = kvmppc_get_gpr(vcpu, get_rb(inst));
t = vcpu->arch.mmu.slbmfee(vcpu, rb);
kvmppc_set_gpr(vcpu, get_rt(inst), t);
rb_val = kvmppc_get_gpr(vcpu, rb);
t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
kvmppc_set_gpr(vcpu, rt, t);
}
break;
case OP_31_XOP_SLBMFEV:
if (!vcpu->arch.mmu.slbmfev) {
emulated = EMULATE_FAIL;
} else {
ulong t, rb;
ulong t, rb_val;
rb = kvmppc_get_gpr(vcpu, get_rb(inst));
t = vcpu->arch.mmu.slbmfev(vcpu, rb);
kvmppc_set_gpr(vcpu, get_rt(inst), t);
rb_val = kvmppc_get_gpr(vcpu, rb);
t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
kvmppc_set_gpr(vcpu, rt, t);
}
break;
case OP_31_XOP_DCBA:
@ -214,17 +219,17 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case OP_31_XOP_DCBZ:
{
ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst));
ulong ra = 0;
ulong rb_val = kvmppc_get_gpr(vcpu, rb);
ulong ra_val = 0;
ulong addr, vaddr;
u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
u32 dsisr;
int r;
if (get_ra(inst))
ra = kvmppc_get_gpr(vcpu, get_ra(inst));
if (ra)
ra_val = kvmppc_get_gpr(vcpu, ra);
addr = (ra + rb) & ~31ULL;
addr = (ra_val + rb_val) & ~31ULL;
if (!(vcpu->arch.shared->msr & MSR_SF))
addr &= 0xffffffff;
vaddr = addr;
@ -313,10 +318,9 @@ static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
return bat;
}
int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
int emulated = EMULATE_DONE;
ulong spr_val = kvmppc_get_gpr(vcpu, rs);
switch (sprn) {
case SPRN_SDR1:
@ -428,7 +432,7 @@ unprivileged:
return emulated;
}
int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
int emulated = EMULATE_DONE;
@ -441,46 +445,46 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);
if (sprn % 2)
kvmppc_set_gpr(vcpu, rt, bat->raw >> 32);
*spr_val = bat->raw >> 32;
else
kvmppc_set_gpr(vcpu, rt, bat->raw);
*spr_val = bat->raw;
break;
}
case SPRN_SDR1:
if (!spr_allowed(vcpu, PRIV_HYPER))
goto unprivileged;
kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1);
*spr_val = to_book3s(vcpu)->sdr1;
break;
case SPRN_DSISR:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dsisr);
*spr_val = vcpu->arch.shared->dsisr;
break;
case SPRN_DAR:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar);
*spr_val = vcpu->arch.shared->dar;
break;
case SPRN_HIOR:
kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior);
*spr_val = to_book3s(vcpu)->hior;
break;
case SPRN_HID0:
kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[0]);
*spr_val = to_book3s(vcpu)->hid[0];
break;
case SPRN_HID1:
kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]);
*spr_val = to_book3s(vcpu)->hid[1];
break;
case SPRN_HID2:
case SPRN_HID2_GEKKO:
kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]);
*spr_val = to_book3s(vcpu)->hid[2];
break;
case SPRN_HID4:
case SPRN_HID4_GEKKO:
kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]);
*spr_val = to_book3s(vcpu)->hid[4];
break;
case SPRN_HID5:
kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]);
*spr_val = to_book3s(vcpu)->hid[5];
break;
case SPRN_CFAR:
case SPRN_PURR:
kvmppc_set_gpr(vcpu, rt, 0);
*spr_val = 0;
break;
case SPRN_GQR0:
case SPRN_GQR1:
@ -490,8 +494,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
case SPRN_GQR5:
case SPRN_GQR6:
case SPRN_GQR7:
kvmppc_set_gpr(vcpu, rt,
to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]);
*spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
break;
case SPRN_THRM1:
case SPRN_THRM2:
@ -506,7 +509,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
case SPRN_PMC3_GEKKO:
case SPRN_PMC4_GEKKO:
case SPRN_WPAR_GEKKO:
kvmppc_set_gpr(vcpu, rt, 0);
*spr_val = 0;
break;
default:
unprivileged:
@ -565,23 +568,22 @@ u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
{
ulong dar = 0;
ulong ra;
ulong ra = get_ra(inst);
ulong rb = get_rb(inst);
switch (get_op(inst)) {
case OP_LFS:
case OP_LFD:
case OP_STFD:
case OP_STFS:
ra = get_ra(inst);
if (ra)
dar = kvmppc_get_gpr(vcpu, ra);
dar += (s32)((s16)inst);
break;
case 31:
ra = get_ra(inst);
if (ra)
dar = kvmppc_get_gpr(vcpu, ra);
dar += kvmppc_get_gpr(vcpu, get_rb(inst));
dar += kvmppc_get_gpr(vcpu, rb);
break;
default:
printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);

View File

@ -1093,115 +1093,6 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
return r;
}
static long kvmppc_stt_npages(unsigned long window_size)
{
return ALIGN((window_size >> SPAPR_TCE_SHIFT)
* sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}
static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
{
struct kvm *kvm = stt->kvm;
int i;
mutex_lock(&kvm->lock);
list_del(&stt->list);
for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
__free_page(stt->pages[i]);
kfree(stt);
mutex_unlock(&kvm->lock);
kvm_put_kvm(kvm);
}
static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
struct page *page;
if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
return VM_FAULT_SIGBUS;
page = stt->pages[vmf->pgoff];
get_page(page);
vmf->page = page;
return 0;
}
static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
.fault = kvm_spapr_tce_fault,
};
static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
vma->vm_ops = &kvm_spapr_tce_vm_ops;
return 0;
}
static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
struct kvmppc_spapr_tce_table *stt = filp->private_data;
release_spapr_tce_table(stt);
return 0;
}
static struct file_operations kvm_spapr_tce_fops = {
.mmap = kvm_spapr_tce_mmap,
.release = kvm_spapr_tce_release,
};
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvm_create_spapr_tce *args)
{
struct kvmppc_spapr_tce_table *stt = NULL;
long npages;
int ret = -ENOMEM;
int i;
/* Check this LIOBN hasn't been previously allocated */
list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
if (stt->liobn == args->liobn)
return -EBUSY;
}
npages = kvmppc_stt_npages(args->window_size);
stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
GFP_KERNEL);
if (!stt)
goto fail;
stt->liobn = args->liobn;
stt->window_size = args->window_size;
stt->kvm = kvm;
for (i = 0; i < npages; i++) {
stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!stt->pages[i])
goto fail;
}
kvm_get_kvm(kvm);
mutex_lock(&kvm->lock);
list_add(&stt->list, &kvm->arch.spapr_tce_tables);
mutex_unlock(&kvm->lock);
return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
stt, O_RDWR);
fail:
if (stt) {
for (i = 0; i < npages; i++)
if (stt->pages[i])
__free_page(stt->pages[i]);
kfree(stt);
}
return ret;
}
/* Work out RMLS (real mode limit selector) field value for a given RMA size.
Assumes POWER7 or PPC970. */
@ -1284,6 +1175,38 @@ long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
return fd;
}
static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
int linux_psize)
{
struct mmu_psize_def *def = &mmu_psize_defs[linux_psize];
if (!def->shift)
return;
(*sps)->page_shift = def->shift;
(*sps)->slb_enc = def->sllp;
(*sps)->enc[0].page_shift = def->shift;
(*sps)->enc[0].pte_enc = def->penc;
(*sps)++;
}
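/* Example (illustrative): on a host without 64K page support,
 * mmu_psize_defs[MMU_PAGE_64K].shift is 0, so that call below becomes a
 * no-op and only the 4K and 16M entries are emitted; advancing the
 * *sps cursor lets the caller chain calls without tracking indices. */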
int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
{
struct kvm_ppc_one_seg_page_size *sps;
info->flags = KVM_PPC_PAGE_SIZES_REAL;
if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
info->flags |= KVM_PPC_1T_SEGMENTS;
info->slb_size = mmu_slb_size;
/* We only support these sizes for now, and no multi-size segments */
sps = &info->sps[0];
kvmppc_add_seg_page_size(&sps, MMU_PAGE_4K);
kvmppc_add_seg_page_size(&sps, MMU_PAGE_64K);
kvmppc_add_seg_page_size(&sps, MMU_PAGE_16M);
return 0;
}
/*
* Get (and clear) the dirty memory log for a memory slot.
*/
@ -1582,12 +1505,12 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
return EMULATE_FAIL;
}
int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
return EMULATE_FAIL;
}
int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
return EMULATE_FAIL;
}

View File

@ -548,6 +548,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
run->exit_reason = KVM_EXIT_UNKNOWN;
run->ready_for_interrupt_injection = 1;
/* We get here with MSR.EE=0, so enable it to be a nice citizen */
__hard_irq_enable();
trace_kvm_book3s_exit(exit_nr, vcpu);
preempt_enable();
kvm_resched(vcpu);
@ -1155,6 +1158,31 @@ out:
return r;
}
#ifdef CONFIG_PPC64
int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
{
/* No flags */
info->flags = 0;
/* SLB is always 64 entries */
info->slb_size = 64;
/* Standard 4k base page size segment */
info->sps[0].page_shift = 12;
info->sps[0].slb_enc = 0;
info->sps[0].enc[0].page_shift = 12;
info->sps[0].enc[0].pte_enc = 0;
/* Standard 16M large page size segment */
info->sps[1].page_shift = 24;
info->sps[1].slb_enc = SLB_VSID_L;
info->sps[1].enc[0].page_shift = 24;
info->sps[1].enc[0].pte_enc = 0;
return 0;
}
#endif /* CONFIG_PPC64 */
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem)
{
@ -1168,11 +1196,18 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
int kvmppc_core_init_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
#endif
return 0;
}
void kvmppc_core_destroy_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}
static int kvmppc_book3s_init(void)

View File

@ -15,6 +15,8 @@
* published by the Free Software Foundation.
*/
#include <linux/anon_inodes.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
@ -211,6 +213,20 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
return EMULATE_DONE;
}
static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
{
unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
unsigned long tce = kvmppc_get_gpr(vcpu, 6);
long rc;
rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
if (rc == H_TOO_HARD)
return EMULATE_FAIL;
kvmppc_set_gpr(vcpu, 3, rc);
return EMULATE_DONE;
}
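/* H_TOO_HARD signals that the kernel could not complete the TCE update
 * (e.g. a liobn with no registered TCE table), so EMULATE_FAIL is
 * returned and the H_PUT_TCE hcall is punted to userspace. */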
int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
{
switch (cmd) {
@ -222,6 +238,8 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
return kvmppc_h_pr_protect(vcpu);
case H_BULK_REMOVE:
return kvmppc_h_pr_bulk_remove(vcpu);
case H_PUT_TCE:
return kvmppc_h_pr_put_tce(vcpu);
case H_CEDE:
kvm_vcpu_block(vcpu);
clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

View File

@ -128,24 +128,25 @@ no_dcbz32_on:
/* First clear RI in our current MSR value */
li r0, MSR_RI
andc r6, r6, r0
MTMSR_EERI(r6)
mtsrr0 r9
mtsrr1 r4
PPC_LL r0, SVCPU_R0(r3)
PPC_LL r1, SVCPU_R1(r3)
PPC_LL r2, SVCPU_R2(r3)
PPC_LL r4, SVCPU_R4(r3)
PPC_LL r5, SVCPU_R5(r3)
PPC_LL r6, SVCPU_R6(r3)
PPC_LL r7, SVCPU_R7(r3)
PPC_LL r8, SVCPU_R8(r3)
PPC_LL r9, SVCPU_R9(r3)
PPC_LL r10, SVCPU_R10(r3)
PPC_LL r11, SVCPU_R11(r3)
PPC_LL r12, SVCPU_R12(r3)
PPC_LL r13, SVCPU_R13(r3)
MTMSR_EERI(r6)
mtsrr0 r9
mtsrr1 r4
PPC_LL r4, SVCPU_R4(r3)
PPC_LL r6, SVCPU_R6(r3)
PPC_LL r9, SVCPU_R9(r3)
PPC_LL r3, (SVCPU_R3)(r3)
RFI
@ -197,6 +198,7 @@ kvmppc_interrupt:
/* Save guest PC and MSR */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
mr r10, r12
andi. r0,r12,0x2
beq 1f
mfspr r3,SPRN_HSRR0
@ -316,23 +318,17 @@ no_dcbz32_off:
* Having set up SRR0/1 with the address where we want
* to continue with relocation on (potentially in module
* space), we either just go straight there with rfi[d],
* or we jump to an interrupt handler with bctr if there
* is an interrupt to be handled first. In the latter
* case, the rfi[d] at the end of the interrupt handler
* will get us back to where we want to continue.
* or we jump to an interrupt handler if there is an
* interrupt to be handled first. In the latter case,
* the rfi[d] at the end of the interrupt handler will
* get us back to where we want to continue.
*/
cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
beq 1f
cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER
beq 1f
cmpwi r12, BOOK3S_INTERRUPT_PERFMON
1: mtctr r12
/* Register usage at this point:
*
* R1 = host R1
* R2 = host R2
* R10 = raw exit handler id
* R12 = exit handler id
* R13 = shadow vcpu (32-bit) or PACA (64-bit)
* SVCPU.* = guest *
@ -342,12 +338,26 @@ no_dcbz32_off:
PPC_LL r6, HSTATE_HOST_MSR(r13)
PPC_LL r8, HSTATE_VMHANDLER(r13)
/* Restore host msr -> SRR1 */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
andi. r0,r10,0x2
beq 1f
mtspr SPRN_HSRR1, r6
mtspr SPRN_HSRR0, r8
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
#endif
1: /* Restore host msr -> SRR1 */
mtsrr1 r6
/* Load highmem handler address */
mtsrr0 r8
/* RFI into the highmem handler, or jump to interrupt handler */
beqctr
cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
beqa BOOK3S_INTERRUPT_EXTERNAL
cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER
beqa BOOK3S_INTERRUPT_DECREMENTER
cmpwi r12, BOOK3S_INTERRUPT_PERFMON
beqa BOOK3S_INTERRUPT_PERFMON
RFI
kvmppc_handler_trampoline_exit_end:

View File

@ -75,8 +75,8 @@ void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
/* low-level asm code to transfer guest state */
void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu);

View File

@ -40,8 +40,8 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
int rs;
int rt;
int rs = get_rs(inst);
int rt = get_rt(inst);
switch (get_op(inst)) {
case 19:
@ -62,19 +62,16 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
switch (get_xop(inst)) {
case OP_31_XOP_MFMSR:
rt = get_rt(inst);
kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
break;
case OP_31_XOP_MTMSR:
rs = get_rs(inst);
kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
break;
case OP_31_XOP_WRTEE:
rs = get_rs(inst);
vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
| (kvmppc_get_gpr(vcpu, rs) & MSR_EE);
kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
@ -105,22 +102,26 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
* will return the wrong result if called for them in another context
* (such as debugging).
*/
int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
int emulated = EMULATE_DONE;
ulong spr_val = kvmppc_get_gpr(vcpu, rs);
switch (sprn) {
case SPRN_DEAR:
vcpu->arch.shared->dar = spr_val; break;
vcpu->arch.shared->dar = spr_val;
break;
case SPRN_ESR:
vcpu->arch.shared->esr = spr_val; break;
vcpu->arch.shared->esr = spr_val;
break;
case SPRN_DBCR0:
vcpu->arch.dbcr0 = spr_val; break;
vcpu->arch.dbcr0 = spr_val;
break;
case SPRN_DBCR1:
vcpu->arch.dbcr1 = spr_val; break;
vcpu->arch.dbcr1 = spr_val;
break;
case SPRN_DBSR:
vcpu->arch.dbsr &= ~spr_val; break;
vcpu->arch.dbsr &= ~spr_val;
break;
case SPRN_TSR:
kvmppc_clr_tsr_bits(vcpu, spr_val);
break;
@ -134,13 +135,17 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
* guest (PR-mode only).
*/
case SPRN_SPRG4:
vcpu->arch.shared->sprg4 = spr_val; break;
vcpu->arch.shared->sprg4 = spr_val;
break;
case SPRN_SPRG5:
vcpu->arch.shared->sprg5 = spr_val; break;
vcpu->arch.shared->sprg5 = spr_val;
break;
case SPRN_SPRG6:
vcpu->arch.shared->sprg6 = spr_val; break;
vcpu->arch.shared->sprg6 = spr_val;
break;
case SPRN_SPRG7:
vcpu->arch.shared->sprg7 = spr_val; break;
vcpu->arch.shared->sprg7 = spr_val;
break;
case SPRN_IVPR:
vcpu->arch.ivpr = spr_val;
@ -210,75 +215,83 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
return emulated;
}
int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
int emulated = EMULATE_DONE;
switch (sprn) {
case SPRN_IVPR:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break;
*spr_val = vcpu->arch.ivpr;
break;
case SPRN_DEAR:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); break;
*spr_val = vcpu->arch.shared->dar;
break;
case SPRN_ESR:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->esr); break;
*spr_val = vcpu->arch.shared->esr;
break;
case SPRN_DBCR0:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break;
*spr_val = vcpu->arch.dbcr0;
break;
case SPRN_DBCR1:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr1); break;
*spr_val = vcpu->arch.dbcr1;
break;
case SPRN_DBSR:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbsr); break;
*spr_val = vcpu->arch.dbsr;
break;
case SPRN_TSR:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.tsr); break;
*spr_val = vcpu->arch.tsr;
break;
case SPRN_TCR:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.tcr); break;
*spr_val = vcpu->arch.tcr;
break;
case SPRN_IVOR0:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]);
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
break;
case SPRN_IVOR1:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]);
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
break;
case SPRN_IVOR2:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
break;
case SPRN_IVOR3:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]);
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
break;
case SPRN_IVOR4:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]);
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
break;
case SPRN_IVOR5:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]);
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
break;
case SPRN_IVOR6:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]);
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
break;
case SPRN_IVOR7:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]);
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
break;
case SPRN_IVOR8:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
break;
case SPRN_IVOR9:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]);
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
break;
case SPRN_IVOR10:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]);
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
break;
case SPRN_IVOR11:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]);
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
break;
case SPRN_IVOR12:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]);
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
break;
case SPRN_IVOR13:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]);
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
break;
case SPRN_IVOR14:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]);
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
break;
case SPRN_IVOR15:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]);
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
break;
default:

View File

@ -419,13 +419,13 @@ lightweight_exit:
* written directly to the shared area, so we
* need to reload them here with the guest's values.
*/
lwz r3, VCPU_SHARED_SPRG4(r5)
PPC_LD(r3, VCPU_SHARED_SPRG4, r5)
mtspr SPRN_SPRG4W, r3
lwz r3, VCPU_SHARED_SPRG5(r5)
PPC_LD(r3, VCPU_SHARED_SPRG5, r5)
mtspr SPRN_SPRG5W, r3
lwz r3, VCPU_SHARED_SPRG6(r5)
PPC_LD(r3, VCPU_SHARED_SPRG6, r5)
mtspr SPRN_SPRG6W, r3
lwz r3, VCPU_SHARED_SPRG7(r5)
PPC_LD(r3, VCPU_SHARED_SPRG7, r5)
mtspr SPRN_SPRG7W, r3
#ifdef CONFIG_KVM_EXIT_TIMING

View File

@ -87,17 +87,13 @@
mfspr r8, SPRN_TBRL
mfspr r9, SPRN_TBRU
cmpw r9, r7
PPC_STL r8, VCPU_TIMING_EXIT_TBL(r4)
stw r8, VCPU_TIMING_EXIT_TBL(r4)
bne- 1b
PPC_STL r9, VCPU_TIMING_EXIT_TBU(r4)
stw r9, VCPU_TIMING_EXIT_TBU(r4)
#endif
oris r8, r6, MSR_CE@h
#ifdef CONFIG_64BIT
std r6, (VCPU_SHARED_MSR)(r11)
#else
stw r6, (VCPU_SHARED_MSR + 4)(r11)
#endif
PPC_STD(r6, VCPU_SHARED_MSR, r11)
ori r8, r8, MSR_ME | MSR_RI
PPC_STL r5, VCPU_PC(r4)
@ -220,7 +216,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
PPC_STL r4, VCPU_GPR(r4)(r11)
PPC_LL r4, THREAD_NORMSAVE(0)(r10)
PPC_STL r5, VCPU_GPR(r5)(r11)
PPC_STL r13, VCPU_CR(r11)
stw r13, VCPU_CR(r11)
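/* CR is a 32-bit field even on 64-bit cores, so stw (not PPC_STL,
 * which would be a 64-bit std there) must be used to avoid
 * clobbering the field that follows it. */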
mfspr r5, \srr0
PPC_STL r3, VCPU_GPR(r10)(r11)
PPC_LL r3, THREAD_NORMSAVE(2)(r10)
@ -247,7 +243,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
PPC_STL r4, VCPU_GPR(r4)(r11)
PPC_LL r4, GPR9(r8)
PPC_STL r5, VCPU_GPR(r5)(r11)
PPC_STL r9, VCPU_CR(r11)
stw r9, VCPU_CR(r11)
mfspr r5, \srr0
PPC_STL r3, VCPU_GPR(r8)(r11)
PPC_LL r3, GPR10(r8)
@ -256,10 +252,10 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
mfspr r6, \srr1
PPC_LL r4, GPR11(r8)
PPC_STL r7, VCPU_GPR(r7)(r11)
PPC_STL r8, VCPU_GPR(r8)(r11)
PPC_STL r3, VCPU_GPR(r10)(r11)
mfctr r7
PPC_STL r12, VCPU_GPR(r12)(r11)
PPC_STL r13, VCPU_GPR(r13)(r11)
PPC_STL r4, VCPU_GPR(r11)(r11)
PPC_STL r7, VCPU_CTR(r11)
mr r4, r11
@ -319,14 +315,14 @@ _GLOBAL(kvmppc_resume_host)
mfspr r6, SPRN_SPRG4
PPC_STL r5, VCPU_LR(r4)
mfspr r7, SPRN_SPRG5
PPC_STL r3, VCPU_VRSAVE(r4)
PPC_STL r6, VCPU_SHARED_SPRG4(r11)
stw r3, VCPU_VRSAVE(r4)
PPC_STD(r6, VCPU_SHARED_SPRG4, r11)
mfspr r8, SPRN_SPRG6
PPC_STL r7, VCPU_SHARED_SPRG5(r11)
PPC_STD(r7, VCPU_SHARED_SPRG5, r11)
mfspr r9, SPRN_SPRG7
PPC_STL r8, VCPU_SHARED_SPRG6(r11)
PPC_STD(r8, VCPU_SHARED_SPRG6, r11)
mfxer r3
PPC_STL r9, VCPU_SHARED_SPRG7(r11)
PPC_STD(r9, VCPU_SHARED_SPRG7, r11)
/* save guest MAS registers and restore host mas4 & mas6 */
mfspr r5, SPRN_MAS0
@ -335,11 +331,7 @@ _GLOBAL(kvmppc_resume_host)
stw r5, VCPU_SHARED_MAS0(r11)
mfspr r7, SPRN_MAS2
stw r6, VCPU_SHARED_MAS1(r11)
#ifdef CONFIG_64BIT
std r7, (VCPU_SHARED_MAS2)(r11)
#else
stw r7, (VCPU_SHARED_MAS2 + 4)(r11)
#endif
PPC_STD(r7, VCPU_SHARED_MAS2, r11)
mfspr r5, SPRN_MAS3
mfspr r6, SPRN_MAS4
stw r5, VCPU_SHARED_MAS7_3+4(r11)
@ -527,11 +519,7 @@ lightweight_exit:
stw r3, VCPU_HOST_MAS6(r4)
lwz r3, VCPU_SHARED_MAS0(r11)
lwz r5, VCPU_SHARED_MAS1(r11)
#ifdef CONFIG_64BIT
ld r6, (VCPU_SHARED_MAS2)(r11)
#else
lwz r6, (VCPU_SHARED_MAS2 + 4)(r11)
#endif
PPC_LD(r6, VCPU_SHARED_MAS2, r11)
lwz r7, VCPU_SHARED_MAS7_3+4(r11)
lwz r8, VCPU_SHARED_MAS4(r11)
mtspr SPRN_MAS0, r3
@ -549,13 +537,13 @@ lightweight_exit:
* SPRGs, so we need to reload them here with the guest's values.
*/
lwz r3, VCPU_VRSAVE(r4)
lwz r5, VCPU_SHARED_SPRG4(r11)
PPC_LD(r5, VCPU_SHARED_SPRG4, r11)
mtspr SPRN_VRSAVE, r3
lwz r6, VCPU_SHARED_SPRG5(r11)
PPC_LD(r6, VCPU_SHARED_SPRG5, r11)
mtspr SPRN_SPRG4W, r5
lwz r7, VCPU_SHARED_SPRG6(r11)
PPC_LD(r7, VCPU_SHARED_SPRG6, r11)
mtspr SPRN_SPRG5W, r6
lwz r8, VCPU_SHARED_SPRG7(r11)
PPC_LD(r8, VCPU_SHARED_SPRG7, r11)
mtspr SPRN_SPRG6W, r7
mtspr SPRN_SPRG7W, r8
@ -563,13 +551,9 @@ lightweight_exit:
PPC_LL r3, VCPU_LR(r4)
PPC_LL r5, VCPU_XER(r4)
PPC_LL r6, VCPU_CTR(r4)
PPC_LL r7, VCPU_CR(r4)
lwz r7, VCPU_CR(r4)
PPC_LL r8, VCPU_PC(r4)
#ifdef CONFIG_64BIT
ld r9, (VCPU_SHARED_MSR)(r11)
#else
lwz r9, (VCPU_SHARED_MSR + 4)(r11)
#endif
PPC_LD(r9, VCPU_SHARED_MSR, r11)
PPC_LL r0, VCPU_GPR(r0)(r4)
PPC_LL r1, VCPU_GPR(r1)(r4)
PPC_LL r2, VCPU_GPR(r2)(r4)
@ -590,9 +574,9 @@ lightweight_exit:
mfspr r9, SPRN_TBRL
mfspr r8, SPRN_TBRU
cmpw r8, r6
PPC_STL r9, VCPU_TIMING_LAST_ENTER_TBL(r4)
stw r9, VCPU_TIMING_LAST_ENTER_TBL(r4)
bne 1b
PPC_STL r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
stw r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
#endif
/*

View File

@ -89,6 +89,10 @@ struct kvmppc_vcpu_e500 {
u64 *g2h_tlb1_map;
unsigned int *h2g_tlb1_rmap;
/* Minimum and maximum address mapped by TLB1 */
unsigned long tlb1_min_eaddr;
unsigned long tlb1_max_eaddr;
#ifdef CONFIG_KVM_E500V2
u32 pid[E500_PID_NUM];

View File

@ -86,9 +86,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
int ra;
int rb;
int rt;
int ra = get_ra(inst);
int rb = get_rb(inst);
int rt = get_rt(inst);
switch (get_op(inst)) {
case 31:
@ -96,11 +96,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
#ifdef CONFIG_KVM_E500MC
case XOP_MSGSND:
emulated = kvmppc_e500_emul_msgsnd(vcpu, get_rb(inst));
emulated = kvmppc_e500_emul_msgsnd(vcpu, rb);
break;
case XOP_MSGCLR:
emulated = kvmppc_e500_emul_msgclr(vcpu, get_rb(inst));
emulated = kvmppc_e500_emul_msgclr(vcpu, rb);
break;
#endif
@ -113,20 +113,14 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case XOP_TLBSX:
rb = get_rb(inst);
emulated = kvmppc_e500_emul_tlbsx(vcpu, rb);
break;
case XOP_TLBILX:
ra = get_ra(inst);
rb = get_rb(inst);
rt = get_rt(inst);
emulated = kvmppc_e500_emul_tlbilx(vcpu, rt, ra, rb);
break;
case XOP_TLBIVAX:
ra = get_ra(inst);
rb = get_rb(inst);
emulated = kvmppc_e500_emul_tlbivax(vcpu, ra, rb);
break;
@ -146,11 +140,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated;
}
int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int emulated = EMULATE_DONE;
ulong spr_val = kvmppc_get_gpr(vcpu, rs);
switch (sprn) {
#ifndef CONFIG_KVM_BOOKE_HV
@ -160,25 +153,32 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
case SPRN_PID1:
if (spr_val != 0)
return EMULATE_FAIL;
vcpu_e500->pid[1] = spr_val; break;
vcpu_e500->pid[1] = spr_val;
break;
case SPRN_PID2:
if (spr_val != 0)
return EMULATE_FAIL;
vcpu_e500->pid[2] = spr_val; break;
vcpu_e500->pid[2] = spr_val;
break;
case SPRN_MAS0:
vcpu->arch.shared->mas0 = spr_val; break;
vcpu->arch.shared->mas0 = spr_val;
break;
case SPRN_MAS1:
vcpu->arch.shared->mas1 = spr_val; break;
vcpu->arch.shared->mas1 = spr_val;
break;
case SPRN_MAS2:
vcpu->arch.shared->mas2 = spr_val; break;
vcpu->arch.shared->mas2 = spr_val;
break;
case SPRN_MAS3:
vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff;
vcpu->arch.shared->mas7_3 |= spr_val;
break;
case SPRN_MAS4:
vcpu->arch.shared->mas4 = spr_val; break;
vcpu->arch.shared->mas4 = spr_val;
break;
case SPRN_MAS6:
vcpu->arch.shared->mas6 = spr_val; break;
vcpu->arch.shared->mas6 = spr_val;
break;
case SPRN_MAS7:
vcpu->arch.shared->mas7_3 &= (u64)0xffffffff;
vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32;
@ -189,11 +189,14 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC);
break;
case SPRN_L1CSR1:
vcpu_e500->l1csr1 = spr_val; break;
vcpu_e500->l1csr1 = spr_val;
break;
case SPRN_HID0:
vcpu_e500->hid0 = spr_val; break;
vcpu_e500->hid0 = spr_val;
break;
case SPRN_HID1:
vcpu_e500->hid1 = spr_val; break;
vcpu_e500->hid1 = spr_val;
break;
case SPRN_MMUCSR0:
emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
@ -222,90 +225,103 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
break;
#endif
default:
emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs);
emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
}
return emulated;
}
int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int emulated = EMULATE_DONE;
switch (sprn) {
#ifndef CONFIG_KVM_BOOKE_HV
unsigned long val;
case SPRN_PID:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[0]); break;
*spr_val = vcpu_e500->pid[0];
break;
case SPRN_PID1:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[1]); break;
*spr_val = vcpu_e500->pid[1];
break;
case SPRN_PID2:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break;
*spr_val = vcpu_e500->pid[2];
break;
case SPRN_MAS0:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas0); break;
*spr_val = vcpu->arch.shared->mas0;
break;
case SPRN_MAS1:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas1); break;
*spr_val = vcpu->arch.shared->mas1;
break;
case SPRN_MAS2:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas2); break;
*spr_val = vcpu->arch.shared->mas2;
break;
case SPRN_MAS3:
val = (u32)vcpu->arch.shared->mas7_3;
kvmppc_set_gpr(vcpu, rt, val);
*spr_val = (u32)vcpu->arch.shared->mas7_3;
break;
case SPRN_MAS4:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas4); break;
*spr_val = vcpu->arch.shared->mas4;
break;
case SPRN_MAS6:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas6); break;
*spr_val = vcpu->arch.shared->mas6;
break;
case SPRN_MAS7:
val = vcpu->arch.shared->mas7_3 >> 32;
kvmppc_set_gpr(vcpu, rt, val);
*spr_val = vcpu->arch.shared->mas7_3 >> 32;
break;
#endif
case SPRN_TLB0CFG:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.tlbcfg[0]); break;
*spr_val = vcpu->arch.tlbcfg[0];
break;
case SPRN_TLB1CFG:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.tlbcfg[1]); break;
*spr_val = vcpu->arch.tlbcfg[1];
break;
case SPRN_L1CSR0:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr0); break;
*spr_val = vcpu_e500->l1csr0;
break;
case SPRN_L1CSR1:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr1); break;
*spr_val = vcpu_e500->l1csr1;
break;
case SPRN_HID0:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid0); break;
*spr_val = vcpu_e500->hid0;
break;
case SPRN_HID1:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid1); break;
*spr_val = vcpu_e500->hid1;
break;
case SPRN_SVR:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->svr); break;
*spr_val = vcpu_e500->svr;
break;
case SPRN_MMUCSR0:
kvmppc_set_gpr(vcpu, rt, 0); break;
*spr_val = 0;
break;
case SPRN_MMUCFG:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.mmucfg); break;
*spr_val = vcpu->arch.mmucfg;
break;
/* extra exceptions */
case SPRN_IVOR32:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]);
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
break;
case SPRN_IVOR33:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]);
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
break;
case SPRN_IVOR34:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]);
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
break;
case SPRN_IVOR35:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]);
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
break;
#ifdef CONFIG_KVM_BOOKE_HV
case SPRN_IVOR36:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL]);
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
break;
case SPRN_IVOR37:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT]);
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
break;
#endif
default:
emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
}
return emulated;

View File

@ -261,6 +261,9 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
set_base = gtlb0_set_base(vcpu_e500, eaddr);
size = vcpu_e500->gtlb_params[0].ways;
} else {
if (eaddr < vcpu_e500->tlb1_min_eaddr ||
eaddr > vcpu_e500->tlb1_max_eaddr)
return -1;
set_base = 0;
}
@ -583,6 +586,65 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
return victim;
}
static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500)
{
int size = vcpu_e500->gtlb_params[1].entries;
unsigned int offset;
gva_t eaddr;
int i;
vcpu_e500->tlb1_min_eaddr = ~0UL;
vcpu_e500->tlb1_max_eaddr = 0;
offset = vcpu_e500->gtlb_offset[1];
for (i = 0; i < size; i++) {
struct kvm_book3e_206_tlb_entry *tlbe =
&vcpu_e500->gtlb_arch[offset + i];
if (!get_tlb_v(tlbe))
continue;
eaddr = get_tlb_eaddr(tlbe);
vcpu_e500->tlb1_min_eaddr =
min(vcpu_e500->tlb1_min_eaddr, eaddr);
eaddr = get_tlb_end(tlbe);
vcpu_e500->tlb1_max_eaddr =
max(vcpu_e500->tlb1_max_eaddr, eaddr);
}
}
static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500,
struct kvm_book3e_206_tlb_entry *gtlbe)
{
unsigned long start, end, size;
size = get_tlb_bytes(gtlbe);
start = get_tlb_eaddr(gtlbe) & ~(size - 1);
end = start + size - 1;
return vcpu_e500->tlb1_min_eaddr == start ||
vcpu_e500->tlb1_max_eaddr == end;
}
/* This function is supposed to be called when adding a new valid TLB entry */
static void kvmppc_set_tlb1map_range(struct kvm_vcpu *vcpu,
struct kvm_book3e_206_tlb_entry *gtlbe)
{
unsigned long start, end, size;
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
if (!get_tlb_v(gtlbe))
return;
size = get_tlb_bytes(gtlbe);
start = get_tlb_eaddr(gtlbe) & ~(size - 1);
end = start + size - 1;
vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start);
vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end);
}
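/* Illustrative effect: with valid TLB1 entries covering
 * 0x40000000-0x4fffffff and 0xe0000000-0xe00fffff, tlb1_min_eaddr is
 * 0x40000000 and tlb1_max_eaddr is 0xe00fffff, so a lookup for any
 * eaddr outside that span now bails out early in
 * kvmppc_e500_tlb_index(). */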
static inline int kvmppc_e500_gtlbe_invalidate(
struct kvmppc_vcpu_e500 *vcpu_e500,
int tlbsel, int esel)
@ -593,6 +655,9 @@ static inline int kvmppc_e500_gtlbe_invalidate(
if (unlikely(get_tlb_iprot(gtlbe)))
return -1;
if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
kvmppc_recalc_tlb1map_range(vcpu_e500);
gtlbe->mas1 = 0;
return 0;
@ -792,14 +857,19 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
int tlbsel, esel, stlbsel, sesel;
int recal = 0;
tlbsel = get_tlb_tlbsel(vcpu);
esel = get_tlb_esel(vcpu, tlbsel);
gtlbe = get_entry(vcpu_e500, tlbsel, esel);
if (get_tlb_v(gtlbe))
if (get_tlb_v(gtlbe)) {
inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
if ((tlbsel == 1) &&
kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
recal = 1;
}
gtlbe->mas1 = vcpu->arch.shared->mas1;
gtlbe->mas2 = vcpu->arch.shared->mas2;
@ -808,6 +878,18 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
gtlbe->mas2, gtlbe->mas7_3);
if (tlbsel == 1) {
/*
* If a valid TLB1 entry is overwritten, recalculate the min/max
* TLB1 map address range; otherwise there is no need to rescan
* the TLB1 array.
*/
if (recal)
kvmppc_recalc_tlb1map_range(vcpu_e500);
else
kvmppc_set_tlb1map_range(vcpu, gtlbe);
}
/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
if (tlbe_is_host_safe(vcpu, gtlbe)) {
u64 eaddr;
@ -1145,6 +1227,7 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1];
vcpu_e500->gtlb_params[1].sets = 1;
kvmppc_recalc_tlb1map_range(vcpu_e500);
return 0;
err_put_page:
@ -1163,7 +1246,7 @@ int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
struct kvm_dirty_tlb *dirty)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
kvmppc_recalc_tlb1map_range(vcpu_e500);
clear_tlb_refs(vcpu_e500);
return 0;
}
@ -1272,6 +1355,7 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
vcpu->arch.tlbcfg[1] |=
vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT;
kvmppc_recalc_tlb1map_range(vcpu_e500);
return 0;
err:

View File

@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>
#include <asm/reg.h>
#include <asm/time.h>
@ -104,8 +105,12 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
*/
dec_time = vcpu->arch.dec;
dec_time *= 1000;
do_div(dec_time, tb_ticks_per_usec);
/*
* The guest timebase ticks at the same frequency as the host
* decrementer, so use the host decrementer/clockevent calculations
* for decrementer emulation.
*/
dec_time = dec_time << decrementer_clockevent.shift;
do_div(dec_time, decrementer_clockevent.mult);
dec_nsec = do_div(dec_time, NSEC_PER_SEC);
hrtimer_start(&vcpu->arch.dec_timer,
ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
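/* Sketch of the math (illustrative numbers): a clockevent converts
 * nanoseconds to ticks as ticks = (ns * mult) >> shift, so the code
 * above inverts it: ns = (ticks << shift) / mult. For a hypothetical
 * 500 MHz timebase, a decrementer value of 500000000 yields ~1 second,
 * which do_div(dec_time, NSEC_PER_SEC) then splits into the hrtimer's
 * seconds and nanoseconds. */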
@ -143,13 +148,13 @@ u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
u32 inst = kvmppc_get_last_inst(vcpu);
int ra;
int rb;
int rs;
int rt;
int sprn;
int ra = get_ra(inst);
int rs = get_rs(inst);
int rt = get_rt(inst);
int sprn = get_sprn(inst);
enum emulation_result emulated = EMULATE_DONE;
int advance = 1;
ulong spr_val = 0;
/* this default type might be overwritten by subcategories */
kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
@ -184,141 +189,116 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
advance = 0;
break;
case OP_31_XOP_LWZX:
rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
break;
case OP_31_XOP_LBZX:
rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
break;
case OP_31_XOP_LBZUX:
rt = get_rt(inst);
ra = get_ra(inst);
rb = get_rb(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_31_XOP_STWX:
rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
4, 1);
break;
case OP_31_XOP_STBX:
rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
1, 1);
break;
case OP_31_XOP_STBUX:
rs = get_rs(inst);
ra = get_ra(inst);
rb = get_rb(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
1, 1);
kvmppc_set_gpr(vcpu, rs, vcpu->arch.vaddr_accessed);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_31_XOP_LHAX:
rt = get_rt(inst);
emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
break;
case OP_31_XOP_LHZX:
rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
break;
case OP_31_XOP_LHZUX:
rt = get_rt(inst);
ra = get_ra(inst);
rb = get_rb(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_31_XOP_MFSPR:
sprn = get_sprn(inst);
rt = get_rt(inst);
switch (sprn) {
case SPRN_SRR0:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0);
spr_val = vcpu->arch.shared->srr0;
break;
case SPRN_SRR1:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr1);
spr_val = vcpu->arch.shared->srr1;
break;
case SPRN_PVR:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
spr_val = vcpu->arch.pvr;
break;
case SPRN_PIR:
kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break;
spr_val = vcpu->vcpu_id;
break;
case SPRN_MSSSR0:
kvmppc_set_gpr(vcpu, rt, 0); break;
spr_val = 0;
break;
/* Note: mftb and TBRL/TBWL are user-accessible, so
* the guest can always access the real TB anyway.
* In fact, we probably will never see these traps. */
case SPRN_TBWL:
kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break;
spr_val = get_tb() >> 32;
break;
case SPRN_TBWU:
kvmppc_set_gpr(vcpu, rt, get_tb()); break;
spr_val = get_tb();
break;
case SPRN_SPRG0:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg0);
spr_val = vcpu->arch.shared->sprg0;
break;
case SPRN_SPRG1:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg1);
spr_val = vcpu->arch.shared->sprg1;
break;
case SPRN_SPRG2:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg2);
spr_val = vcpu->arch.shared->sprg2;
break;
case SPRN_SPRG3:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg3);
spr_val = vcpu->arch.shared->sprg3;
break;
/* Note: SPRG4-7 are user-readable, so we don't get
* a trap. */
case SPRN_DEC:
{
- kvmppc_set_gpr(vcpu, rt,
- kvmppc_get_dec(vcpu, get_tb()));
+ spr_val = kvmppc_get_dec(vcpu, get_tb());
break;
}
default:
- emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
- if (emulated == EMULATE_FAIL) {
- printk("mfspr: unknown spr %x\n", sprn);
- kvmppc_set_gpr(vcpu, rt, 0);
+ emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
+ &spr_val);
+ if (unlikely(emulated == EMULATE_FAIL)) {
+ printk(KERN_INFO "mfspr: unknown spr "
+ "0x%x\n", sprn);
}
break;
}
+ kvmppc_set_gpr(vcpu, rt, spr_val);
kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
break;
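The pattern introduced here: each mfspr case only fills spr_val, and a single kvmppc_set_gpr() after the switch commits it, so the EMULATE_FAIL path no longer needs a per-case register write (mtspr mirrors this below by loading spr_val from the source register once, before its switch). A minimal self-contained sketch of that shape, with simplified names and SPR numbers rather than the kernel's types:

	struct shared_regs { unsigned long srr0, srr1; };

	static int emulate_mfspr(struct shared_regs *shared, unsigned long *gpr,
				 int rt, int sprn)
	{
		unsigned long spr_val = 0;

		switch (sprn) {
		case 26:  /* SRR0 */
			spr_val = shared->srr0;
			break;
		case 27:  /* SRR1 */
			spr_val = shared->srr1;
			break;
		default:
			return -1;  /* unknown SPR: rt is never touched */
		}
		gpr[rt] = spr_val;  /* single write-back point for every case */
		return 0;
	}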
case OP_31_XOP_STHX:
- rs = get_rs(inst);
- ra = get_ra(inst);
- rb = get_rb(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
2, 1);
break;
case OP_31_XOP_STHUX:
- rs = get_rs(inst);
- ra = get_ra(inst);
- rb = get_rb(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
2, 1);
@@ -326,14 +306,13 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
break;
case OP_31_XOP_MTSPR:
- sprn = get_sprn(inst);
- rs = get_rs(inst);
+ spr_val = kvmppc_get_gpr(vcpu, rs);
switch (sprn) {
case SPRN_SRR0:
- vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs);
+ vcpu->arch.shared->srr0 = spr_val;
break;
case SPRN_SRR1:
- vcpu->arch.shared->srr1 = kvmppc_get_gpr(vcpu, rs);
+ vcpu->arch.shared->srr1 = spr_val;
break;
/* XXX We need to context-switch the timebase for
@@ -344,27 +323,29 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
case SPRN_MSSSR0: break;
case SPRN_DEC:
- vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
+ vcpu->arch.dec = spr_val;
kvmppc_emulate_dec(vcpu);
break;
case SPRN_SPRG0:
- vcpu->arch.shared->sprg0 = kvmppc_get_gpr(vcpu, rs);
+ vcpu->arch.shared->sprg0 = spr_val;
break;
case SPRN_SPRG1:
- vcpu->arch.shared->sprg1 = kvmppc_get_gpr(vcpu, rs);
+ vcpu->arch.shared->sprg1 = spr_val;
break;
case SPRN_SPRG2:
- vcpu->arch.shared->sprg2 = kvmppc_get_gpr(vcpu, rs);
+ vcpu->arch.shared->sprg2 = spr_val;
break;
case SPRN_SPRG3:
- vcpu->arch.shared->sprg3 = kvmppc_get_gpr(vcpu, rs);
+ vcpu->arch.shared->sprg3 = spr_val;
break;
default:
- emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
+ emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
+ spr_val);
if (emulated == EMULATE_FAIL)
- printk("mtspr: unknown spr %x\n", sprn);
+ printk(KERN_INFO "mtspr: unknown spr "
+ "0x%x\n", sprn);
break;
}
kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
@@ -379,7 +360,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
break;
case OP_31_XOP_LWBRX:
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
break;
@@ -387,25 +367,16 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
break;
case OP_31_XOP_STWBRX:
- rs = get_rs(inst);
- ra = get_ra(inst);
- rb = get_rb(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
4, 0);
break;
case OP_31_XOP_LHBRX:
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
break;
case OP_31_XOP_STHBRX:
- rs = get_rs(inst);
- ra = get_ra(inst);
- rb = get_rb(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
2, 0);
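In the byte-reversed cases above the trailing argument to kvmppc_handle_load()/kvmppc_handle_store() is 0 instead of 1; it marks whether the access uses the default big-endian byte order, so the MMIO completion path swaps bytes for the brx forms. A sketch of that decision for the 32-bit case (illustrative only, not the kernel's completion code):

	#include <stdint.h>

	static uint32_t bswap32(uint32_t v)
	{
		return (v >> 24) | ((v >> 8) & 0xff00) |
		       ((v << 8) & 0xff0000) | (v << 24);
	}

	/* lwbrx & co. pass is_bigendian = 0 and get their bytes reversed. */
	static uint32_t complete_mmio_load(uint32_t raw, int is_bigendian)
	{
		return is_bigendian ? raw : bswap32(raw);
	}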
@@ -418,39 +389,30 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
break;
case OP_LWZ:
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
break;
case OP_LWZU:
- ra = get_ra(inst);
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_LBZ:
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
break;
case OP_LBZU:
- ra = get_ra(inst);
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_STW:
- rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
4, 1);
break;
case OP_STWU:
- ra = get_ra(inst);
- rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
4, 1);
@@ -458,15 +420,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
break;
case OP_STB:
- rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
1, 1);
break;
case OP_STBU:
- ra = get_ra(inst);
- rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
1, 1);
@@ -474,39 +433,30 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
break;
case OP_LHZ:
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
break;
case OP_LHZU:
- ra = get_ra(inst);
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_LHA:
- rt = get_rt(inst);
emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
break;
case OP_LHAU:
- ra = get_ra(inst);
- rt = get_rt(inst);
emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_STH:
- rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
2, 1);
break;
case OP_STHU:
- ra = get_ra(inst);
- rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
2, 1);
View File
@@ -244,10 +244,12 @@ int kvm_dev_ioctl_check_extension(long ext)
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
break;
#endif
- #ifdef CONFIG_KVM_BOOK3S_64_HV
+ #ifdef CONFIG_PPC_BOOK3S_64
case KVM_CAP_SPAPR_TCE:
r = 1;
break;
+ #endif /* CONFIG_PPC_BOOK3S_64 */
+ #ifdef CONFIG_KVM_BOOK3S_64_HV
case KVM_CAP_PPC_SMT:
r = threads_per_core;
break;
@@ -277,6 +279,11 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
+ #ifdef CONFIG_PPC_BOOK3S_64
+ case KVM_CAP_PPC_GET_SMMU_INFO:
+ r = 1;
+ break;
+ #endif
default:
r = 0;
break;
@@ -716,7 +723,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
break;
}
#endif
default:
r = -EINVAL;
}
@@ -773,7 +779,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
break;
}
- #ifdef CONFIG_KVM_BOOK3S_64_HV
+ #ifdef CONFIG_PPC_BOOK3S_64
case KVM_CREATE_SPAPR_TCE: {
struct kvm_create_spapr_tce create_tce;
struct kvm *kvm = filp->private_data;
@@ -784,7 +790,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
goto out;
}
+ #endif /* CONFIG_PPC_BOOK3S_64 */
+ #ifdef CONFIG_KVM_BOOK3S_64_HV
case KVM_ALLOCATE_RMA: {
struct kvm *kvm = filp->private_data;
struct kvm_allocate_rma rma;
@@ -796,6 +804,18 @@ long kvm_arch_vm_ioctl(struct file *filp,
}
#endif /* CONFIG_KVM_BOOK3S_64_HV */
+ #ifdef CONFIG_PPC_BOOK3S_64
+ case KVM_PPC_GET_SMMU_INFO: {
+ struct kvm *kvm = filp->private_data;
+ struct kvm_ppc_smmu_info info;
+ memset(&info, 0, sizeof(info));
+ r = kvm_vm_ioctl_get_smmu_info(kvm, &info);
+ if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
+ r = -EFAULT;
+ break;
+ }
+ #endif /* CONFIG_PPC_BOOK3S_64 */
default:
r = -ENOTTY;
}
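Taken together with the capability check added earlier in this file, userspace would drive the new ioctl roughly like this (minimal sketch, error handling elided; assumes kvm_fd and vm_fd are open KVM and VM file descriptors and headers with the definitions added below):

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int query_smmu(int kvm_fd, int vm_fd)
	{
		struct kvm_ppc_smmu_info info;

		if (ioctl(kvm_fd, KVM_CHECK_EXTENSION,
			  KVM_CAP_PPC_GET_SMMU_INFO) <= 0)
			return -1;  /* host lacks the capability */
		if (ioctl(vm_fd, KVM_PPC_GET_SMMU_INFO, &info) < 0)
			return -1;
		printf("slb_size=%u 1T-segments=%s\n", info.slb_size,
		       (info.flags & KVM_PPC_1T_SEGMENTS) ? "yes" : "no");
		return 0;
	}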
View File
@@ -449,6 +449,30 @@ struct kvm_ppc_pvinfo {
__u8 pad[108];
};
+ /* for KVM_PPC_GET_SMMU_INFO */
+ #define KVM_PPC_PAGE_SIZES_MAX_SZ 8
+ struct kvm_ppc_one_page_size {
+ __u32 page_shift; /* Page shift (or 0) */
+ __u32 pte_enc; /* Encoding in the HPTE (>>12) */
+ };
+ struct kvm_ppc_one_seg_page_size {
+ __u32 page_shift; /* Base page shift of segment (or 0) */
+ __u32 slb_enc; /* SLB encoding for BookS */
+ struct kvm_ppc_one_page_size enc[KVM_PPC_PAGE_SIZES_MAX_SZ];
+ };
+ #define KVM_PPC_PAGE_SIZES_REAL 0x00000001
+ #define KVM_PPC_1T_SEGMENTS 0x00000002
+ struct kvm_ppc_smmu_info {
+ __u64 flags;
+ __u32 slb_size;
+ __u32 pad;
+ struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
+ };
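Both arrays are sorted by increasing size and terminated by a zero page_shift, so a consumer can enumerate every supported combination like this (sketch, using only the structures defined above):

	#include <stdio.h>

	void dump_page_sizes(const struct kvm_ppc_smmu_info *info)
	{
		int i, j;

		for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
			const struct kvm_ppc_one_seg_page_size *sps = &info->sps[i];

			if (!sps->page_shift)
				break;  /* zero shift terminates the array */
			for (j = 0; j < KVM_PPC_PAGE_SIZES_MAX_SZ; j++) {
				if (!sps->enc[j].page_shift)
					break;
				printf("base 2^%u -> page 2^%u, pte_enc 0x%x\n",
				       sps->page_shift, sps->enc[j].page_shift,
				       sps->enc[j].pte_enc);
			}
		}
	}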
#define KVMIO 0xAE
/* machine type bits, to be used as argument to KVM_CREATE_VM */
@@ -591,6 +615,7 @@ struct kvm_ppc_pvinfo {
#define KVM_CAP_PCI_2_3 75
#define KVM_CAP_KVMCLOCK_CTRL 76
#define KVM_CAP_SIGNAL_MSI 77
+ #define KVM_CAP_PPC_GET_SMMU_INFO 78
#ifdef KVM_CAP_IRQ_ROUTING
@@ -800,6 +825,8 @@ struct kvm_s390_ucas_mapping {
struct kvm_assigned_pci_dev)
/* Available with KVM_CAP_SIGNAL_MSI */
#define KVM_SIGNAL_MSI _IOW(KVMIO, 0xa5, struct kvm_msi)
+ /* Available with KVM_CAP_PPC_GET_SMMU_INFO */
+ #define KVM_PPC_GET_SMMU_INFO _IOR(KVMIO, 0xa6, struct kvm_ppc_smmu_info)
/*
* ioctls for vcpu fds