Diffstat (limited to 'include/linux/kvm_host.h')
 include/linux/kvm_host.h | 136 ++++++++++++++++++++++++----
 1 file changed, 108 insertions(+), 28 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ecc554374e4..cad77fe09d7 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -22,6 +22,7 @@
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
+#include <linux/irqflags.h>
#include <asm/signal.h>
#include <linux/kvm.h>
@@ -47,28 +48,40 @@
/*
* For the normal pfn, the highest 12 bits should be zero,
- * so we can mask these bits to indicate the error.
+ * so we can use bits 52-62 to indicate an error pfn
+ * and bit 63 to indicate the noslot pfn.
*/
-#define KVM_PFN_ERR_MASK (0xfffULL << 52)
+#define KVM_PFN_ERR_MASK (0x7ffULL << 52)
+#define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52)
+#define KVM_PFN_NOSLOT (0x1ULL << 63)
#define KVM_PFN_ERR_FAULT (KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1)
-#define KVM_PFN_ERR_BAD (KVM_PFN_ERR_MASK + 2)
-#define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 3)
+#define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2)
+/*
+ * error pfns indicate that the gfn is in a slot but failed to
+ * be translated to a pfn on the host.
+ */
static inline bool is_error_pfn(pfn_t pfn)
{
return !!(pfn & KVM_PFN_ERR_MASK);
}
-static inline bool is_noslot_pfn(pfn_t pfn)
+/*
+ * error_noslot pfns indicate that the gfn cannot be translated
+ * to a pfn: it is either not in any slot, or the host-side
+ * translation failed.
+ */
+static inline bool is_error_noslot_pfn(pfn_t pfn)
{
- return pfn == KVM_PFN_ERR_BAD;
+ return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}
-static inline bool is_invalid_pfn(pfn_t pfn)
+/* The noslot pfn indicates that the gfn is not in any slot. */
+static inline bool is_noslot_pfn(pfn_t pfn)
{
- return !is_noslot_pfn(pfn) && is_error_pfn(pfn);
+ return pfn == KVM_PFN_NOSLOT;
}
#define KVM_HVA_ERR_BAD (PAGE_OFFSET)
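Taken together, the new encoding yields three predicates with nested scope: is_error_noslot_pfn() matches any of bits 52-63, is_error_pfn() only bits 52-62, and is_noslot_pfn() only bit 63. A minimal caller sketch, assuming gfn_to_pfn() (declared later in this header) and illustrative error codes:

	pfn_t pfn = gfn_to_pfn(kvm, gfn);

	if (is_error_noslot_pfn(pfn)) {		/* any of bits 52-63 set */
		if (is_noslot_pfn(pfn))		/* bit 63: no memslot covers gfn */
			return -ENOENT;
		return -EFAULT;			/* in a slot, host translation failed */
	}
	/* from here on, pfn is a real host page frame number */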
@@ -107,6 +120,11 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQ_IMMEDIATE_EXIT 15
#define KVM_REQ_PMU 16
#define KVM_REQ_PMI 17
+#define KVM_REQ_WATCHDOG 18
+#define KVM_REQ_MASTERCLOCK_UPDATE 19
+#define KVM_REQ_MCLOCK_INPROGRESS 20
+#define KVM_REQ_EPR_EXIT 21
+#define KVM_REQ_EOIBITMAP 22
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
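The new request bits are consumed through the same make/check pattern as the existing ones; a sketch using the kvm_make_request()/kvm_check_request() helpers defined elsewhere in this header (the handler name is hypothetical):

	/* producer, e.g. a timekeeping update path */
	kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);

	/* consumer, in the arch vcpu-run loop */
	if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
		update_masterclock(vcpu);	/* hypothetical handler */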
@@ -251,12 +269,11 @@ static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
struct kvm_memory_slot {
gfn_t base_gfn;
unsigned long npages;
- unsigned long flags;
unsigned long *dirty_bitmap;
struct kvm_arch_memory_slot arch;
unsigned long userspace_addr;
- int user_alloc;
- int id;
+ u32 flags;
+ short id;
};
static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
@@ -298,8 +315,12 @@ struct kvm_irq_routing_table {};
#endif
+#ifndef KVM_PRIVATE_MEM_SLOTS
+#define KVM_PRIVATE_MEM_SLOTS 0
+#endif
+
#ifndef KVM_MEM_SLOTS_NUM
-#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
+#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif
/*
@@ -311,7 +332,7 @@ struct kvm_memslots {
u64 generation;
struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
/* The mapping table from slot id to the index in memslots[]. */
- int id_to_index[KVM_MEM_SLOTS_NUM];
+ short id_to_index[KVM_MEM_SLOTS_NUM];
};
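Storing ids as short halves the id_to_index[] table while still decoupling a slot's id from its position in memslots[]; KVM_MEM_SLOTS_NUM stays far below SHRT_MAX on every arch. The lookup this table supports is the id_to_memslot() helper touched below, roughly:

	static inline struct kvm_memory_slot *
	id_to_memslot(struct kvm_memslots *slots, int id)
	{
		int index = slots->id_to_index[id];	/* now a short */
		struct kvm_memory_slot *slot = &slots->memslots[index];

		WARN_ON(slot->id != id);
		return slot;
	}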
struct kvm {
@@ -409,7 +430,8 @@ void kvm_exit(void);
void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
-void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new);
+void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new,
+ u64 last_generation);
static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
@@ -432,10 +454,10 @@ id_to_memslot(struct kvm_memslots *slots, int id)
int kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
- int user_alloc);
+ bool user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
- int user_alloc);
+ bool user_alloc);
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
@@ -443,11 +465,11 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_memory_slot old,
struct kvm_userspace_memory_region *mem,
- int user_alloc);
+ bool user_alloc);
void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old,
- int user_alloc);
+ bool user_alloc);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
/* flush all memory translations */
@@ -516,6 +538,8 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
+void kvm_make_mclock_inprogress_request(struct kvm *kvm);
+void kvm_make_update_eoibitmap_request(struct kvm *kvm);
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
@@ -533,7 +557,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
- int user_alloc);
+ bool user_alloc);
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level);
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
@@ -569,9 +593,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
+int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
-int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
@@ -666,8 +690,10 @@ void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
unsigned long *deliver_bitmask);
#endif
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
+int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
int irq_source_id, int level);
+bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
struct kvm_irq_ack_notifier *kian);
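kvm_set_irq_inatomic() is meant for callers that cannot sleep: it returns -EWOULDBLOCK when the injection needs process context, letting the caller defer to a work item. A sketch of the intended pattern, with assumed irqfd field names:

	/* e.g. from an irqfd wakeup running in atomic context */
	if (kvm_set_irq_inatomic(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
				 irqfd->gsi, 1) == -EWOULDBLOCK)
		schedule_work(&irqfd->inject);	/* retry from process context */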
@@ -723,11 +749,52 @@ static inline int kvm_deassign_device(struct kvm *kvm,
}
#endif /* CONFIG_IOMMU_API */
+static inline void __guest_enter(void)
+{
+ /*
+ * This is running in ioctl context so we can avoid
+ * the call to vtime_account() with its unnecessary idle check.
+ */
+ vtime_account_system(current);
+ current->flags |= PF_VCPU;
+}
+
+static inline void __guest_exit(void)
+{
+ /*
+ * This is running in ioctl context so we can avoid
+ * the call to vtime_account() with its unnecessary idle check.
+ */
+ vtime_account_system(current);
+ current->flags &= ~PF_VCPU;
+}
+
+#ifdef CONFIG_CONTEXT_TRACKING
+extern void guest_enter(void);
+extern void guest_exit(void);
+
+#else /* !CONFIG_CONTEXT_TRACKING */
+static inline void guest_enter(void)
+{
+ __guest_enter();
+}
+
+static inline void guest_exit(void)
+{
+ __guest_exit();
+}
+#endif /* !CONFIG_CONTEXT_TRACKING */
+
static inline void kvm_guest_enter(void)
{
+ unsigned long flags;
+
BUG_ON(preemptible());
- vtime_account(current);
- current->flags |= PF_VCPU;
+
+ local_irq_save(flags);
+ guest_enter();
+ local_irq_restore(flags);
+
/* KVM does not hold any references to rcu protected data when it
* switches CPU into a guest mode. In fact switching to a guest mode
* is very similar to exiting to userspace from an rcu point of view. In
@@ -740,8 +807,11 @@ static inline void kvm_guest_enter(void)
static inline void kvm_guest_exit(void)
{
- vtime_account(current);
- current->flags &= ~PF_VCPU;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ guest_exit();
+ local_irq_restore(flags);
}
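The local_irq_save()/local_irq_restore() pair matters because, under CONFIG_CONTEXT_TRACKING, the out-of-line guest_enter()/guest_exit() also update per-CPU context-tracking state that must not race with interrupts. Callers themselves only need preemption disabled, as the BUG_ON() documents; an illustrative caller:

	preempt_disable();
	kvm_guest_enter();	/* accounts system time, sets PF_VCPU */
	/* ... enter and run the guest ... */
	kvm_guest_exit();	/* accounts guest time, clears PF_VCPU */
	preempt_enable();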
/*
@@ -830,9 +900,9 @@ extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
-static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
+static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
- if (unlikely(vcpu->kvm->mmu_notifier_count))
+ if (unlikely(kvm->mmu_notifier_count))
return 1;
/*
* Ensure the read of mmu_notifier_count happens before the read
@@ -845,7 +915,7 @@ static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_se
* can't rely on kvm->mmu_lock to keep things ordered.
*/
smp_rmb();
- if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
+ if (kvm->mmu_notifier_seq != mmu_seq)
return 1;
return 0;
}
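Taking struct kvm instead of a vcpu lets non-vcpu paths use the helper as well. The read side it pairs with follows the usual seq/retry pattern of the arch fault handlers, roughly:

	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(kvm, gfn);	/* may sleep, may race with invalidation */

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		spin_unlock(&kvm->mmu_lock);
		goto retry;		/* an invalidation ran; start over */
	}
	/* ... safe to install pfn in the page tables ... */
	spin_unlock(&kvm->mmu_lock);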
@@ -873,10 +943,20 @@ static inline void kvm_free_irq_routing(struct kvm *kvm) {}
#ifdef CONFIG_HAVE_KVM_EVENTFD
void kvm_eventfd_init(struct kvm *kvm);
+int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
+
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
-int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
+#else
+static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
+{
+ return -EINVAL;
+}
+
+static inline void kvm_irqfd_release(struct kvm *kvm) {}
+#endif
#else