Merge remote-tracking branch 'kvm/next' into kvm-next-5.20
KVM/s390, KVM/x86 and common infrastructure changes for 5.20

x86:
* Permit guests to ignore single-bit ECC errors
* Fix races in gfn->pfn cache refresh; do not pin pages tracked by the cache
* Intel IPI virtualization
* Allow getting/setting pending triple fault with KVM_GET/SET_VCPU_EVENTS
* PEBS virtualization
* Simplify PMU emulation by just using PERF_TYPE_RAW events
* More accurate event reinjection on SVM (avoid retrying instructions)
* Allow getting/setting the state of the speaker port data bit
* Refuse starting the kvm-intel module if VM-Entry/VM-Exit controls are inconsistent
* "Notify" VM exit (detect microarchitectural hangs) for Intel
* Cleanups for MCE MSR emulation

s390:
* add an interface to provide a hypervisor dump for secure guests
* improve selftests to use TAP interface
* enable interpretive execution of zPCI instructions (for PCI passthrough)
* First part of deferred teardown
* CPU Topology
* PV attestation
* Minor fixes

Generic:
* new selftests API using struct kvm_vcpu instead of a (vm, id) tuple

x86:
* Use try_cmpxchg64 instead of cmpxchg64
* Bugfixes
* Ignore benign host accesses to PMU MSRs when PMU is disabled
* Allow disabling KVM's "MONITOR/MWAIT are NOPs!" behavior
* x86/MMU: Allow NX huge pages to be disabled on a per-vm basis
* Port eager page splitting to shadow MMU as well
* Enable CMCI capability by default and handle injected UCNA errors
* Expose pid of vcpu threads in debugfs
* x2AVIC support for AMD
* cleanup PIO emulation
* Fixes for LLDT/LTR emulation
* Don't require refcounted "struct page" to create huge SPTEs

x86 cleanups:
* Use separate namespaces for guest PTEs and shadow PTEs bitmasks
* PIO emulation
* Reorganize rmap API, mostly around rmap destruction
* Do not workaround very old KVM bugs for L0 that runs with nesting enabled
* new selftests API for CPUID
commit 63f4b21041
238 changed files with 12175 additions and 7098 deletions

virt/kvm/kvm_main.c
@@ -168,7 +168,7 @@ __weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
 {
 }
 
-bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
+bool kvm_is_zone_device_page(struct page *page)
 {
 	/*
 	 * The metadata used by is_zone_device_page() to determine whether or
@@ -176,25 +176,42 @@ bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
 	 * the device has been pinned, e.g. by get_user_pages().  WARN if the
 	 * page_count() is zero to help detect bad usage of this helper.
 	 */
-	if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn))))
+	if (WARN_ON_ONCE(!page_count(page)))
 		return false;
 
-	return is_zone_device_page(pfn_to_page(pfn));
+	return is_zone_device_page(page);
 }
 
-bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
+/*
+ * Returns a 'struct page' if the pfn is "valid" and backed by a refcounted
+ * page, NULL otherwise.  Note, the list of refcounted PG_reserved page types
+ * is likely incomplete, it has been compiled purely through people wanting to
+ * back guest with a certain type of memory and encountering issues.
+ */
+struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn)
 {
+	struct page *page;
+
+	if (!pfn_valid(pfn))
+		return NULL;
+
+	page = pfn_to_page(pfn);
+	if (!PageReserved(page))
+		return page;
+
+	/* The ZERO_PAGE(s) is marked PG_reserved, but is refcounted. */
+	if (is_zero_pfn(pfn))
+		return page;
+
 	/*
 	 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
 	 * perspective they are "normal" pages, albeit with slightly different
 	 * usage rules.
 	 */
-	if (pfn_valid(pfn))
-		return PageReserved(pfn_to_page(pfn)) &&
-		       !is_zero_pfn(pfn) &&
-		       !kvm_is_zone_device_pfn(pfn);
+	if (kvm_is_zone_device_page(page))
+		return page;
 
-	return true;
+	return NULL;
 }
 
 /*
@@ -239,7 +256,7 @@ static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
 	return mode == IN_GUEST_MODE;
 }
 
-static void ack_flush(void *_completed)
+static void ack_kick(void *_completed)
 {
 }
 
@@ -248,7 +265,7 @@ static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait)
 	if (cpumask_empty(cpus))
 		return false;
 
-	smp_call_function_many(cpus, ack_flush, NULL, wait);
+	smp_call_function_many(cpus, ack_kick, NULL, wait);
 	return true;
 }
 
@@ -379,16 +396,31 @@ static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
 	return (void *)__get_free_page(gfp_flags);
 }
 
-int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
+int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min)
 {
+	gfp_t gfp = mc->gfp_custom ? mc->gfp_custom : GFP_KERNEL_ACCOUNT;
 	void *obj;
 
 	if (mc->nobjs >= min)
 		return 0;
-	while (mc->nobjs < ARRAY_SIZE(mc->objects)) {
-		obj = mmu_memory_cache_alloc_obj(mc, (mc->gfp_custom) ?
-						 mc->gfp_custom :
-						 GFP_KERNEL_ACCOUNT);
+
+	if (unlikely(!mc->objects)) {
+		if (WARN_ON_ONCE(!capacity))
+			return -EIO;
+
+		mc->objects = kvmalloc_array(sizeof(void *), capacity, gfp);
+		if (!mc->objects)
+			return -ENOMEM;
+
+		mc->capacity = capacity;
+	}
+
+	/* It is illegal to request a different capacity across topups. */
+	if (WARN_ON_ONCE(mc->capacity != capacity))
+		return -EIO;
+
+	while (mc->nobjs < mc->capacity) {
+		obj = mmu_memory_cache_alloc_obj(mc, gfp);
 		if (!obj)
 			return mc->nobjs >= min ? 0 : -ENOMEM;
 		mc->objects[mc->nobjs++] = obj;
@@ -396,6 +428,11 @@ int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
 	return 0;
 }
 
+int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
+{
+	return __kvm_mmu_topup_memory_cache(mc, KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE, min);
+}
+
 int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
 {
 	return mc->nobjs;
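The new __kvm_mmu_topup_memory_cache() lets a caller size the objects array explicitly, while the kvm_mmu_topup_memory_cache() wrapper keeps the old KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE default. A minimal usage sketch follows; it is not taken from this series, the capacity of 512 and the surrounding function are invented for illustration, and the cache is assumed to be zero-initialized with no custom kmem_cache or gfp_custom set.

#include <linux/kvm_host.h>
#include <linux/printk.h>

/* Illustrative only: top up a caller-sized cache, allocate from it, tear it down. */
static int example_topup_and_alloc(struct kvm_mmu_memory_cache *cache)
{
	void *obj;
	int r;

	/* Sizes the backing array on first topup; later topups must pass the same capacity. */
	r = __kvm_mmu_topup_memory_cache(cache, 512, 1);
	if (r)
		return r;

	/* Served from the pre-filled array; normally handed off, e.g. as a page-table page. */
	obj = kvm_mmu_memory_cache_alloc(cache);
	pr_info("allocated cache object at %p\n", obj);

	/* Frees any still-cached objects and, with this series, the objects array itself. */
	kvm_mmu_free_memory_cache(cache);
	return 0;
}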
@@ -409,6 +446,11 @@ void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
 		else
 			free_page((unsigned long)mc->objects[--mc->nobjs]);
 	}
+
+	kvfree(mc->objects);
+
+	mc->objects = NULL;
+	mc->capacity = 0;
 }
 
 void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
@@ -726,6 +768,15 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	kvm->mn_active_invalidate_count++;
 	spin_unlock(&kvm->mn_invalidate_lock);
 
+	/*
+	 * Invalidate pfn caches _before_ invalidating the secondary MMUs, i.e.
+	 * before acquiring mmu_lock, to avoid holding mmu_lock while acquiring
+	 * each cache's lock.  There are relatively few caches in existence at
+	 * any given time, and the caches themselves can check for hva overlap,
+	 * i.e. don't need to rely on memslot overlap checks for performance.
+	 * Because this runs without holding mmu_lock, the pfn caches must use
+	 * mn_active_invalidate_count (see above) instead of mmu_notifier_count.
+	 */
 	gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end,
 					  hva_range.may_block);
 
@@ -2494,9 +2545,12 @@ static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
 
 static int kvm_try_get_pfn(kvm_pfn_t pfn)
 {
-	if (kvm_is_reserved_pfn(pfn))
+	struct page *page = kvm_pfn_to_refcounted_page(pfn);
+
+	if (!page)
 		return 1;
-	return get_page_unless_zero(pfn_to_page(pfn));
+
+	return get_page_unless_zero(page);
 }
 
 static int hva_to_pfn_remapped(struct vm_area_struct *vma,
@@ -2582,7 +2636,7 @@ kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 		     bool write_fault, bool *writable)
 {
 	struct vm_area_struct *vma;
-	kvm_pfn_t pfn = 0;
+	kvm_pfn_t pfn;
 	int npages, r;
 
 	/* we can do it either atomically or asynchronously, not both */
@@ -2713,34 +2767,32 @@ int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
 }
 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
 
-static struct page *kvm_pfn_to_page(kvm_pfn_t pfn)
-{
-	if (is_error_noslot_pfn(pfn))
-		return KVM_ERR_PTR_BAD_PAGE;
-
-	if (kvm_is_reserved_pfn(pfn)) {
-		WARN_ON(1);
-		return KVM_ERR_PTR_BAD_PAGE;
-	}
-
-	return pfn_to_page(pfn);
-}
-
+/*
+ * Do not use this helper unless you are absolutely certain the gfn _must_ be
+ * backed by 'struct page'.  A valid example is if the backing memslot is
+ * controlled by KVM.  Note, if the returned page is valid, it's refcount has
+ * been elevated by gfn_to_pfn().
+ */
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 {
+	struct page *page;
 	kvm_pfn_t pfn;
 
 	pfn = gfn_to_pfn(kvm, gfn);
 
-	return kvm_pfn_to_page(pfn);
+	if (is_error_noslot_pfn(pfn))
+		return KVM_ERR_PTR_BAD_PAGE;
+
+	page = kvm_pfn_to_refcounted_page(pfn);
+	if (!page)
+		return KVM_ERR_PTR_BAD_PAGE;
+
+	return page;
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
 void kvm_release_pfn(kvm_pfn_t pfn, bool dirty)
 {
-	if (pfn == 0)
-		return;
-
 	if (dirty)
 		kvm_release_pfn_dirty(pfn);
 	else
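With kvm_pfn_to_page() gone, gfn_to_page() reports any non-refcounted backing as KVM_ERR_PTR_BAD_PAGE instead of WARNing inside a helper. A hedged sketch of the expected caller pattern follows; the function, the gfn and the dirty handling below are illustrative and not part of this series.

#include <linux/kvm_host.h>

/* Illustrative caller: the gfn is assumed to be backed by refcounted memory. */
static int example_touch_guest_page(struct kvm *kvm, gfn_t gfn)
{
	struct page *page;

	page = gfn_to_page(kvm, gfn);
	if (is_error_page(page))
		return -EFAULT;

	/* ... access the page contents, e.g. via kmap_local_page() ... */

	/* Drops the reference taken by gfn_to_pfn() and marks the page dirty. */
	kvm_release_page_dirty(page);
	return 0;
}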
@@ -2806,28 +2858,48 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
 
-struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+static bool kvm_is_ad_tracked_page(struct page *page)
 {
-	kvm_pfn_t pfn;
-
-	pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);
-
-	return kvm_pfn_to_page(pfn);
+	/*
+	 * Per page-flags.h, pages tagged PG_reserved "should in general not be
+	 * touched (e.g. set dirty) except by its owner".
+	 */
+	return !PageReserved(page);
+}
+
+static void kvm_set_page_dirty(struct page *page)
+{
+	if (kvm_is_ad_tracked_page(page))
+		SetPageDirty(page);
+}
+
+static void kvm_set_page_accessed(struct page *page)
+{
+	if (kvm_is_ad_tracked_page(page))
+		mark_page_accessed(page);
 }
-EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page);
 
 void kvm_release_page_clean(struct page *page)
 {
 	WARN_ON(is_error_page(page));
 
-	kvm_release_pfn_clean(page_to_pfn(page));
+	kvm_set_page_accessed(page);
+	put_page(page);
 }
 EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_pfn_clean(kvm_pfn_t pfn)
 {
-	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
-		put_page(pfn_to_page(pfn));
+	struct page *page;
+
+	if (is_error_noslot_pfn(pfn))
+		return;
+
+	page = kvm_pfn_to_refcounted_page(pfn);
+	if (!page)
+		return;
+
+	kvm_release_page_clean(page);
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
 
@@ -2835,28 +2907,48 @@ void kvm_release_page_dirty(struct page *page)
 {
 	WARN_ON(is_error_page(page));
 
-	kvm_release_pfn_dirty(page_to_pfn(page));
+	kvm_set_page_dirty(page);
+	kvm_release_page_clean(page);
 }
 EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
 
 void kvm_release_pfn_dirty(kvm_pfn_t pfn)
 {
-	kvm_set_pfn_dirty(pfn);
-	kvm_release_pfn_clean(pfn);
+	struct page *page;
+
+	if (is_error_noslot_pfn(pfn))
+		return;
+
+	page = kvm_pfn_to_refcounted_page(pfn);
+	if (!page)
+		return;
+
+	kvm_release_page_dirty(page);
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
 
+/*
+ * Note, checking for an error/noslot pfn is the caller's responsibility when
+ * directly marking a page dirty/accessed.  Unlike the "release" helpers, the
+ * "set" helpers are not to be used when the pfn might point at garbage.
+ */
 void kvm_set_pfn_dirty(kvm_pfn_t pfn)
 {
-	if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
-		SetPageDirty(pfn_to_page(pfn));
+	if (WARN_ON(is_error_noslot_pfn(pfn)))
+		return;
+
+	if (pfn_valid(pfn))
+		kvm_set_page_dirty(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
 
 void kvm_set_pfn_accessed(kvm_pfn_t pfn)
 {
-	if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
-		mark_page_accessed(pfn_to_page(pfn));
+	if (WARN_ON(is_error_noslot_pfn(pfn)))
+		return;
+
+	if (pfn_valid(pfn))
+		kvm_set_page_accessed(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
 
@@ -3730,9 +3822,18 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
 	return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
 }
 
-#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
+static int vcpu_get_pid(void *data, u64 *val)
+{
+	struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data;
+	*val = pid_nr(rcu_access_pointer(vcpu->pid));
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(vcpu_get_pid_fops, vcpu_get_pid, NULL, "%llu\n");
+
 static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
 {
+#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
 	struct dentry *debugfs_dentry;
 	char dir_name[ITOA_MAX_LEN * 2];
 
@@ -3742,10 +3843,12 @@ static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
 	snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
 	debugfs_dentry = debugfs_create_dir(dir_name,
 					    vcpu->kvm->debugfs_dentry);
+	debugfs_create_file("pid", 0444, debugfs_dentry, vcpu,
+			    &vcpu_get_pid_fops);
 
 	kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry);
+#endif
 }
-#endif
 
 /*
  * Creates some virtual cpus.  Good luck creating more than one.
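With the new "pid" attribute, the vCPU thread's pid can be read straight from debugfs. A small userspace illustration follows; the directory name 1234-11 merely stands in for the usual <creator pid>-<VM fd> naming under /sys/kernel/debug/kvm and is an assumption for the example, not something defined by this diff.

#include <stdio.h>

int main(void)
{
	/* Assumed path: VM created by task 1234, VM fd 11, vCPU 0. */
	const char *path = "/sys/kernel/debug/kvm/1234-11/vcpu0/pid";
	FILE *f = fopen(path, "r");
	long pid;

	if (!f || fscanf(f, "%ld", &pid) != 1) {
		perror(path);
		return 1;
	}
	fclose(f);
	printf("vcpu0 thread pid: %ld\n", pid);
	return 0;
}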
@@ -3765,13 +3868,15 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 		return -EINVAL;
 	}
 
+	r = kvm_arch_vcpu_precreate(kvm, id);
+	if (r) {
+		mutex_unlock(&kvm->lock);
+		return r;
+	}
+
 	kvm->created_vcpus++;
 	mutex_unlock(&kvm->lock);
 
-	r = kvm_arch_vcpu_precreate(kvm, id);
-	if (r)
-		goto vcpu_decrement;
-
 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
 	if (!vcpu) {
 		r = -ENOMEM;

virt/kvm/pfncache.c
@@ -95,48 +95,143 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 }
 EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_check);
 
-static void __release_gpc(struct kvm *kvm, kvm_pfn_t pfn, void *khva, gpa_t gpa)
+static void gpc_unmap_khva(struct kvm *kvm, kvm_pfn_t pfn, void *khva)
 {
-	/* Unmap the old page if it was mapped before, and release it */
-	if (!is_error_noslot_pfn(pfn)) {
-		if (khva) {
-			if (pfn_valid(pfn))
-				kunmap(pfn_to_page(pfn));
+	/* Unmap the old pfn/page if it was mapped before. */
+	if (!is_error_noslot_pfn(pfn) && khva) {
+		if (pfn_valid(pfn))
+			kunmap(pfn_to_page(pfn));
 #ifdef CONFIG_HAS_IOMEM
-			else
-				memunmap(khva);
+		else
+			memunmap(khva);
 #endif
-		}
-
-		kvm_release_pfn(pfn, false);
 	}
 }
 
-static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, unsigned long uhva)
+static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_seq)
 {
+	/*
+	 * mn_active_invalidate_count acts for all intents and purposes
+	 * like mmu_notifier_count here; but the latter cannot be used
+	 * here because the invalidation of caches in the mmu_notifier
+	 * event occurs _before_ mmu_notifier_count is elevated.
+	 *
+	 * Note, it does not matter that mn_active_invalidate_count
+	 * is not protected by gpc->lock.  It is guaranteed to
+	 * be elevated before the mmu_notifier acquires gpc->lock, and
+	 * isn't dropped until after mmu_notifier_seq is updated.
+	 */
+	if (kvm->mn_active_invalidate_count)
+		return true;
+
+	/*
+	 * Ensure mn_active_invalidate_count is read before
+	 * mmu_notifier_seq.  This pairs with the smp_wmb() in
+	 * mmu_notifier_invalidate_range_end() to guarantee either the
+	 * old (non-zero) value of mn_active_invalidate_count or the
+	 * new (incremented) value of mmu_notifier_seq is observed.
+	 */
+	smp_rmb();
+	return kvm->mmu_notifier_seq != mmu_seq;
+}
+
+static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
+{
+	/* Note, the new page offset may be different than the old! */
+	void *old_khva = gpc->khva - offset_in_page(gpc->khva);
+	kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT;
+	void *new_khva = NULL;
 	unsigned long mmu_seq;
-	kvm_pfn_t new_pfn;
-	int retry;
+
+	lockdep_assert_held(&gpc->refresh_lock);
+
+	lockdep_assert_held_write(&gpc->lock);
+
+	/*
+	 * Invalidate the cache prior to dropping gpc->lock, the gpa=>uhva
+	 * assets have already been updated and so a concurrent check() from a
+	 * different task may not fail the gpa/uhva/generation checks.
+	 */
+	gpc->valid = false;
 
 	do {
 		mmu_seq = kvm->mmu_notifier_seq;
 		smp_rmb();
 
+		write_unlock_irq(&gpc->lock);
+
+		/*
+		 * If the previous iteration "failed" due to an mmu_notifier
+		 * event, release the pfn and unmap the kernel virtual address
+		 * from the previous attempt.  Unmapping might sleep, so this
+		 * needs to be done after dropping the lock.  Opportunistically
+		 * check for resched while the lock isn't held.
+		 */
+		if (new_pfn != KVM_PFN_ERR_FAULT) {
+			/*
+			 * Keep the mapping if the previous iteration reused
+			 * the existing mapping and didn't create a new one.
+			 */
+			if (new_khva != old_khva)
+				gpc_unmap_khva(kvm, new_pfn, new_khva);
+
+			kvm_release_pfn_clean(new_pfn);
+
+			cond_resched();
+		}
+
 		/* We always request a writeable mapping */
-		new_pfn = hva_to_pfn(uhva, false, NULL, true, NULL);
+		new_pfn = hva_to_pfn(gpc->uhva, false, NULL, true, NULL);
 		if (is_error_noslot_pfn(new_pfn))
-			break;
+			goto out_error;
 
-		KVM_MMU_READ_LOCK(kvm);
-		retry = mmu_notifier_retry_hva(kvm, mmu_seq, uhva);
-		KVM_MMU_READ_UNLOCK(kvm);
-		if (!retry)
-			break;
+		/*
+		 * Obtain a new kernel mapping if KVM itself will access the
+		 * pfn.  Note, kmap() and memremap() can both sleep, so this
+		 * too must be done outside of gpc->lock!
+		 */
+		if (gpc->usage & KVM_HOST_USES_PFN) {
+			if (new_pfn == gpc->pfn) {
+				new_khva = old_khva;
+			} else if (pfn_valid(new_pfn)) {
+				new_khva = kmap(pfn_to_page(new_pfn));
+#ifdef CONFIG_HAS_IOMEM
+			} else {
+				new_khva = memremap(pfn_to_hpa(new_pfn), PAGE_SIZE, MEMREMAP_WB);
+#endif
+			}
+			if (!new_khva) {
+				kvm_release_pfn_clean(new_pfn);
+				goto out_error;
+			}
+		}
 
-		cond_resched();
-	} while (1);
+		write_lock_irq(&gpc->lock);
 
-	return new_pfn;
+		/*
+		 * Other tasks must wait for _this_ refresh to complete before
+		 * attempting to refresh.
+		 */
+		WARN_ON_ONCE(gpc->valid);
+	} while (mmu_notifier_retry_cache(kvm, mmu_seq));
+
+	gpc->valid = true;
+	gpc->pfn = new_pfn;
+	gpc->khva = new_khva + (gpc->gpa & ~PAGE_MASK);
+
+	/*
+	 * Put the reference to the _new_ pfn.  The pfn is now tracked by the
+	 * cache and can be safely migrated, swapped, etc... as the cache will
+	 * invalidate any mappings in response to relevant mmu_notifier events.
+	 */
+	kvm_release_pfn_clean(new_pfn);
+
+	return 0;
+
+out_error:
+	write_lock_irq(&gpc->lock);
+
+	return -EFAULT;
 }
 
 int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
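mmu_notifier_retry_cache() is the usual "snapshot a generation, do the sleepable work unlocked, retry if an invalidation ran or is still in flight" loop. A simplified, standalone illustration of that shape is below; it is not kernel code, the names only mirror the kernel's counters, and it omits the smp_rmb() ordering the real helper needs between the two reads.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for kvm->mmu_notifier_seq and kvm->mn_active_invalidate_count. */
static atomic_ulong mmu_notifier_seq;
static atomic_int mn_active_invalidate_count;

static bool retry_needed(unsigned long snapshot)
{
	/* An invalidation currently in progress always forces a retry... */
	if (atomic_load(&mn_active_invalidate_count))
		return true;
	/* ...as does one that completed (and bumped the sequence) after the snapshot. */
	return atomic_load(&mmu_notifier_seq) != snapshot;
}

static int lookup_with_retry(void)
{
	unsigned long snapshot;
	int result;

	do {
		snapshot = atomic_load(&mmu_notifier_seq);
		/* Slow, sleepable work happens here, outside any lock. */
		result = 42; /* e.g. translate an address, map a page, ... */
	} while (retry_needed(snapshot));

	return result;
}

int main(void)
{
	printf("%d\n", lookup_with_retry());
	return 0;
}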
@@ -146,9 +241,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 	unsigned long page_offset = gpa & ~PAGE_MASK;
 	kvm_pfn_t old_pfn, new_pfn;
 	unsigned long old_uhva;
-	gpa_t old_gpa;
 	void *old_khva;
-	bool old_valid;
 	int ret = 0;
 
 	/*
@@ -158,13 +251,18 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 	if (page_offset + len > PAGE_SIZE)
 		return -EINVAL;
 
+	/*
+	 * If another task is refreshing the cache, wait for it to complete.
+	 * There is no guarantee that concurrent refreshes will see the same
+	 * gpa, memslots generation, etc..., so they must be fully serialized.
+	 */
+	mutex_lock(&gpc->refresh_lock);
+
 	write_lock_irq(&gpc->lock);
 
-	old_gpa = gpc->gpa;
 	old_pfn = gpc->pfn;
 	old_khva = gpc->khva - offset_in_page(gpc->khva);
 	old_uhva = gpc->uhva;
-	old_valid = gpc->valid;
 
 	/* If the userspace HVA is invalid, refresh that first */
 	if (gpc->gpa != gpa || gpc->generation != slots->generation ||
|
@ -177,64 +275,17 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
|
|||
gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);
|
||||
|
||||
if (kvm_is_error_hva(gpc->uhva)) {
|
||||
gpc->pfn = KVM_PFN_ERR_FAULT;
|
||||
ret = -EFAULT;
|
||||
goto out;
|
||||
}
|
||||
|
||||
gpc->uhva += page_offset;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the userspace HVA changed or the PFN was already invalid,
|
||||
* drop the lock and do the HVA to PFN lookup again.
|
||||
*/
|
||||
if (!old_valid || old_uhva != gpc->uhva) {
|
||||
unsigned long uhva = gpc->uhva;
|
||||
void *new_khva = NULL;
|
||||
|
||||
/* Placeholders for "hva is valid but not yet mapped" */
|
||||
gpc->pfn = KVM_PFN_ERR_FAULT;
|
||||
gpc->khva = NULL;
|
||||
gpc->valid = true;
|
||||
|
||||
write_unlock_irq(&gpc->lock);
|
||||
|
||||
new_pfn = hva_to_pfn_retry(kvm, uhva);
|
||||
if (is_error_noslot_pfn(new_pfn)) {
|
||||
ret = -EFAULT;
|
||||
goto map_done;
|
||||
}
|
||||
|
||||
if (gpc->usage & KVM_HOST_USES_PFN) {
|
||||
if (new_pfn == old_pfn) {
|
||||
new_khva = old_khva;
|
||||
old_pfn = KVM_PFN_ERR_FAULT;
|
||||
old_khva = NULL;
|
||||
} else if (pfn_valid(new_pfn)) {
|
||||
new_khva = kmap(pfn_to_page(new_pfn));
|
||||
#ifdef CONFIG_HAS_IOMEM
|
||||
} else {
|
||||
new_khva = memremap(pfn_to_hpa(new_pfn), PAGE_SIZE, MEMREMAP_WB);
|
||||
#endif
|
||||
}
|
||||
if (new_khva)
|
||||
new_khva += page_offset;
|
||||
else
|
||||
ret = -EFAULT;
|
||||
}
|
||||
|
||||
map_done:
|
||||
write_lock_irq(&gpc->lock);
|
||||
if (ret) {
|
||||
gpc->valid = false;
|
||||
gpc->pfn = KVM_PFN_ERR_FAULT;
|
||||
gpc->khva = NULL;
|
||||
} else {
|
||||
/* At this point, gpc->valid may already have been cleared */
|
||||
gpc->pfn = new_pfn;
|
||||
gpc->khva = new_khva;
|
||||
}
|
||||
if (!gpc->valid || old_uhva != gpc->uhva) {
|
||||
ret = hva_to_pfn_retry(kvm, gpc);
|
||||
} else {
|
||||
/* If the HVA→PFN mapping was already valid, don't unmap it. */
|
||||
old_pfn = KVM_PFN_ERR_FAULT;
|
||||
|
|
@@ -242,9 +293,26 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 	}
 
 out:
+	/*
+	 * Invalidate the cache and purge the pfn/khva if the refresh failed.
+	 * Some/all of the uhva, gpa, and memslot generation info may still be
+	 * valid, leave it as is.
+	 */
+	if (ret) {
+		gpc->valid = false;
+		gpc->pfn = KVM_PFN_ERR_FAULT;
+		gpc->khva = NULL;
+	}
+
+	/* Snapshot the new pfn before dropping the lock! */
+	new_pfn = gpc->pfn;
+
 	write_unlock_irq(&gpc->lock);
 
-	__release_gpc(kvm, old_pfn, old_khva, old_gpa);
+	mutex_unlock(&gpc->refresh_lock);
+
+	if (old_pfn != new_pfn)
+		gpc_unmap_khva(kvm, old_pfn, old_khva);
 
 	return ret;
 }
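Refreshes are now fully serialized by gpc->refresh_lock while readers still only take gpc->lock, so the read side keeps the familiar check/refresh loop. A hedged consumer sketch follows, assuming the 5.20-era kvm_gfn_to_pfn_cache_check()/kvm_gfn_to_pfn_cache_refresh() signatures that take (kvm, gpc, gpa, len); the wrapper function and the u64 payload are invented for illustration and are not part of this series.

#include <linux/kvm_host.h>

/* Illustrative consumer, loosely modeled on existing gfn_to_pfn_cache users. */
static int example_read_u64(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
			    gpa_t gpa, u64 *val)
{
	read_lock_irq(&gpc->lock);
	while (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpa, sizeof(*val))) {
		read_unlock_irq(&gpc->lock);

		/* May sleep: faults the page in and remaps gpc->khva. */
		if (kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, sizeof(*val)))
			return -EFAULT;

		read_lock_irq(&gpc->lock);
	}

	*val = *(u64 *)gpc->khva;
	read_unlock_irq(&gpc->lock);
	return 0;
}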
@@ -254,14 +322,13 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 {
 	void *old_khva;
 	kvm_pfn_t old_pfn;
-	gpa_t old_gpa;
 
+	mutex_lock(&gpc->refresh_lock);
 	write_lock_irq(&gpc->lock);
 
 	gpc->valid = false;
 
 	old_khva = gpc->khva - offset_in_page(gpc->khva);
-	old_gpa = gpc->gpa;
 	old_pfn = gpc->pfn;
 
 	/*
@@ -272,8 +339,9 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 	gpc->pfn = KVM_PFN_ERR_FAULT;
 
 	write_unlock_irq(&gpc->lock);
+	mutex_unlock(&gpc->refresh_lock);
 
-	__release_gpc(kvm, old_pfn, old_khva, old_gpa);
+	gpc_unmap_khva(kvm, old_pfn, old_khva);
 }
 EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap);
 
@@ -286,6 +354,7 @@ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 
 	if (!gpc->active) {
 		rwlock_init(&gpc->lock);
+		mutex_init(&gpc->refresh_lock);
 
 		gpc->khva = NULL;
 		gpc->pfn = KVM_PFN_ERR_FAULT;