@@ -512,6 +512,12 @@ static void kvm_null_fn(void)
 }
 #define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)
 
+/* Iterate over each memslot intersecting [start, last] (inclusive) range */
+#define kvm_for_each_memslot_in_hva_range(node, slots, start, last)	     \
+	for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
+	     node;							     \
+	     node = interval_tree_iter_next(node, start, last))		     \
+
 static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
 						  const struct kvm_hva_range *range)
 {
@@ -521,6 +527,9 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
 	struct kvm_memslots *slots;
 	int i, idx;
 
+	if (WARN_ON_ONCE(range->end <= range->start))
+		return 0;
+
 	/* A null handler is allowed if and only if on_lock() is provided. */
 	if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
 			 IS_KVM_NULL_FN(range->handler)))
@@ -529,15 +538,17 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
 	idx = srcu_read_lock(&kvm->srcu);
 
 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+		struct interval_tree_node *node;
+
 		slots = __kvm_memslots(kvm, i);
-		kvm_for_each_memslot(slot, slots) {
+		kvm_for_each_memslot_in_hva_range(node, slots,
+						  range->start, range->end - 1) {
 			unsigned long hva_start, hva_end;
 
+			slot = container_of(node, struct kvm_memory_slot, hva_node);
 			hva_start = max(range->start, slot->userspace_addr);
 			hva_end = min(range->end, slot->userspace_addr +
 						  (slot->npages << PAGE_SHIFT));
-			if (hva_start >= hva_end)
-				continue;
 
 			/*
 			 * To optimize for the likely case where the address
@@ -873,6 +884,7 @@ static struct kvm_memslots *kvm_alloc_memslots(void)
 	if (!slots)
 		return NULL;
 
+	slots->hva_tree = RB_ROOT_CACHED;
 	hash_init(slots->id_hash);
 
 	return slots;
@@ -1277,21 +1289,28 @@ static void kvm_replace_memslot(struct kvm_memslots *slots,
 				struct kvm_memory_slot *new)
 {
 	/*
-	 * Remove the old memslot from the hash list, copying the node data
-	 * would corrupt the list.
+	 * Remove the old memslot from the hash list and interval tree; copying
+	 * the node data would corrupt the structures.
 	 */
 	if (old) {
 		hash_del(&old->id_node);
+		interval_tree_remove(&old->hva_node, &slots->hva_tree);
 
 		if (!new)
 			return;
 
 		/* Copy the source *data*, not the pointer, to the destination. */
 		*new = *old;
+	} else {
+		/* If @old is NULL, initialize @new's hva range. */
+		new->hva_node.start = new->userspace_addr;
+		new->hva_node.last = new->userspace_addr +
+			(new->npages << PAGE_SHIFT) - 1;
 	}
 
 	/* (Re)Add the new memslot. */
 	hash_add(slots->id_hash, &new->id_node, new->id);
+	interval_tree_insert(&new->hva_node, &slots->hva_tree);
 }
 
 static void kvm_shift_memslot(struct kvm_memslots *slots, int dst, int src)
@@ -1322,7 +1341,7 @@ static inline void kvm_memslot_delete(struct kvm_memslots *slots,
 		atomic_set(&slots->last_used_slot, 0);
 
 	/*
-	 * Remove the to-be-deleted memslot from the list _before_ shifting
+	 * Remove the to-be-deleted memslot from the list/tree _before_ shifting
 	 * the trailing memslots forward, its data will be overwritten.
 	 * Defer the (somewhat pointless) copying of the memslot until after
 	 * the last slot has been shifted to avoid overwriting said last slot.
@@ -1349,7 +1368,8 @@ static inline int kvm_memslot_insert_back(struct kvm_memslots *slots)
  * itself is not preserved in the array, i.e. not swapped at this time, only
  * its new index into the array is tracked.  Returns the changed memslot's
  * current index into the memslots array.
- * The memslot at the returned index will not be in @slots->id_hash by then.
+ * The memslot at the returned index will not be in @slots->hva_tree or
+ * @slots->id_hash by then.
  * @memslot is a detached struct with desired final data of the changed slot.
  */
 static inline int kvm_memslot_move_backward(struct kvm_memslots *slots,
@@ -1363,10 +1383,10 @@ static inline int kvm_memslot_move_backward(struct kvm_memslots *slots,
 		return -1;
 
 	/*
-	 * Delete the slot from the hash table before sorting the remaining
-	 * slots, the slot's data may be overwritten when copying slots as part
-	 * of the sorting proccess.  update_memslots() will unconditionally
-	 * rewrite the entire slot and re-add it to the hash table.
+	 * Delete the slot from the hash table and interval tree before sorting
+	 * the remaining slots; the slot's data may be overwritten when copying
+	 * slots as part of the sorting process.  update_memslots() will
+	 * unconditionally rewrite and re-add the entire slot.
 	 */
 	kvm_replace_memslot(slots, oldslot, NULL);
 
@@ -1392,10 +1412,12 @@ static inline int kvm_memslot_move_backward(struct kvm_memslots *slots,
  * is not preserved in the array, i.e. not swapped at this time, only its new
  * index into the array is tracked.  Returns the changed memslot's final index
  * into the memslots array.
- * The memslot at the returned index will not be in @slots->id_hash by then.
+ * The memslot at the returned index will not be in @slots->hva_tree or
+ * @slots->id_hash by then.
  * @memslot is a detached struct with desired final data of the new or
  * changed slot.
- * Assumes that the memslot at @start index is not in @slots->id_hash.
+ * Assumes that the memslot at @start index is not in @slots->hva_tree or
+ * @slots->id_hash.
  */
 static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
 					   struct kvm_memory_slot *memslot,
@@ -1588,9 +1610,12 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
 
 	memcpy(slots, old, kvm_memslots_size(old->used_slots));
 
+	slots->hva_tree = RB_ROOT_CACHED;
 	hash_init(slots->id_hash);
-	kvm_for_each_memslot(memslot, slots)
+	kvm_for_each_memslot(memslot, slots) {
+		interval_tree_insert(&memslot->hva_node, &slots->hva_tree);
 		hash_add(slots->id_hash, &memslot->id_node, memslot->id);
+	}
 
 	return slots;
 }
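
Note for readers: the lookup added above is built on the kernel's generic
interval tree (<linux/interval_tree.h>, an augmented rbtree), whose nodes
store *inclusive* [start, last] endpoints; that is why the new macro is
invoked with range->end - 1. Below is a minimal kernel-style sketch of that
API outside of KVM. It is illustrative only, not part of this patch: the
example_* names are hypothetical and the code only compiles inside the
kernel tree.

/*
 * Sketch of the interval tree usage pattern relied on by this patch
 * (hypothetical example_* names; inclusive [start, last] endpoints).
 */
#include <linux/interval_tree.h>
#include <linux/kernel.h>

struct example_slot {
	unsigned long base;		/* first byte of the tracked range */
	unsigned long size;		/* length of the range in bytes */
	struct interval_tree_node node;	/* embedded node, as hva_node above */
};

static struct rb_root_cached example_tree = RB_ROOT_CACHED;

/* Insert a slot; the tree stores inclusive [start, last], hence the -1. */
static void example_track(struct example_slot *s)
{
	s->node.start = s->base;
	s->node.last = s->base + s->size - 1;
	interval_tree_insert(&s->node, &example_tree);
}

/* Visit every slot overlapping [start, end); mirrors the macro above. */
static void example_walk(unsigned long start, unsigned long end)
{
	struct interval_tree_node *node;

	for (node = interval_tree_iter_first(&example_tree, start, end - 1);
	     node;
	     node = interval_tree_iter_next(node, start, end - 1)) {
		struct example_slot *s =
			container_of(node, struct example_slot, node);

		pr_info("overlap at [%lx, %lx]\n", s->node.start, s->node.last);
	}
}

/* Detach a slot before modifying or freeing it, as kvm_replace_memslot() does. */
static void example_untrack(struct example_slot *s)
{
	interval_tree_remove(&s->node, &example_tree);
}

Each range query costs O(log n) plus the number of overlapping nodes, which
is what lets __kvm_handle_hva_range() visit only the memslots that can
intersect the notifier range instead of scanning every slot.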