kvm: Refactor handling of VM debugfs files
We can store a reference to the kvm_stats_debugfs_item instead of copying its values into kvm_stat_data. This allows us to remove duplicated code and the use of a temporary kvm_stat_data inside vm_stat_get et al.

Signed-off-by: Milan Pandurov <milanpa@amazon.de>
Reviewed-by: Alexander Graf <graf@amazon.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in: parent e30a7d623d, commit 09cbcef6c6
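The gist of the change, condensed from the hunks below (a side-by-side sketch, not a standalone compilable unit): each per-VM debugfs file used to carry its own copy of the item's offset and mode; it now keeps a pointer to the kvm_stats_debugfs_item and derives the effective file mode on demand.

/* Before: kvm_stat_data duplicated fields of the debugfs item. */
struct kvm_stat_data {
	int offset;		/* copied from kvm_stats_debugfs_item */
	int mode;		/* copied, with the 0644 default already applied */
	struct kvm *kvm;
};

/* After: reference the item and compute the mode where it is needed. */
struct kvm_stat_data {
	struct kvm *kvm;
	struct kvm_stats_debugfs_item *dbgfs_item;
};

#define KVM_DBGFS_GET_MODE(dbgfs_item) \
	((dbgfs_item)->mode ? (dbgfs_item)->mode : 0644)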
@@ -1108,9 +1108,8 @@ enum kvm_stat_kind {
 };

 struct kvm_stat_data {
-	int offset;
-	int mode;
 	struct kvm *kvm;
+	struct kvm_stats_debugfs_item *dbgfs_item;
 };

 struct kvm_stats_debugfs_item {
@@ -1119,6 +1118,10 @@ struct kvm_stats_debugfs_item {
 	enum kvm_stat_kind kind;
 	int mode;
 };
+
+#define KVM_DBGFS_GET_MODE(dbgfs_item) \
+	((dbgfs_item)->mode ? (dbgfs_item)->mode : 0644)
+
 extern struct kvm_stats_debugfs_item debugfs_entries[];
 extern struct dentry *kvm_debugfs_dir;

@@ -113,7 +113,7 @@ struct dentry *kvm_debugfs_dir;
 EXPORT_SYMBOL_GPL(kvm_debugfs_dir);

 static int kvm_debugfs_num_entries;
-static const struct file_operations *stat_fops_per_vm[];
+static const struct file_operations stat_fops_per_vm;

 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
 			   unsigned long arg);
@@ -650,11 +650,11 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
 			return -ENOMEM;

 		stat_data->kvm = kvm;
-		stat_data->offset = p->offset;
-		stat_data->mode = p->mode ? p->mode : 0644;
+		stat_data->dbgfs_item = p;
 		kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
-		debugfs_create_file(p->name, stat_data->mode, kvm->debugfs_dentry,
-				    stat_data, stat_fops_per_vm[p->kind]);
+		debugfs_create_file(p->name, KVM_DBGFS_GET_MODE(p),
+				    kvm->debugfs_dentry, stat_data,
+				    &stat_fops_per_vm);
 	}
 	return 0;
 }
@@ -4010,8 +4010,9 @@ static int kvm_debugfs_open(struct inode *inode, struct file *file,
 		return -ENOENT;

 	if (simple_attr_open(inode, file, get,
-			     stat_data->mode & S_IWUGO ? set : NULL,
-			     fmt)) {
+		    KVM_DBGFS_GET_MODE(stat_data->dbgfs_item) & 0222
+		    ? set : NULL,
+		    fmt)) {
 		kvm_put_kvm(stat_data->kvm);
 		return -ENOMEM;
 	}
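One detail in the open path above: the writability test no longer reads a cached per-file mode but recomputes it with KVM_DBGFS_GET_MODE() and masks it with 0222, which covers exactly the same bits as the old S_IWUGO check. A tiny self-contained check of that identity, assuming the userspace <sys/stat.h> counterparts of the kernel's S_IW* macros:

#include <assert.h>
#include <sys/stat.h>	/* userspace definitions of S_IWUSR/S_IWGRP/S_IWOTH */

int main(void)
{
	/* S_IWUGO expands to S_IWUSR | S_IWGRP | S_IWOTH, i.e. octal 0222,
	 * so "derived mode & 0222" asks the same question as the old
	 * "cached mode & S_IWUGO": may anyone write to this stats file? */
	assert((S_IWUSR | S_IWGRP | S_IWOTH) == 0222);
	return 0;
}

Only files for which that mask is non-zero get a "set" (clear) handler passed to simple_attr_open(); read-only stats files keep a NULL setter, as before.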
@@ -4030,105 +4031,111 @@ static int kvm_debugfs_release(struct inode *inode, struct file *file)
 	return 0;
 }

-static int vm_stat_get_per_vm(void *data, u64 *val)
+static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
 {
-	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
-
-	*val = *(ulong *)((void *)stat_data->kvm + stat_data->offset);
+	*val = *(ulong *)((void *)kvm + offset);

 	return 0;
 }

-static int vm_stat_clear_per_vm(void *data, u64 val)
+static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
 {
-	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
-
-	if (val)
-		return -EINVAL;
-
-	*(ulong *)((void *)stat_data->kvm + stat_data->offset) = 0;
+	*(ulong *)((void *)kvm + offset) = 0;

 	return 0;
 }

-static int vm_stat_get_per_vm_open(struct inode *inode, struct file *file)
-{
-	__simple_attr_check_format("%llu\n", 0ull);
-	return kvm_debugfs_open(inode, file, vm_stat_get_per_vm,
-				vm_stat_clear_per_vm, "%llu\n");
-}
-
-static const struct file_operations vm_stat_get_per_vm_fops = {
-	.owner   = THIS_MODULE,
-	.open    = vm_stat_get_per_vm_open,
-	.release = kvm_debugfs_release,
-	.read    = simple_attr_read,
-	.write   = simple_attr_write,
-	.llseek  = no_llseek,
-};
-
-static int vcpu_stat_get_per_vm(void *data, u64 *val)
+static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
 {
 	int i;
-	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
 	struct kvm_vcpu *vcpu;

 	*val = 0;

-	kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
-		*val += *(u64 *)((void *)vcpu + stat_data->offset);
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		*val += *(u64 *)((void *)vcpu + offset);

 	return 0;
 }

-static int vcpu_stat_clear_per_vm(void *data, u64 val)
+static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
 {
 	int i;
-	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
 	struct kvm_vcpu *vcpu;

+	kvm_for_each_vcpu(i, vcpu, kvm)
+		*(u64 *)((void *)vcpu + offset) = 0;
+
+	return 0;
+}
+
+static int kvm_stat_data_get(void *data, u64 *val)
+{
+	int r = -EFAULT;
+	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
+
+	switch (stat_data->dbgfs_item->kind) {
+	case KVM_STAT_VM:
+		r = kvm_get_stat_per_vm(stat_data->kvm,
+					stat_data->dbgfs_item->offset, val);
+		break;
+	case KVM_STAT_VCPU:
+		r = kvm_get_stat_per_vcpu(stat_data->kvm,
+					stat_data->dbgfs_item->offset, val);
+		break;
+	}
+
+	return r;
+}
+
+static int kvm_stat_data_clear(void *data, u64 val)
+{
+	int r = -EFAULT;
+	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
+
 	if (val)
 		return -EINVAL;

-	kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
-		*(u64 *)((void *)vcpu + stat_data->offset) = 0;
+	switch (stat_data->dbgfs_item->kind) {
+	case KVM_STAT_VM:
+		r = kvm_clear_stat_per_vm(stat_data->kvm,
+					stat_data->dbgfs_item->offset);
+		break;
+	case KVM_STAT_VCPU:
+		r = kvm_clear_stat_per_vcpu(stat_data->kvm,
+					stat_data->dbgfs_item->offset);
+		break;
+	}

-	return 0;
+	return r;
 }

-static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file)
+static int kvm_stat_data_open(struct inode *inode, struct file *file)
 {
 	__simple_attr_check_format("%llu\n", 0ull);
-	return kvm_debugfs_open(inode, file, vcpu_stat_get_per_vm,
-				vcpu_stat_clear_per_vm, "%llu\n");
+	return kvm_debugfs_open(inode, file, kvm_stat_data_get,
+				kvm_stat_data_clear, "%llu\n");
 }

-static const struct file_operations vcpu_stat_get_per_vm_fops = {
-	.owner   = THIS_MODULE,
-	.open    = vcpu_stat_get_per_vm_open,
+static const struct file_operations stat_fops_per_vm = {
+	.owner   = THIS_MODULE,
+	.open    = kvm_stat_data_open,
 	.release = kvm_debugfs_release,
-	.read    = simple_attr_read,
-	.write   = simple_attr_write,
-	.llseek  = no_llseek,
-};
-
-static const struct file_operations *stat_fops_per_vm[] = {
-	[KVM_STAT_VCPU] = &vcpu_stat_get_per_vm_fops,
-	[KVM_STAT_VM] = &vm_stat_get_per_vm_fops,
+	.read    = simple_attr_read,
+	.write   = simple_attr_write,
+	.llseek  = no_llseek,
 };

 static int vm_stat_get(void *_offset, u64 *val)
 {
 	unsigned offset = (long)_offset;
 	struct kvm *kvm;
-	struct kvm_stat_data stat_tmp = {.offset = offset};
 	u64 tmp_val;

 	*val = 0;
 	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
-		stat_tmp.kvm = kvm;
-		vm_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
+		kvm_get_stat_per_vm(kvm, offset, &tmp_val);
 		*val += tmp_val;
 	}
 	mutex_unlock(&kvm_lock);
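With the item pointer stored in kvm_stat_data, a single file_operations (stat_fops_per_vm) can back every per-VM stats file: the open path registers kvm_stat_data_get/kvm_stat_data_clear, which dispatch on dbgfs_item->kind, so the per-kind fops array and the two dedicated open helpers go away. A rough sketch of the resulting call flow, written as a comment over the functions in the hunk above (not literal kernel code):

/*
 * open(per-VM stats file)  -> kvm_stat_data_open()
 *   -> kvm_debugfs_open(inode, file, kvm_stat_data_get,
 *                       kvm_stat_data_clear, "%llu\n")
 * read()                   -> simple_attr_read()
 *   -> kvm_stat_data_get(stat_data, &val)
 *        case KVM_STAT_VM:   kvm_get_stat_per_vm(kvm, item->offset, &val)
 *        case KVM_STAT_VCPU: kvm_get_stat_per_vcpu(kvm, item->offset, &val)
 * write("0")               -> simple_attr_write()
 *   -> kvm_stat_data_clear(stat_data, 0), same dispatch on item->kind
 */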
@@ -4139,15 +4146,13 @@ static int vm_stat_clear(void *_offset, u64 val)
 {
 	unsigned offset = (long)_offset;
 	struct kvm *kvm;
-	struct kvm_stat_data stat_tmp = {.offset = offset};

 	if (val)
 		return -EINVAL;

 	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
-		stat_tmp.kvm = kvm;
-		vm_stat_clear_per_vm((void *)&stat_tmp, 0);
+		kvm_clear_stat_per_vm(kvm, offset);
 	}
 	mutex_unlock(&kvm_lock);

@@ -4160,14 +4165,12 @@ static int vcpu_stat_get(void *_offset, u64 *val)
 {
 	unsigned offset = (long)_offset;
 	struct kvm *kvm;
-	struct kvm_stat_data stat_tmp = {.offset = offset};
 	u64 tmp_val;

 	*val = 0;
 	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
-		stat_tmp.kvm = kvm;
-		vcpu_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
+		kvm_get_stat_per_vcpu(kvm, offset, &tmp_val);
 		*val += tmp_val;
 	}
 	mutex_unlock(&kvm_lock);
@@ -4178,15 +4181,13 @@ static int vcpu_stat_clear(void *_offset, u64 val)
 {
 	unsigned offset = (long)_offset;
 	struct kvm *kvm;
-	struct kvm_stat_data stat_tmp = {.offset = offset};

 	if (val)
 		return -EINVAL;

 	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
-		stat_tmp.kvm = kvm;
-		vcpu_stat_clear_per_vm((void *)&stat_tmp, 0);
+		kvm_clear_stat_per_vcpu(kvm, offset);
 	}
 	mutex_unlock(&kvm_lock);

@@ -4259,9 +4260,8 @@ static void kvm_init_debug(void)

 	kvm_debugfs_num_entries = 0;
 	for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) {
-		int mode = p->mode ? p->mode : 0644;
-		debugfs_create_file(p->name, mode, kvm_debugfs_dir,
-				    (void *)(long)p->offset,
+		debugfs_create_file(p->name, KVM_DBGFS_GET_MODE(p),
+				    kvm_debugfs_dir, (void *)(long)p->offset,
 				    stat_fops[p->kind]);
 	}
 }