vfio: Move vfio_device_assign_container() into vfio_device_first_open()

The only thing this function does is assert that the group has an
assigned container and increment refcounts.

The overall model we have is that once a container_users refcount is
incremented the container cannot be de-assigned from the group:
vfio_group_ioctl_unset_container() will fail and the group FD cannot be
closed.
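
Illustratively, the pairing behind that model is as follows. This is a
simplified sketch only; the exact upstream body of
vfio_device_assign_container() is not part of this diff and its checks
are condensed here:

  /* Sketch, not the real helper: checks reduced to the essentials. */
  int vfio_device_assign_container(struct vfio_device *device)
  {
          struct vfio_group *group = device->group;

          /* refuse if the group has no container assigned */
          if (!group->container)
                  return -EINVAL;

          group->container_users++;
          /* matched by the fput() in vfio_device_unassign_container() */
          get_file(group->opened_file);
          return 0;
  }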

Thus we do not need to check this on every device FD open, just the
first. Reorganize the code so that only the first open and last close
manage the container.
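
For reference, the resulting flow looks roughly like this (a condensed
sketch assembled from the diff below; the module refcounting taken
earlier in the first open and other surrounding details are omitted):

  static int vfio_device_first_open(struct vfio_device *device)
  {
          int ret;

          mutex_lock(&device->group->group_lock);
          ret = vfio_device_assign_container(device);
          if (ret)
                  goto err_module_put;

          device->kvm = device->group->kvm;
          if (device->ops->open_device) {
                  ret = device->ops->open_device(device);
                  if (ret)
                          goto err_container;
          }
          vfio_device_container_register(device);
          mutex_unlock(&device->group->group_lock);
          return 0;

  err_container:
          device->kvm = NULL;
          vfio_device_unassign_container(device);
  err_module_put:
          mutex_unlock(&device->group->group_lock);
          module_put(device->dev->driver->owner);
          return ret;
  }

  static void vfio_device_last_close(struct vfio_device *device)
  {
          mutex_lock(&device->group->group_lock);
          if (device->ops->close_device)
                  device->ops->close_device(device);
          device->kvm = NULL;
          vfio_device_unassign_container(device);
          mutex_unlock(&device->group->group_lock);
          module_put(device->dev->driver->owner);
  }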

Link: https://lore.kernel.org/r/2-v4-42cd2eb0e3eb+335a-vfio_iommufd_jgg@nvidia.com
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Yi Liu <yi.l.liu@intel.com>
Reviewed-by: Alex Williamson <alex.williamson@redhat.com>
Tested-by: Alex Williamson <alex.williamson@redhat.com>
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Tested-by: Yi Liu <yi.l.liu@intel.com>
Tested-by: Lixiao Yang <lixiao.yang@intel.com>
Tested-by: Matthew Rosato <mjrosato@linux.ibm.com>
Tested-by: Yu He <yu.he@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

@@ -531,11 +531,11 @@ int vfio_device_assign_container(struct vfio_device *device)
 
 void vfio_device_unassign_container(struct vfio_device *device)
 {
-        mutex_lock(&device->group->group_lock);
+        lockdep_assert_held_write(&device->group->group_lock);
+
         WARN_ON(device->group->container_users <= 1);
         device->group->container_users--;
         fput(device->group->opened_file);
-        mutex_unlock(&device->group->group_lock);
 }
 
 /*

@@ -749,18 +749,24 @@ static int vfio_device_first_open(struct vfio_device *device)
          * during close_device.
          */
         mutex_lock(&device->group->group_lock);
+        ret = vfio_device_assign_container(device);
+        if (ret)
+                goto err_module_put;
+
         device->kvm = device->group->kvm;
         if (device->ops->open_device) {
                 ret = device->ops->open_device(device);
                 if (ret)
-                        goto err_module_put;
+                        goto err_container;
         }
         vfio_device_container_register(device);
         mutex_unlock(&device->group->group_lock);
         return 0;
 
-err_module_put:
+err_container:
         device->kvm = NULL;
+        vfio_device_unassign_container(device);
+err_module_put:
         mutex_unlock(&device->group->group_lock);
         module_put(device->dev->driver->owner);
         return ret;
@@ -775,6 +781,7 @@ static void vfio_device_last_close(struct vfio_device *device)
         if (device->ops->close_device)
                 device->ops->close_device(device);
         device->kvm = NULL;
+        vfio_device_unassign_container(device);
         mutex_unlock(&device->group->group_lock);
         module_put(device->dev->driver->owner);
 }
@@ -784,18 +791,12 @@ static struct file *vfio_device_open(struct vfio_device *device)
         struct file *filep;
         int ret;
 
-        mutex_lock(&device->group->group_lock);
-        ret = vfio_device_assign_container(device);
-        mutex_unlock(&device->group->group_lock);
-        if (ret)
-                return ERR_PTR(ret);
-
         mutex_lock(&device->dev_set->lock);
         device->open_count++;
         if (device->open_count == 1) {
                 ret = vfio_device_first_open(device);
                 if (ret)
-                        goto err_unassign_container;
+                        goto err_unlock;
         }
         mutex_unlock(&device->dev_set->lock);
 
@@ -830,10 +831,9 @@ err_close_device:
         mutex_lock(&device->dev_set->lock);
         if (device->open_count == 1)
                 vfio_device_last_close(device);
-err_unassign_container:
+err_unlock:
         device->open_count--;
         mutex_unlock(&device->dev_set->lock);
-        vfio_device_unassign_container(device);
         return ERR_PTR(ret);
 }
 
@@ -1040,8 +1040,6 @@ static int vfio_device_fops_release(struct inode *inode, struct file *filep)
         device->open_count--;
         mutex_unlock(&device->dev_set->lock);
 
-        vfio_device_unassign_container(device);
-
         vfio_device_put_registration(device);
 
         return 0;