xen-blkfront: make blkif_io_lock spinlock per-device
This patch moves the global blkif_io_lock into the per-device blkfront_info structure. The spinlock seems to exist for two reasons: to disable IRQs in blkfront's interrupt handlers, and to protect the blkfront VBDs when a detachment is requested.

A global blkif_io_lock makes no sense for this use case, and it drastically hinders performance due to contention: every VBD with pending I/O has to take the same lock to get any work done, which serializes everything badly.

Signed-off-by: Steven Noonan <snoonan@amazon.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
commit 3467811e26
parent dad5cf659b
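The change itself is a standard kernel pattern: hoist a file-scope lock into the per-device structure, so that independent devices never contend for it. A minimal sketch of the before-and-after shape, using a hypothetical mydev type rather than the driver's real blkfront_info:

#include <linux/spinlock.h>

/* Before: one file-scope lock shared by every device instance. */
static DEFINE_SPINLOCK(global_io_lock);

/* After: each device carries its own lock. */
struct mydev {
        spinlock_t io_lock;     /* guards this device's queue/ring state */
        /* ... remaining per-device state ... */
};

static void mydev_init(struct mydev *d)
{
        /* Must run before the lock is reachable from IRQ or workqueue
         * context; the patch does this in blkfront_probe(). */
        spin_lock_init(&d->io_lock);
}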
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -82,6 +82,7 @@ static const struct block_device_operations xlvbd_block_fops;
  */
 struct blkfront_info
 {
+        spinlock_t io_lock;
         struct mutex mutex;
         struct xenbus_device *xbdev;
         struct gendisk *gd;
@@ -106,8 +107,6 @@ struct blkfront_info
         int is_ready;
 };
 
-static DEFINE_SPINLOCK(blkif_io_lock);
-
 static unsigned int nr_minors;
 static unsigned long *minors;
 static DEFINE_SPINLOCK(minor_lock);
@@ -418,7 +417,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
         struct request_queue *rq;
         struct blkfront_info *info = gd->private_data;
 
-        rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
+        rq = blk_init_queue(do_blkif_request, &info->io_lock);
         if (rq == NULL)
                 return -1;
 
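The second argument to blk_init_queue() (the old request-function API this driver used at the time) is the spinlock the block layer acquires around the request function and queue manipulation, so passing &info->io_lock means each VBD's queue serializes only against that device's own interrupt and teardown paths. A hedged sketch of the call shape, reusing the hypothetical mydev type from above:

#include <linux/blkdev.h>

/* The block layer will call do_request() with d->io_lock held. */
static struct request_queue *mydev_init_queue(struct mydev *d,
                                              request_fn_proc *do_request)
{
        return blk_init_queue(do_request, &d->io_lock);
}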
@@ -635,14 +634,14 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
         if (info->rq == NULL)
                 return;
 
-        spin_lock_irqsave(&blkif_io_lock, flags);
+        spin_lock_irqsave(&info->io_lock, flags);
 
         /* No more blkif_request(). */
         blk_stop_queue(info->rq);
 
         /* No more gnttab callback work. */
         gnttab_cancel_free_callback(&info->callback);
-        spin_unlock_irqrestore(&blkif_io_lock, flags);
+        spin_unlock_irqrestore(&info->io_lock, flags);
 
         /* Flush gnttab callback work. Must be done with no locks held. */
         flush_work_sync(&info->work);
@@ -674,16 +673,16 @@ static void blkif_restart_queue(struct work_struct *work)
 {
         struct blkfront_info *info = container_of(work, struct blkfront_info, work);
 
-        spin_lock_irq(&blkif_io_lock);
+        spin_lock_irq(&info->io_lock);
         if (info->connected == BLKIF_STATE_CONNECTED)
                 kick_pending_request_queues(info);
-        spin_unlock_irq(&blkif_io_lock);
+        spin_unlock_irq(&info->io_lock);
 }
 
 static void blkif_free(struct blkfront_info *info, int suspend)
 {
         /* Prevent new requests being issued until we fix things up. */
-        spin_lock_irq(&blkif_io_lock);
+        spin_lock_irq(&info->io_lock);
         info->connected = suspend ?
                 BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
         /* No more blkif_request(). */
@@ -691,7 +690,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
                 blk_stop_queue(info->rq);
         /* No more gnttab callback work. */
         gnttab_cancel_free_callback(&info->callback);
-        spin_unlock_irq(&blkif_io_lock);
+        spin_unlock_irq(&info->io_lock);
 
         /* Flush gnttab callback work. Must be done with no locks held. */
         flush_work_sync(&info->work);
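Note the ordering both xlvbd_release_gendisk() and blkif_free() preserve: the queue is stopped and the gnttab callback cancelled under the lock, but flush_work_sync() runs with no locks held, because the queued work item (blkif_restart_queue() above) itself begins by taking the same io_lock; flushing while still holding it would deadlock. A sketch of that shape, assuming mydev also carries a work_struct:

#include <linux/workqueue.h>

static void mydev_quiesce(struct mydev *d)
{
        spin_lock_irq(&d->io_lock);
        /* ... stop the request queue, cancel pending callbacks ... */
        spin_unlock_irq(&d->io_lock);

        /* Safe only with the lock dropped: the work function starts
         * with spin_lock_irq(&d->io_lock). */
        flush_work_sync(&d->work);
}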
@@ -727,10 +726,10 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
         struct blkfront_info *info = (struct blkfront_info *)dev_id;
         int error;
 
-        spin_lock_irqsave(&blkif_io_lock, flags);
+        spin_lock_irqsave(&info->io_lock, flags);
 
         if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
-                spin_unlock_irqrestore(&blkif_io_lock, flags);
+                spin_unlock_irqrestore(&info->io_lock, flags);
                 return IRQ_HANDLED;
         }
 
@@ -815,7 +814,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 
         kick_pending_request_queues(info);
 
-        spin_unlock_irqrestore(&blkif_io_lock, flags);
+        spin_unlock_irqrestore(&info->io_lock, flags);
 
         return IRQ_HANDLED;
 }
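This is where the per-device lock pays off: the handler recovers its own device from dev_id and takes only that device's lock, so interrupts for different VBDs no longer serialize on one global spinlock. A sketch of the handler shape under the same hypothetical mydev type:

#include <linux/interrupt.h>

static irqreturn_t mydev_interrupt(int irq, void *dev_id)
{
        struct mydev *d = dev_id;
        unsigned long flags;

        spin_lock_irqsave(&d->io_lock, flags);
        /* ... walk the shared ring and complete finished requests ... */
        spin_unlock_irqrestore(&d->io_lock, flags);

        return IRQ_HANDLED;
}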
@@ -990,6 +989,7 @@ static int blkfront_probe(struct xenbus_device *dev,
         }
 
         mutex_init(&info->mutex);
+        spin_lock_init(&info->io_lock);
         info->xbdev = dev;
         info->vdevice = vdevice;
         info->connected = BLKIF_STATE_DISCONNECTED;
@@ -1067,7 +1067,7 @@ static int blkif_recover(struct blkfront_info *info)
 
         xenbus_switch_state(info->xbdev, XenbusStateConnected);
 
-        spin_lock_irq(&blkif_io_lock);
+        spin_lock_irq(&info->io_lock);
 
         /* Now safe for us to use the shared ring */
         info->connected = BLKIF_STATE_CONNECTED;
@@ -1078,7 +1078,7 @@ static int blkif_recover(struct blkfront_info *info)
         /* Kick any other new requests queued since we resumed */
         kick_pending_request_queues(info);
 
-        spin_unlock_irq(&blkif_io_lock);
+        spin_unlock_irq(&info->io_lock);
 
         return 0;
 }
@@ -1276,10 +1276,10 @@ static void blkfront_connect(struct blkfront_info *info)
         xenbus_switch_state(info->xbdev, XenbusStateConnected);
 
         /* Kick pending requests. */
-        spin_lock_irq(&blkif_io_lock);
+        spin_lock_irq(&info->io_lock);
         info->connected = BLKIF_STATE_CONNECTED;
         kick_pending_request_queues(info);
-        spin_unlock_irq(&blkif_io_lock);
+        spin_unlock_irq(&info->io_lock);
 
         add_disk(info->gd);
 