[IDE] Use the block layer deferred softirq request completion
This patch makes IDE use the new blk_complete_request() interface. There's still room for improvement, as __ide_end_request() really could drop the lock after getting HWGROUP->rq (why does it need to hold it in the first place? If ->rq access isn't serialized, we are screwed anyways).

Signed-off-by: Jens Axboe <axboe@suse.de>
parent 1aea6434ee
commit 8672d57138
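For context, the completion flow this patch moves to looks roughly like the sketch below from a driver's point of view: the hard interrupt handler records the outcome in the request and calls blk_complete_request(), and the block layer later invokes the softirq_done handler registered at queue setup time, so the bulk of the completion work runs in BLOCK_SOFTIRQ context instead of the IRQ handler. The mydrv_* names are hypothetical and for illustration only; the diff below wires the same pattern up for IDE.

/*
 * Sketch of the deferred-completion pattern (illustration only; the
 * mydrv_* names are hypothetical, the blk_* calls are the real API).
 */
#include <linux/blkdev.h>

/* Runs later in BLOCK_SOFTIRQ context, once blk_complete_request() fired. */
static void mydrv_softirq_done(struct request *rq)
{
	request_queue_t *q = rq->q;

	/* Per-segment completion can run without the queue lock held. */
	end_that_request_chunk(rq, rq->errors, rq->data_len);

	/* Only the final bookkeeping needs the queue lock. */
	spin_lock_irq(q->queue_lock);
	end_that_request_last(rq, rq->errors);
	spin_unlock_irq(q->queue_lock);
}

/* Called from the driver's hard interrupt handler when a request finishes. */
static void mydrv_finish_request(struct request *rq, int uptodate,
				 unsigned int nbytes)
{
	/* Stash the outcome where the softirq handler expects to find it. */
	rq->errors = uptodate;
	rq->data_len = nbytes;
	blk_complete_request(rq);	/* defers the rest to BLOCK_SOFTIRQ */
}

/* At queue init time, register the softirq completion handler. */
static void mydrv_init_queue(request_queue_t *q)
{
	blk_queue_softirq_done(q, mydrv_softirq_done);
}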
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -55,9 +55,22 @@
 #include <asm/io.h>
 #include <asm/bitops.h>
 
+void ide_softirq_done(struct request *rq)
+{
+	request_queue_t *q = rq->q;
+
+	add_disk_randomness(rq->rq_disk);
+	end_that_request_chunk(rq, rq->errors, rq->data_len);
+
+	spin_lock_irq(q->queue_lock);
+	end_that_request_last(rq, rq->errors);
+	spin_unlock_irq(q->queue_lock);
+}
+
 int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate,
 		      int nr_sectors)
 {
+	unsigned int nbytes;
 	int ret = 1;
 
 	BUG_ON(!(rq->flags & REQ_STARTED));
@@ -81,17 +94,28 @@ int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate,
 			HWGROUP(drive)->hwif->ide_dma_on(drive);
 	}
 
-	if (!end_that_request_first(rq, uptodate, nr_sectors)) {
-		add_disk_randomness(rq->rq_disk);
-
-		if (blk_rq_tagged(rq))
-			blk_queue_end_tag(drive->queue, rq);
+	/*
+	 * For partial completions (or non fs/pc requests), use the regular
+	 * direct completion path.
+	 */
+	nbytes = nr_sectors << 9;
+	if (rq_all_done(rq, nbytes)) {
+		rq->errors = uptodate;
+		rq->data_len = nbytes;
 		blkdev_dequeue_request(rq);
 		HWGROUP(drive)->rq = NULL;
-		end_that_request_last(rq, uptodate);
+		blk_complete_request(rq);
 		ret = 0;
+	} else {
+		if (!end_that_request_first(rq, uptodate, nr_sectors)) {
+			add_disk_randomness(rq->rq_disk);
+			blkdev_dequeue_request(rq);
+			HWGROUP(drive)->rq = NULL;
+			end_that_request_last(rq, uptodate);
+			ret = 0;
+		}
 	}
 
 	return ret;
 }
 EXPORT_SYMBOL(__ide_end_request);
@@ -113,6 +137,10 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
 	unsigned long flags;
 	int ret = 1;
 
+	/*
+	 * room for locking improvements here, the calls below don't
+	 * need the queue lock held at all
+	 */
 	spin_lock_irqsave(&ide_lock, flags);
 	rq = HWGROUP(drive)->rq;
 
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1011,6 +1011,8 @@ static int ide_init_queue(ide_drive_t *drive)
 	blk_queue_max_hw_segments(q, max_sg_entries);
 	blk_queue_max_phys_segments(q, max_sg_entries);
 
+	blk_queue_softirq_done(q, ide_softirq_done);
+
 	/* assign drive queue */
 	drive->queue = q;
 
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -1001,6 +1001,7 @@ extern int noautodma;
 
 extern int ide_end_request (ide_drive_t *drive, int uptodate, int nrsecs);
 extern int __ide_end_request (ide_drive_t *drive, struct request *rq, int uptodate, int nrsecs);
+extern void ide_softirq_done(struct request *rq);
 
 /*
  * This is used on exit from the driver to designate the next irq handler