scsi: scsi_debug: Refine sdebug_blk_mq_poll()
Refine the sdebug_blk_mq_poll() function so it only takes the spinlock on the queue when it can see one or more requests with the in_use bitmap flag set.

Link: https://lore.kernel.org/r/20220109012853.301953-5-dgilbert@interlog.com
Signed-off-by: Douglas Gilbert <dgilbert@interlog.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
This commit is contained in:
parent
7d5a129b86
commit
b05d4e481e
@@ -7396,6 +7396,7 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
|
|||||||
{
|
{
|
||||||
bool first;
|
bool first;
|
||||||
bool retiring = false;
|
bool retiring = false;
|
||||||
|
bool locked = false;
|
||||||
int num_entries = 0;
|
int num_entries = 0;
|
||||||
unsigned int qc_idx = 0;
|
unsigned int qc_idx = 0;
|
||||||
unsigned long iflags;
|
unsigned long iflags;
|
||||||
@@ -7407,16 +7408,23 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
|
|||||||
struct sdebug_defer *sd_dp;
|
struct sdebug_defer *sd_dp;
|
||||||
|
|
||||||
sqp = sdebug_q_arr + queue_num;
|
sqp = sdebug_q_arr + queue_num;
|
||||||
spin_lock_irqsave(&sqp->qc_lock, iflags);
|
qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
|
||||||
|
if (qc_idx >= sdebug_max_queue)
|
||||||
|
return 0;
|
||||||
|
|
||||||
for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
|
for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
|
||||||
|
if (!locked) {
|
||||||
|
spin_lock_irqsave(&sqp->qc_lock, iflags);
|
||||||
|
locked = true;
|
||||||
|
}
|
||||||
if (first) {
|
if (first) {
|
||||||
qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
|
|
||||||
first = false;
|
first = false;
|
||||||
|
if (!test_bit(qc_idx, sqp->in_use_bm))
|
||||||
|
continue;
|
||||||
} else {
|
} else {
|
||||||
qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
|
qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
|
||||||
}
|
}
|
||||||
if (unlikely(qc_idx >= sdebug_max_queue))
|
if (qc_idx >= sdebug_max_queue)
|
||||||
break;
|
break;
|
||||||
|
|
||||||
sqcp = &sqp->qc_arr[qc_idx];
|
sqcp = &sqp->qc_arr[qc_idx];
|
||||||
@@ -7465,11 +7473,14 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
|
|||||||
}
|
}
|
||||||
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
|
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
|
||||||
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
|
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
|
||||||
|
locked = false;
|
||||||
scsi_done(scp); /* callback to mid level */
|
scsi_done(scp); /* callback to mid level */
|
||||||
spin_lock_irqsave(&sqp->qc_lock, iflags);
|
|
||||||
num_entries++;
|
num_entries++;
|
||||||
|
if (find_first_bit(sqp->in_use_bm, sdebug_max_queue) >= sdebug_max_queue)
|
||||||
|
break; /* if no more then exit without retaking spinlock */
|
||||||
}
|
}
|
||||||
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
|
if (locked)
|
||||||
|
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
|
||||||
if (num_entries > 0)
|
if (num_entries > 0)
|
||||||
atomic_add(num_entries, &sdeb_mq_poll_count);
|
atomic_add(num_entries, &sdeb_mq_poll_count);
|
||||||
return num_entries;
|
return num_entries;
|
||||||
|
Loading…
Reference in New Issue
Block a user