Mirror of https://github.com/torvalds/linux.git (synced 2024-11-24 21:21:41 +00:00)
blk-mq: merge blk-softirq.c into blk-mq.c
__blk_complete_request is only called from the blk-mq code, and
duplicates a lot of code from blk-mq.c. Move it there to prepare for
better code sharing and simplifications.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Daniel Wagner <dwagner@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 48778464bb
commit c3077b5d97
block/Makefile
@@ -5,7 +5,7 @@
 obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-sysfs.o \
 			blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
-			blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
+			blk-exec.o blk-merge.o blk-timeout.o \
 			blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \
 			blk-mq-sysfs.o blk-mq-cpumap.o blk-mq-sched.o ioctl.o \
 			genhd.o ioprio.o badblocks.o partitions/ blk-rq-qos.o
block/blk-mq.c (135 lines changed)
@@ -41,6 +41,8 @@
 #include "blk-mq-sched.h"
 #include "blk-rq-qos.h"
 
+static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+
 static void blk_mq_poll_stats_start(struct request_queue *q);
 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
 
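DEFINE_PER_CPU gives every possible CPU its own blk_cpu_done list head; completed requests are queued there and drained by the BLOCK_SOFTIRQ handler added in the next hunk. A hypothetical userspace reduction of the queueing side (not kernel code: an array stands in for per-CPU storage, and a pending flag stands in for raise_softirq_irqoff()) shows the invariant the real code relies on — the softirq is raised only when a request lands on an empty list, because a non-empty list means a raise is already pending:

/*
 * Hypothetical userspace sketch of the per-CPU completion queueing
 * pattern - not kernel code. blk_cpu_done becomes an array of list
 * heads; raising BLOCK_SOFTIRQ becomes setting a pending flag.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 2

struct list_head { struct list_head *next, *prev; };

static struct list_head blk_cpu_done[NR_CPUS];
static bool softirq_pending[NR_CPUS];

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

/* queue a completion on @cpu; "raise the softirq" only for the first entry */
static void queue_done(int cpu, struct list_head *entry)
{
	struct list_head *list = &blk_cpu_done[cpu];

	list_add_tail(entry, list);
	if (list->next == entry)	/* list was empty before the add */
		softirq_pending[cpu] = true;
}

int main(void)
{
	struct list_head r1, r2;

	for (int i = 0; i < NR_CPUS; i++)
		INIT_LIST_HEAD(&blk_cpu_done[i]);

	queue_done(0, &r1);	/* first entry: raises */
	queue_done(0, &r2);	/* already pending: no second raise needed */
	printf("cpu0 pending: %d\n", softirq_pending[0]);
	return 0;
}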
@@ -574,6 +576,130 @@ void blk_mq_end_request(struct request *rq, blk_status_t error)
 }
 EXPORT_SYMBOL(blk_mq_end_request);
 
+/*
+ * Softirq action handler - move entries to a local list and loop over them
+ * while passing them to the queue's registered handler.
+ */
+static __latent_entropy void blk_done_softirq(struct softirq_action *h)
+{
+	struct list_head *cpu_list, local_list;
+
+	local_irq_disable();
+	cpu_list = this_cpu_ptr(&blk_cpu_done);
+	list_replace_init(cpu_list, &local_list);
+	local_irq_enable();
+
+	while (!list_empty(&local_list)) {
+		struct request *rq;
+
+		rq = list_entry(local_list.next, struct request, ipi_list);
+		list_del_init(&rq->ipi_list);
+		rq->q->mq_ops->complete(rq);
+	}
+}
+
+#ifdef CONFIG_SMP
+static void trigger_softirq(void *data)
+{
+	struct request *rq = data;
+	struct list_head *list;
+
+	list = this_cpu_ptr(&blk_cpu_done);
+	list_add_tail(&rq->ipi_list, list);
+
+	if (list->next == &rq->ipi_list)
+		raise_softirq_irqoff(BLOCK_SOFTIRQ);
+}
+
+/*
+ * Set up and invoke a run of 'trigger_softirq' on the given CPU.
+ */
+static int raise_blk_irq(int cpu, struct request *rq)
+{
+	if (cpu_online(cpu)) {
+		call_single_data_t *data = &rq->csd;
+
+		data->func = trigger_softirq;
+		data->info = rq;
+		data->flags = 0;
+
+		smp_call_function_single_async(cpu, data);
+		return 0;
+	}
+
+	return 1;
+}
+#else /* CONFIG_SMP */
+static int raise_blk_irq(int cpu, struct request *rq)
+{
+	return 1;
+}
+#endif
+
+static int blk_softirq_cpu_dead(unsigned int cpu)
+{
+	/*
+	 * If a CPU goes away, splice its entries to the current CPU
+	 * and trigger a run of the softirq.
+	 */
+	local_irq_disable();
+	list_splice_init(&per_cpu(blk_cpu_done, cpu),
+			 this_cpu_ptr(&blk_cpu_done));
+	raise_softirq_irqoff(BLOCK_SOFTIRQ);
+	local_irq_enable();
+
+	return 0;
+}
+
+static void __blk_complete_request(struct request *req)
+{
+	struct request_queue *q = req->q;
+	int cpu, ccpu = req->mq_ctx->cpu;
+	unsigned long flags;
+	bool shared = false;
+
+	BUG_ON(!q->mq_ops->complete);
+
+	local_irq_save(flags);
+	cpu = smp_processor_id();
+
+	/*
+	 * Select completion CPU
+	 */
+	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && ccpu != -1) {
+		if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
+			shared = cpus_share_cache(cpu, ccpu);
+	} else
+		ccpu = cpu;
+
+	/*
+	 * If the current CPU and the requested CPU share a cache, run the
+	 * softirq on the current CPU. This may look like QUEUE_FLAG_SAME_FORCE,
+	 * but it is not: blk_complete_request() runs in interrupt context,
+	 * and with an I/O controller that does not support multiple
+	 * interrupts the current CPU is effectively unique. Completing
+	 * locally avoids sending an IPI from the current CPU to the first
+	 * CPU of a group.
+	 */
+	if (ccpu == cpu || shared) {
+		struct list_head *list;
+do_local:
+		list = this_cpu_ptr(&blk_cpu_done);
+		list_add_tail(&req->ipi_list, list);
+
+		/*
+		 * If the list only contains our just-added request, signal
+		 * a raise of the softirq. If there are already entries
+		 * there, someone already raised the irq but it hasn't run
+		 * yet.
+		 */
+		if (list->next == &req->ipi_list)
+			raise_softirq_irqoff(BLOCK_SOFTIRQ);
+	} else if (raise_blk_irq(ccpu, req))
+		goto do_local;
+
+	local_irq_restore(flags);
+}
+
 static void __blk_mq_complete_request_remote(void *data)
 {
 	struct request *rq = data;
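The subtle part of the moved code is the completion-CPU decision in __blk_complete_request(): complete locally when there is no CPU preference, when the submitting CPU shares a cache with the current one (unless QUEUE_FLAG_SAME_FORCE insists), and otherwise IPI the submitting CPU via raise_blk_irq(), falling back to local completion if that CPU is offline. A self-contained userspace model of just that branch logic (hypothetical code: the queue flags become plain bools and cpus_share_cache() is a stubbed topology) makes the outcomes easy to test:

/*
 * Hypothetical userspace model of the completion-CPU selection in
 * __blk_complete_request() - not kernel code. The branch structure
 * mirrors the diff above; everything else is stubbed.
 */
#include <stdbool.h>
#include <stdio.h>

enum completion { COMPLETE_LOCAL, COMPLETE_IPI };

/* stub topology: CPUs 0/1 share a cache, as do 2/3 */
static bool cpus_share_cache(int a, int b)
{
	return (a / 2) == (b / 2);
}

static enum completion select_completion(int cpu, int ccpu,
					 bool same_comp, bool same_force)
{
	bool shared = false;

	if (same_comp && ccpu != -1) {
		/* without SAME_FORCE, a shared cache is good enough */
		if (!same_force)
			shared = cpus_share_cache(cpu, ccpu);
	} else {
		ccpu = cpu;	/* no preference: complete locally */
	}

	if (ccpu == cpu || shared)
		return COMPLETE_LOCAL;
	return COMPLETE_IPI;	/* kernel falls back to local if ccpu is offline */
}

int main(void)
{
	/* completing on CPU 0 for a request submitted on CPU 1: shared cache, stay local */
	printf("%d\n", select_completion(0, 1, true, false) == COMPLETE_LOCAL);
	/* SAME_FORCE set: even a cache-sharing sibling gets an IPI */
	printf("%d\n", select_completion(0, 1, true, true) == COMPLETE_IPI);
	/* different cache domain: IPI to the submitting CPU */
	printf("%d\n", select_completion(0, 3, true, false) == COMPLETE_IPI);
	return 0;
}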
@@ -3760,6 +3886,15 @@ EXPORT_SYMBOL(blk_mq_rq_cpu);
 
 static int __init blk_mq_init(void)
 {
+	int i;
+
+	for_each_possible_cpu(i)
+		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
+	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
+
+	cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
+				  "block/softirq:dead", NULL,
+				  blk_softirq_cpu_dead);
 	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
 				blk_mq_hctx_notify_dead);
 	cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
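blk_softirq_cpu_dead(), registered above under CPUHP_BLOCK_SOFTIRQ_DEAD, must not strand completions queued on a CPU that goes offline, so it splices the dead CPU's pending list onto the current CPU's and raises the softirq there. A userspace sketch of the list_splice_init() hand-off it performs (hypothetical minimal list helpers, not the kernel's):

/*
 * Hypothetical userspace sketch of the dead-CPU hand-off in
 * blk_softirq_cpu_dead(): splice the offlined CPU's pending list onto
 * the current CPU's list, leaving the source empty.
 */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
struct item { struct list_head node; int id; };	/* node is the first member */

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

/* move all of @src to the head of @dst and reinitialize @src */
static void list_splice_init(struct list_head *src, struct list_head *dst)
{
	if (!list_empty(src)) {
		struct list_head *first = src->next, *last = src->prev;

		first->prev = dst;
		last->next = dst->next;
		dst->next->prev = last;
		dst->next = first;
		INIT_LIST_HEAD(src);
	}
}

int main(void)
{
	struct list_head dead_cpu, this_cpu;
	struct item a = { .id = 1 }, b = { .id = 2 };

	INIT_LIST_HEAD(&dead_cpu);
	INIT_LIST_HEAD(&this_cpu);
	list_add_tail(&a.node, &dead_cpu);
	list_add_tail(&b.node, &dead_cpu);

	list_splice_init(&dead_cpu, &this_cpu);	/* the CPU went away */

	printf("dead list empty: %d\n", list_empty(&dead_cpu));
	for (struct list_head *p = this_cpu.next; p != &this_cpu; p = p->next)
		printf("rescued item %d\n", ((struct item *)p)->id);
	return 0;
}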
block/blk-softirq.c (deleted, 156 lines)
@@ -1,156 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Functions related to softirq rq completions
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/bio.h>
-#include <linux/blkdev.h>
-#include <linux/interrupt.h>
-#include <linux/cpu.h>
-#include <linux/sched.h>
-#include <linux/sched/topology.h>
-
-#include "blk.h"
-
-static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
-
-/*
- * Softirq action handler - move entries to a local list and loop over them
- * while passing them to the queue's registered handler.
- */
-static __latent_entropy void blk_done_softirq(struct softirq_action *h)
-{
-	struct list_head *cpu_list, local_list;
-
-	local_irq_disable();
-	cpu_list = this_cpu_ptr(&blk_cpu_done);
-	list_replace_init(cpu_list, &local_list);
-	local_irq_enable();
-
-	while (!list_empty(&local_list)) {
-		struct request *rq;
-
-		rq = list_entry(local_list.next, struct request, ipi_list);
-		list_del_init(&rq->ipi_list);
-		rq->q->mq_ops->complete(rq);
-	}
-}
-
-#ifdef CONFIG_SMP
-static void trigger_softirq(void *data)
-{
-	struct request *rq = data;
-	struct list_head *list;
-
-	list = this_cpu_ptr(&blk_cpu_done);
-	list_add_tail(&rq->ipi_list, list);
-
-	if (list->next == &rq->ipi_list)
-		raise_softirq_irqoff(BLOCK_SOFTIRQ);
-}
-
-/*
- * Set up and invoke a run of 'trigger_softirq' on the given CPU.
- */
-static int raise_blk_irq(int cpu, struct request *rq)
-{
-	if (cpu_online(cpu)) {
-		call_single_data_t *data = &rq->csd;
-
-		data->func = trigger_softirq;
-		data->info = rq;
-		data->flags = 0;
-
-		smp_call_function_single_async(cpu, data);
-		return 0;
-	}
-
-	return 1;
-}
-#else /* CONFIG_SMP */
-static int raise_blk_irq(int cpu, struct request *rq)
-{
-	return 1;
-}
-#endif
-
-static int blk_softirq_cpu_dead(unsigned int cpu)
-{
-	/*
-	 * If a CPU goes away, splice its entries to the current CPU
-	 * and trigger a run of the softirq.
-	 */
-	local_irq_disable();
-	list_splice_init(&per_cpu(blk_cpu_done, cpu),
-			 this_cpu_ptr(&blk_cpu_done));
-	raise_softirq_irqoff(BLOCK_SOFTIRQ);
-	local_irq_enable();
-
-	return 0;
-}
-
-void __blk_complete_request(struct request *req)
-{
-	struct request_queue *q = req->q;
-	int cpu, ccpu = req->mq_ctx->cpu;
-	unsigned long flags;
-	bool shared = false;
-
-	BUG_ON(!q->mq_ops->complete);
-
-	local_irq_save(flags);
-	cpu = smp_processor_id();
-
-	/*
-	 * Select completion CPU
-	 */
-	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && ccpu != -1) {
-		if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
-			shared = cpus_share_cache(cpu, ccpu);
-	} else
-		ccpu = cpu;
-
-	/*
-	 * If the current CPU and the requested CPU share a cache, run the
-	 * softirq on the current CPU. This may look like QUEUE_FLAG_SAME_FORCE,
-	 * but it is not: blk_complete_request() runs in interrupt context,
-	 * and with an I/O controller that does not support multiple
-	 * interrupts the current CPU is effectively unique. Completing
-	 * locally avoids sending an IPI from the current CPU to the first
-	 * CPU of a group.
-	 */
-	if (ccpu == cpu || shared) {
-		struct list_head *list;
-do_local:
-		list = this_cpu_ptr(&blk_cpu_done);
-		list_add_tail(&req->ipi_list, list);
-
-		/*
-		 * If the list only contains our just-added request, signal
-		 * a raise of the softirq. If there are already entries
-		 * there, someone already raised the irq but it hasn't run
-		 * yet.
-		 */
-		if (list->next == &req->ipi_list)
-			raise_softirq_irqoff(BLOCK_SOFTIRQ);
-	} else if (raise_blk_irq(ccpu, req))
-		goto do_local;
-
-	local_irq_restore(flags);
-}
-
-static __init int blk_softirq_init(void)
-{
-	int i;
-
-	for_each_possible_cpu(i)
-		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
-
-	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
-	cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
-				  "block/softirq:dead", NULL,
-				  blk_softirq_cpu_dead);
-	return 0;
-}
-subsys_initcall(blk_softirq_init);
include/linux/blkdev.h
@@ -1078,7 +1078,6 @@ void blk_steal_bios(struct bio_list *list, struct request *rq);
 extern bool blk_update_request(struct request *rq, blk_status_t error,
 			       unsigned int nr_bytes);
 
-extern void __blk_complete_request(struct request *);
 extern void blk_abort_request(struct request *);
 
 /*