[SCSI] fcoe: allow SCSI-FCP to be processed directly in softirq context
Allow FCP frames to bypass the FCoE receive processing threads and handle them directly in softirq context, if they are received on the correct CPU. This preserves the queuing to threads for scaling out receive processing to multiple CPUs, but allows FCoE-aware multi-queue network drivers that direct frames to the originating CPUs to handle FCP processing with less scheduling latency. Only FCP is handled directly, because libfc makes use of mutexes in ELS handling routines.

The bulk of this change is just moving the FCoE receive processing out of the receive thread function, leaving behind just the thread and queue management. The interesting bits are in fcoe_rcv().

Signed-off-by: Chris Leech <christopher.leech@intel.com>
Signed-off-by: Robert Love <robert.w.love@intel.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
commit 859b7b649a (parent 70d919fbd9)
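For readers following the change outside the kernel tree, the sketch below is a minimal user-space rendering of the dispatch pattern the hunks add to fcoe_rcv(): an FCP frame is processed inline when it arrives on the originating CPU and the per-CPU queue is empty; everything else is queued and the per-CPU receive thread is woken. The pthread queue and the frame_t/rcv()/worker()/recv_frame() names are illustrative stand-ins only, and the mutex/condvar pair is a simplification of the spin_lock_bh()/wake_up_process() handling in the driver.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define TYPE_FCP 0x08			/* mirrors FC_TYPE_FCP */

typedef struct frame {
	int type;
	struct frame *next;
} frame_t;

static struct {
	pthread_mutex_t lock;
	pthread_cond_t wake;
	frame_t *head, *tail;
} rx_queue = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, NULL, NULL };

/* Stand-in for fcoe_recv_frame(): do the per-frame work. */
static void recv_frame(frame_t *f)
{
	printf("processed frame type %#x\n", f->type);
	free(f);
}

/* Stand-in for the per-CPU receive thread: dequeue and process. */
static void *worker(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&rx_queue.lock);
		while (!rx_queue.head)
			pthread_cond_wait(&rx_queue.wake, &rx_queue.lock);
		frame_t *f = rx_queue.head;
		rx_queue.head = f->next;
		if (!rx_queue.head)
			rx_queue.tail = NULL;
		pthread_mutex_unlock(&rx_queue.lock);
		recv_frame(f);
	}
	return NULL;
}

/* Stand-in for fcoe_rcv(): inline fast path vs. queue-and-wake slow path. */
static void rcv(frame_t *f, bool on_originating_cpu)
{
	pthread_mutex_lock(&rx_queue.lock);
	if (f->type == TYPE_FCP && on_originating_cpu && !rx_queue.head) {
		/* Fast path: drop the lock and process in the calling context. */
		pthread_mutex_unlock(&rx_queue.lock);
		recv_frame(f);
	} else {
		/* Slow path: append, and wake the worker if the queue was empty. */
		bool was_empty = (rx_queue.head == NULL);

		f->next = NULL;
		if (rx_queue.tail)
			rx_queue.tail->next = f;
		else
			rx_queue.head = f;
		rx_queue.tail = f;
		if (was_empty)
			pthread_cond_signal(&rx_queue.wake);
		pthread_mutex_unlock(&rx_queue.lock);
	}
}

int main(void)
{
	pthread_t tid;
	frame_t *f;

	pthread_create(&tid, NULL, worker, NULL);

	f = malloc(sizeof(*f));
	f->type = TYPE_FCP;
	rcv(f, true);		/* handled inline: FCP, right CPU, empty queue */

	f = malloc(sizeof(*f));
	f->type = 0x01;		/* non-FCP (e.g. ELS): always goes to the worker */
	rcv(f, true);

	sleep(1);		/* let the worker drain before exiting */
	return 0;
}

Note that the inline path is only taken when nothing is already waiting for the thread, so queued frames are never overtaken by one processed inline, and non-FCP traffic always goes through the thread because, as the commit message notes, libfc's ELS handling routines use mutexes.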
@@ -109,6 +109,7 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
						   struct fc_frame *,
						   void *),
				  void *, u32 timeout);
static void fcoe_recv_frame(struct sk_buff *skb);

module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(create, "string");
@@ -1241,11 +1242,25 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
			 * this skb. We also have this receive thread locked,
			 * so we're free to queue skbs into it's queue.
			 */

	/* If this is a SCSI-FCP frame, and this is already executing on the
	 * correct CPU, and the queue for this CPU is empty, then go ahead
	 * and process the frame directly in the softirq context.
	 * This lets us process completions without context switching from the
	 * NET_RX softirq, to our receive processing thread, and then back to
	 * BLOCK softirq context.
	 */
	if (fh->fh_type == FC_TYPE_FCP &&
	    cpu == smp_processor_id() &&
	    skb_queue_empty(&fps->fcoe_rx_list)) {
		spin_unlock_bh(&fps->fcoe_rx_list.lock);
		fcoe_recv_frame(skb);
	} else {
		__skb_queue_tail(&fps->fcoe_rx_list, skb);
		if (fps->fcoe_rx_list.qlen == 1)
			wake_up_process(fps->thread);

		spin_unlock_bh(&fps->fcoe_rx_list.lock);
	}

	return 0;
err:
@@ -1503,48 +1518,29 @@ static void fcoe_percpu_flush_done(struct sk_buff *skb)
}

/**
 * fcoe_percpu_receive_thread() - The per-CPU packet receive thread
 * @arg: The per-CPU context
 *
 * Return: 0 for success
 * fcoe_recv_frame() - process a single received frame
 * @skb: frame to process
 */
int fcoe_percpu_receive_thread(void *arg)
static void fcoe_recv_frame(struct sk_buff *skb)
{
	struct fcoe_percpu_s *p = arg;
	u32 fr_len;
	struct fc_lport *lport;
	struct fcoe_rcv_info *fr;
	struct fcoe_dev_stats *stats;
	struct fc_frame_header *fh;
	struct sk_buff *skb;
	struct fcoe_crc_eof crc_eof;
	struct fc_frame *fp;
	u8 *mac = NULL;
	struct fcoe_port *port;
	struct fcoe_hdr *hp;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {

		spin_lock_bh(&p->fcoe_rx_list.lock);
		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_bh(&p->fcoe_rx_list.lock);
			schedule();
			set_current_state(TASK_RUNNING);
			if (kthread_should_stop())
				return 0;
			spin_lock_bh(&p->fcoe_rx_list.lock);
		}
		spin_unlock_bh(&p->fcoe_rx_list.lock);
	fr = fcoe_dev_from_skb(skb);
	lport = fr->fr_dev;
	if (unlikely(!lport)) {
		if (skb->destructor != fcoe_percpu_flush_done)
			FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb");
		kfree_skb(skb);
		continue;
		return;
	}

	FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d "
@@ -1580,7 +1576,7 @@ int fcoe_percpu_receive_thread(void *arg)
				FC_FCOE_VER);
		stats->ErrorFrames++;
		kfree_skb(skb);
		continue;
		return;
	}

	skb_pull(skb, sizeof(struct fcoe_hdr));
@@ -1597,13 +1593,13 @@ int fcoe_percpu_receive_thread(void *arg)
	/* Copy out the CRC and EOF trailer for access */
	if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
		kfree_skb(skb);
		continue;
		return;
	}
	fr_eof(fp) = crc_eof.fcoe_eof;
	fr_crc(fp) = crc_eof.fcoe_crc32;
	if (pskb_trim(skb, fr_len)) {
		kfree_skb(skb);
		continue;
		return;
	}

	/*
@@ -1621,7 +1617,7 @@ int fcoe_percpu_receive_thread(void *arg)
	if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
	    fh->fh_type == FC_TYPE_FCP) {
		fc_exch_recv(lport, fp);
		continue;
		return;
	}
	if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
		if (le32_to_cpu(fr_crc(fp)) !=
@@ -1632,11 +1628,40 @@ int fcoe_percpu_receive_thread(void *arg)
			stats->InvalidCRCCount++;
			stats->ErrorFrames++;
			fc_frame_free(fp);
			continue;
			return;
		}
		fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
	}
	fc_exch_recv(lport, fp);
}

/**
 * fcoe_percpu_receive_thread() - The per-CPU packet receive thread
 * @arg: The per-CPU context
 *
 * Return: 0 for success
 */
int fcoe_percpu_receive_thread(void *arg)
{
	struct fcoe_percpu_s *p = arg;
	struct sk_buff *skb;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {

		spin_lock_bh(&p->fcoe_rx_list.lock);
		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_bh(&p->fcoe_rx_list.lock);
			schedule();
			set_current_state(TASK_RUNNING);
			if (kthread_should_stop())
				return 0;
			spin_lock_bh(&p->fcoe_rx_list.lock);
		}
		spin_unlock_bh(&p->fcoe_rx_list.lock);
		fcoe_recv_frame(skb);
	}
	return 0;
}