/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2009 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH 8

/* Maximum size of a buffer sharing a page */
#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))
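
/* Illustrative arithmetic (example values, not definitions from this file):
 * with 4KiB pages, EFX_RX_HALF_PAGE is 2048 bytes minus
 * sizeof(struct efx_rx_page_state), so a typical ~1.5KiB MTU-sized receive
 * buffer fits in half a page and two buffers can share one page, while
 * jumbo-sized buffers fall back to a whole order-n page each.
 */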

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 64u

/*
 * rx_alloc_method - RX buffer allocation method
 *
 * This driver supports two methods for allocating and using RX buffers:
 * each RX buffer may be backed by an skb or by an order-n page.
 *
 * When LRO is in use then the second method has a lower overhead,
 * since we don't have to allocate then free skbs on reassembled frames.
 *
 * Values:
 * - RX_ALLOC_METHOD_AUTO = 0
 * - RX_ALLOC_METHOD_SKB = 1
 * - RX_ALLOC_METHOD_PAGE = 2
 *
 * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
 * controlled by the parameters below.
 *
 * - Since pushing and popping descriptors are separated by the rx_queue
 *   size, the watermarks should be ~rxd_size.
 * - The performance win by using page-based allocation for LRO is less
 *   than the performance hit of using page-based allocation for non-LRO,
 *   so the watermarks should reflect this.
 *
 * Per channel we maintain a single variable, updated by each channel:
 *
 *   rx_alloc_level += (lro_performed ? RX_ALLOC_FACTOR_LRO :
 *                      RX_ALLOC_FACTOR_SKB)
 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
 * limits the hysteresis), and update the allocation strategy:
 *
 *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ?
 *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
 */
static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;

#define RX_ALLOC_LEVEL_LRO 0x2000
#define RX_ALLOC_LEVEL_MAX 0x3000
#define RX_ALLOC_FACTOR_LRO 1
#define RX_ALLOC_FACTOR_SKB (-2)
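
/* Worked example of the hysteresis (illustrative, derived from the
 * constants above): each GRO/LRO-merged packet raises rx_alloc_level by
 * RX_ALLOC_FACTOR_LRO (+1) and each plain-skb packet lowers it by
 * RX_ALLOC_FACTOR_SKB (-2). efx_rx_strategy() clamps the level to
 * [0, RX_ALLOC_LEVEL_MAX] and switches the channel to page-based
 * allocation only while the level sits above RX_ALLOC_LEVEL_LRO (0x2000),
 * so roughly two merged packets are needed to offset every unmerged one
 * before pages are used.
 */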

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold = 90;

/* This is the percentage fill level to which an RX queue will be refilled
 * when the "RX refill threshold" is reached.
 */
static unsigned int rx_refill_limit = 95;
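
/* Worked example (assuming EFX_RXQ_SIZE is 4096; the real value comes
 * from the ring definitions elsewhere): efx_init_rx_queue() computes
 * max_fill = 4096 - EFX_RXD_HEAD_ROOM = 4094, so refilling triggers once
 * the ring drains below 3684 entries (90%) and stops at 3889 (95%).
 */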

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow and at least 2 to allow
 * pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM 2

static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf)
{
	/* Offset is always within one page, so we don't need to consider
	 * the page order.
	 */
	return (__force unsigned long) buf->data & (PAGE_SIZE - 1);
}

static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
	return PAGE_SIZE << efx->rx_buffer_order;
}

static inline u32 efx_rx_buf_hash(struct efx_rx_buffer *buf)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
	return __le32_to_cpup((const __le32 *)(buf->data - 4));
#else
	const u8 *data = (const u8 *)(buf->data - 4);
	return ((u32)data[0] |
		(u32)data[1] << 8 |
		(u32)data[2] << 16 |
		(u32)data[3] << 24);
#endif
}

/**
 * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
 * struct efx_rx_buffer for each one. Return a negative error code or 0
 * on success. May fail having inserted fewer than EFX_RX_BATCH buffers.
 */
static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct net_device *net_dev = efx->net_dev;
	struct efx_rx_buffer *rx_buf;
	int skb_len = efx->rx_buffer_len;
	unsigned index, count;

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		index = rx_queue->added_count & EFX_RXQ_MASK;
		rx_buf = efx_rx_buffer(rx_queue, index);

		rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
		if (unlikely(!rx_buf->skb))
			return -ENOMEM;
		rx_buf->page = NULL;

		/* Adjust the SKB for padding and checksum */
		skb_reserve(rx_buf->skb, NET_IP_ALIGN);
		rx_buf->len = skb_len - NET_IP_ALIGN;
		rx_buf->data = (char *)rx_buf->skb->data;
		rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;

		rx_buf->dma_addr = pci_map_single(efx->pci_dev,
						  rx_buf->data, rx_buf->len,
						  PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(efx->pci_dev,
						   rx_buf->dma_addr))) {
			dev_kfree_skb_any(rx_buf->skb);
			rx_buf->skb = NULL;
			return -EIO;
		}

		++rx_queue->added_count;
		++rx_queue->alloc_skb_count;
	}

	return 0;
}

/**
 * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
 * and populates struct efx_rx_buffers for each one. Return a negative error
 * code or 0 on success. If a single page can be split between two buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	void *page_addr;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	/* We can split a page between two buffers */
	BUILD_BUG_ON(EFX_RX_BATCH & 1);

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
				   efx->rx_buffer_order);
		if (unlikely(page == NULL))
			return -ENOMEM;
		dma_addr = pci_map_page(efx->pci_dev, page, 0,
					efx_rx_buf_size(efx),
					PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
			__free_pages(page, efx->rx_buffer_order);
			return -EIO;
		}
		page_addr = page_address(page);
		state = page_addr;
		state->refcnt = 0;
		state->dma_addr = dma_addr;

		page_addr += sizeof(struct efx_rx_page_state);
		dma_addr += sizeof(struct efx_rx_page_state);

	split:
		index = rx_queue->added_count & EFX_RXQ_MASK;
		rx_buf = efx_rx_buffer(rx_queue, index);
		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
		rx_buf->skb = NULL;
		rx_buf->page = page;
		rx_buf->data = page_addr + EFX_PAGE_IP_ALIGN;
		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
		++rx_queue->added_count;
		++rx_queue->alloc_page_count;
		++state->refcnt;

		if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
			/* Use the second half of the page */
			get_page(page);
			dma_addr += (PAGE_SIZE >> 1);
			page_addr += (PAGE_SIZE >> 1);
			++count;
			goto split;
		}
	}

	return 0;
}

static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf)
{
	if (rx_buf->page) {
		struct efx_rx_page_state *state;

		EFX_BUG_ON_PARANOID(rx_buf->skb);

		state = page_address(rx_buf->page);
		if (--state->refcnt == 0) {
			pci_unmap_page(efx->pci_dev,
				       state->dma_addr,
				       efx_rx_buf_size(efx),
				       PCI_DMA_FROMDEVICE);
		}
	} else if (likely(rx_buf->skb)) {
		pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
				 rx_buf->len, PCI_DMA_FROMDEVICE);
	}
}

static void efx_free_rx_buffer(struct efx_nic *efx,
			       struct efx_rx_buffer *rx_buf)
{
	if (rx_buf->page) {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
	} else if (likely(rx_buf->skb)) {
		dev_kfree_skb_any(rx_buf->skb);
		rx_buf->skb = NULL;
	}
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
	efx_free_rx_buffer(rx_queue->efx, rx_buf);
}

/* Attempt to resurrect the other receive buffer that used to share this page,
 * which had previously been passed up to the kernel and freed. */
static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
				    struct efx_rx_buffer *rx_buf)
{
	struct efx_rx_page_state *state = page_address(rx_buf->page);
	struct efx_rx_buffer *new_buf;
	unsigned fill_level, index;

	/* +1 because efx_rx_packet() incremented removed_count. +1 because
	 * we'd like to insert an additional descriptor whilst leaving
	 * EFX_RXD_HEAD_ROOM for the non-recycle path */
	fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
	if (unlikely(fill_level >= EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM)) {
		/* We could place "state" on a list, and drain the list in
		 * efx_fast_push_rx_descriptors(). For now, this will do. */
		return;
	}

	++state->refcnt;
	get_page(rx_buf->page);

	index = rx_queue->added_count & EFX_RXQ_MASK;
	new_buf = efx_rx_buffer(rx_queue, index);
	new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
	new_buf->skb = NULL;
	new_buf->page = rx_buf->page;
	new_buf->data = (void *)
		((__force unsigned long)rx_buf->data ^ (PAGE_SIZE >> 1));
	new_buf->len = rx_buf->len;
	++rx_queue->added_count;
}

/* Recycle the given rx buffer directly back into the rx_queue. There is
 * always room to add this buffer, because we've just popped a buffer. */
static void efx_recycle_rx_buffer(struct efx_channel *channel,
				  struct efx_rx_buffer *rx_buf)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue = &efx->rx_queue[channel->channel];
	struct efx_rx_buffer *new_buf;
	unsigned index;

	if (rx_buf->page != NULL && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
	    page_count(rx_buf->page) == 1)
		efx_resurrect_rx_buffer(rx_queue, rx_buf);

	index = rx_queue->added_count & EFX_RXQ_MASK;
	new_buf = efx_rx_buffer(rx_queue, index);

	memcpy(new_buf, rx_buf, sizeof(*new_buf));
	rx_buf->page = NULL;
	rx_buf->skb = NULL;
	++rx_queue->added_count;
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->@fast_fill_limit. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
	struct efx_channel *channel = rx_queue->channel;
	unsigned fill_level;
	int space, rc = 0;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	space = rx_queue->fast_fill_limit - fill_level;
	if (space < EFX_RX_BATCH)
		goto out;

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d using %s allocation\n",
		   rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
		   channel->rx_alloc_push_pages ? "page" : "skb");

	do {
		if (channel->rx_alloc_push_pages)
			rc = efx_init_rx_buffers_page(rx_queue);
		else
			rc = efx_init_rx_buffers_skb(rx_queue);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", rx_queue->queue,
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

void efx_rx_slow_fill(unsigned long context)
{
	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
	struct efx_channel *channel = rx_queue->channel;

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(channel);
	++rx_queue->slow_fill_count;
}
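
/* Example wiring (a sketch; the actual timer setup lives elsewhere in
 * the driver): the slow-fill timer would be initialised with this
 * function as its handler, e.g.
 *
 *	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
 *		    (unsigned long)rx_queue);
 *
 * and efx_schedule_slow_fill() would then re-arm it with mod_timer().
 */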

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len, bool *discard,
				     bool *leak_packet)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	*discard = true;

	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d seriously overlength "
				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
				  rx_queue->queue, len, max_len,
				  efx->type->rx_buffer_padding);
		/* If this buffer was skb-allocated, then the meta
		 * data at the end of the skb will be trashed. So
		 * we have no choice but to leak the fragment.
		 */
		*leak_packet = (rx_buf->skb != NULL);
		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d overlength RX event "
				  "(0x%x > 0x%x)\n",
				  rx_queue->queue, len, max_len);
	}

	rx_queue->channel->n_rx_overlength++;
}

/* Pass a received packet up through the generic LRO stack
 *
 * Handles driverlink veto, and passes the fragment up via
 * the appropriate LRO method
 */
static void efx_rx_packet_lro(struct efx_channel *channel,
			      struct efx_rx_buffer *rx_buf,
			      bool checksummed)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;

	/* Pass the skb/page into the LRO engine */
	if (rx_buf->page) {
		struct efx_nic *efx = channel->efx;
		struct page *page = rx_buf->page;
		struct sk_buff *skb;

		EFX_BUG_ON_PARANOID(rx_buf->skb);
		rx_buf->page = NULL;

		skb = napi_get_frags(napi);
		if (!skb) {
			put_page(page);
			return;
		}

		if (efx->net_dev->features & NETIF_F_RXHASH)
			skb->rxhash = efx_rx_buf_hash(rx_buf);

		skb_shinfo(skb)->frags[0].page = page;
		skb_shinfo(skb)->frags[0].page_offset =
			efx_rx_buf_offset(rx_buf);
		skb_shinfo(skb)->frags[0].size = rx_buf->len;
		skb_shinfo(skb)->nr_frags = 1;

		skb->len = rx_buf->len;
		skb->data_len = rx_buf->len;
		skb->truesize += rx_buf->len;
		skb->ip_summed =
			checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;

		skb_record_rx_queue(skb, channel->channel);

		gro_result = napi_gro_frags(napi);
	} else {
		struct sk_buff *skb = rx_buf->skb;

		EFX_BUG_ON_PARANOID(!skb);
		EFX_BUG_ON_PARANOID(!checksummed);
		rx_buf->skb = NULL;

		gro_result = napi_gro_receive(napi, skb);
	}

	if (gro_result == GRO_NORMAL) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
	} else if (gro_result != GRO_DROP) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
		channel->irq_mod_score += 2;
	}
}

void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int len, bool checksummed, bool discard)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = rx_queue->channel;
	struct efx_rx_buffer *rx_buf;
	bool leak_packet = false;

	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_BUG_ON_PARANOID(!rx_buf->data);
	EFX_BUG_ON_PARANOID(rx_buf->skb && rx_buf->page);
	EFX_BUG_ON_PARANOID(!(rx_buf->skb || rx_buf->page));

	/* This allows the refill path to post another buffer.
	 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
	 * isn't overwritten yet.
	 */
	rx_queue->removed_count++;

	/* Validate the length encoded in the event vs the descriptor pushed */
	efx_rx_packet__check_len(rx_queue, rx_buf, len,
				 &discard, &leak_packet);

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received id %x at %llx+%x %s%s\n",
		   rx_queue->queue, index,
		   (unsigned long long)rx_buf->dma_addr, len,
		   (checksummed ? " [SUMMED]" : ""),
		   (discard ? " [DISCARD]" : ""));

	/* Discard packet, if instructed to do so */
	if (unlikely(discard)) {
		if (unlikely(leak_packet))
			channel->n_skbuff_leaks++;
		else
			efx_recycle_rx_buffer(channel, rx_buf);

		/* Don't hold off the previous receive */
		rx_buf = NULL;
		goto out;
	}

	/* Release card resources - assumes all RX buffers consumed in-order
	 * per RX queue
	 */
	efx_unmap_rx_buffer(efx, rx_buf);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(rx_buf->data);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	rx_buf->len = len;
out:
	if (rx_queue->channel->rx_pkt)
		__efx_rx_packet(rx_queue->channel,
				rx_queue->channel->rx_pkt,
				rx_queue->channel->rx_pkt_csummed);
	rx_queue->channel->rx_pkt = rx_buf;
	rx_queue->channel->rx_pkt_csummed = checksummed;
}

/* Handle a received packet. Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel,
		     struct efx_rx_buffer *rx_buf, bool checksummed)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	rx_buf->data += efx->type->rx_buffer_hash_size;
	rx_buf->len -= efx->type->rx_buffer_hash_size;

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
		efx_free_rx_buffer(efx, rx_buf);
		return;
	}

	if (rx_buf->skb) {
		prefetch(skb_shinfo(rx_buf->skb));

		skb_reserve(rx_buf->skb, efx->type->rx_buffer_hash_size);
		skb_put(rx_buf->skb, rx_buf->len);

		if (efx->net_dev->features & NETIF_F_RXHASH)
			rx_buf->skb->rxhash = efx_rx_buf_hash(rx_buf);

		/* Move past the ethernet header. rx_buf->data still points
		 * at the ethernet header */
		rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
						       efx->net_dev);

		skb_record_rx_queue(rx_buf->skb, channel->channel);
	}

	if (likely(checksummed || rx_buf->page)) {
		efx_rx_packet_lro(channel, rx_buf, checksummed);
		return;
	}

	/* We now own the SKB */
	skb = rx_buf->skb;
	rx_buf->skb = NULL;
	EFX_BUG_ON_PARANOID(!skb);

	/* Set the SKB flags */
	skb->ip_summed = CHECKSUM_NONE;

	/* Pass the packet up */
	netif_receive_skb(skb);

	/* Update allocation strategy method */
	channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
}

void efx_rx_strategy(struct efx_channel *channel)
{
	enum efx_rx_alloc_method method = rx_alloc_method;

	/* Only makes sense to use page based allocation if LRO is enabled */
	if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
		method = RX_ALLOC_METHOD_SKB;
	} else if (method == RX_ALLOC_METHOD_AUTO) {
		/* Constrain the rx_alloc_level */
		if (channel->rx_alloc_level < 0)
			channel->rx_alloc_level = 0;
		else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
			channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;

		/* Decide on the allocation method */
		method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_LRO) ?
			  RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
	}

	/* Push the option */
	channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int rxq_size;
	int rc;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d\n", rx_queue->queue);

	/* Allocate RX buffers */
	rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer);
	rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}
	return rc;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	unsigned int max_fill, trigger, limit;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", rx_queue->queue);

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	rx_queue->min_overfill = -1U;

	/* Initialise limit fields */
	max_fill = EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM;
	trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
	limit = max_fill * min(rx_refill_limit, 100U) / 100U;

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->fast_fill_limit = limit;

	/* Set up RX descriptor ring */
	efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_rx_buffer *rx_buf;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", rx_queue->queue);

	del_timer_sync(&rx_queue->slow_fill);
	efx_nic_fini_rx(rx_queue);

	/* Release RX buffers. NB: start at index 0, not the current HW ptr */
	if (rx_queue->buffer) {
		for (i = 0; i <= EFX_RXQ_MASK; i++) {
			rx_buf = efx_rx_buffer(rx_queue, i);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", rx_queue->queue);

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}


module_param(rx_alloc_method, int, 0644);
MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring fast/slow fill threshold (%)");