@@ -178,31 +178,9 @@ struct fm_port_fqs {
 /* All the dpa bps in use at any moment */
 static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
 
-/* The raw buffer size must be cacheline aligned */
 #define DPAA_BP_RAW_SIZE 4096
 
-/* When using more than one buffer pool, the raw sizes are as follows:
- * 1 bp: 4KB
- * 2 bp: 2KB, 4KB
- * 3 bp: 1KB, 2KB, 4KB
- * 4 bp: 1KB, 2KB, 4KB, 8KB
- */
-static inline size_t bpool_buffer_raw_size(u8 index, u8 cnt)
-{
-	size_t res = DPAA_BP_RAW_SIZE / 4;
-	u8 i;
-
-	for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++)
-		res *= 2;
-	return res;
-}
-
-/* FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
- * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
- * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
- * half-page-aligned buffers, so we reserve some more space for start-of-buffer
- * alignment.
- */
-#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD((raw_size) - SMP_CACHE_BYTES)
+#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size)
 
 static int dpaa_max_frm;
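Note: for reference, the removed bpool_buffer_raw_size() quartered DPAA_BP_RAW_SIZE and then doubled it per pool index. A stand-alone sketch of that arithmetic (a user-space rewrite for illustration, with u8 swapped for unsigned char; DPAA_BPS_NUM was 3 in this driver) reproduces the table from the deleted comment:

	#include <stdio.h>
	#include <stddef.h>

	#define DPAA_BP_RAW_SIZE 4096

	/* stand-alone copy of the removed helper: pool "index" of "cnt" pools */
	static size_t bpool_buffer_raw_size(unsigned char index, unsigned char cnt)
	{
		size_t res = DPAA_BP_RAW_SIZE / 4;
		unsigned char i;

		for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++)
			res *= 2;
		return res;
	}

	int main(void)
	{
		int i;

		/* with the driver's former DPAA_BPS_NUM = 3 this prints
		 * 1024, 2048, 4096 - the "3 bp: 1KB, 2KB, 4KB" row above
		 */
		for (i = 0; i < 3; i++)
			printf("pool %d: %zu bytes\n", i,
			       bpool_buffer_raw_size((unsigned char)i, 3));
		return 0;
	}

With a single pool every Rx buffer is one 4KB page, and the usable size becomes SKB_WITH_OVERHEAD(4096), i.e. 4096 minus SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); the SMP_CACHE_BYTES reserve was only needed because netdev_alloc_frag() gave no start-of-buffer alignment guarantee.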
@@ -288,7 +266,7 @@ static int dpaa_stop(struct net_device *net_dev)
 	/* Allow the Fman (Tx) port to process in-flight frames before we
 	 * try switching it off.
 	 */
-	usleep_range(5000, 10000);
+	msleep(200);
 
 	err = mac_dev->stop(mac_dev);
 	if (err < 0)
@@ -305,6 +283,8 @@ static int dpaa_stop(struct net_device *net_dev)
 	phy_disconnect(net_dev->phydev);
 	net_dev->phydev = NULL;
 
+	msleep(200);
+
 	return err;
 }
@@ -596,10 +576,7 @@ static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
 
 static void dpaa_bps_free(struct dpaa_priv *priv)
 {
-	int i;
-
-	for (i = 0; i < DPAA_BPS_NUM; i++)
-		dpaa_bp_free(priv->dpaa_bps[i]);
+	dpaa_bp_free(priv->dpaa_bp);
 }
 
 /* Use multiple WQs for FQ assignment:
@@ -773,7 +750,7 @@ static void dpaa_release_channel(void)
 	qman_release_pool(rx_pool_channel);
 }
 
-static void dpaa_eth_add_channel(u16 channel)
+static void dpaa_eth_add_channel(u16 channel, struct device *dev)
 {
 	u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
 	const cpumask_t *cpus = qman_affine_cpus();
@@ -783,6 +760,7 @@ static void dpaa_eth_add_channel(u16 channel)
 	for_each_cpu_and(cpu, cpus, cpu_online_mask) {
 		portal = qman_get_affine_portal(cpu);
 		qman_p_static_dequeue_add(portal, pool);
+		qman_start_using_portal(portal, dev);
 	}
 }
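Note: the new qman_start_using_portal() call ties the portal's backing device to the Ethernet device, so the portal cannot be unbound while the interface is using it. A sketch of what the QMan side plausibly looks like, assuming it is built on the driver-core device-link API and that the portal config exposes its struct device (the authoritative version lives in drivers/soc/fsl/qbman/qman.c):

	/* consumer "dev" (the dpaa_eth device) depends on the supplier portal
	 * device; the link is dropped automatically when the consumer unbinds
	 */
	int qman_start_using_portal(struct qman_portal *p, struct device *dev)
	{
		return (!device_link_add(dev, p->config->dev,
					 DL_FLAG_AUTOREMOVE_CONSUMER)) ? -EINVAL : 0;
	}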
@@ -1197,15 +1175,15 @@ static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
 	return err;
 }
 
-static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
-				 size_t count, struct dpaa_fq *errq,
+static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp,
+				 struct dpaa_fq *errq,
 				 struct dpaa_fq *defq, struct dpaa_fq *pcdq,
 				 struct dpaa_buffer_layout *buf_layout)
 {
 	struct fman_buffer_prefix_content buf_prefix_content;
 	struct fman_port_rx_params *rx_p;
 	struct fman_port_params params;
-	int i, err;
+	int err;
 
 	memset(&params, 0, sizeof(params));
 	memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
@@ -1224,12 +1202,9 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
 		rx_p->pcd_fqs_count = DPAA_ETH_PCD_RXQ_NUM;
 	}
 
-	count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
-	rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
-	for (i = 0; i < count; i++) {
-		rx_p->ext_buf_pools.ext_buf_pool[i].id = bps[i]->bpid;
-		rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bps[i]->size;
-	}
+	rx_p->ext_buf_pools.num_of_pools_used = 1;
+	rx_p->ext_buf_pools.ext_buf_pool[0].id = bp->bpid;
+	rx_p->ext_buf_pools.ext_buf_pool[0].size = (u16)bp->size;
 
 	err = fman_port_config(port, &params);
 	if (err) {
@@ -1252,7 +1227,7 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
 }
 
 static int dpaa_eth_init_ports(struct mac_device *mac_dev,
-			       struct dpaa_bp **bps, size_t count,
+			       struct dpaa_bp *bp,
 			       struct fm_port_fqs *port_fqs,
 			       struct dpaa_buffer_layout *buf_layout,
 			       struct device *dev)
@@ -1266,7 +1241,7 @@ static int dpaa_eth_init_ports(struct mac_device *mac_dev,
 	if (err)
 		return err;
 
-	err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
+	err = dpaa_eth_init_rx_port(rxport, bp, port_fqs->rx_errq,
 				    port_fqs->rx_defq, port_fqs->rx_pcdq,
 				    &buf_layout[RX]);
 
@@ -1335,13 +1310,14 @@ static void dpaa_fd_release(const struct net_device *net_dev,
 	vaddr = phys_to_virt(qm_fd_addr(fd));
 	sgt = vaddr + qm_fd_get_offset(fd);
 
-	dma_unmap_single(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
-			 dpaa_bp->size, DMA_FROM_DEVICE);
+	dma_unmap_page(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
+		       DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
 
 	dpaa_release_sgt_members(sgt);
 
-	addr = dma_map_single(dpaa_bp->priv->rx_dma_dev, vaddr,
-			      dpaa_bp->size, DMA_FROM_DEVICE);
+	addr = dma_map_page(dpaa_bp->priv->rx_dma_dev,
+			    virt_to_page(vaddr), 0, DPAA_BP_RAW_SIZE,
+			    DMA_FROM_DEVICE);
 	if (dma_mapping_error(dpaa_bp->priv->rx_dma_dev, addr)) {
 		netdev_err(net_dev, "DMA mapping failed\n");
 		return;
@@ -1396,7 +1372,7 @@ static void count_ern(struct dpaa_percpu_priv *percpu_priv,
 static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
 			       struct sk_buff *skb,
 			       struct qm_fd *fd,
-			       char *parse_results)
+			       void *parse_results)
 {
 	struct fman_prs_result *parse_result;
 	u16 ethertype = ntohs(skb->protocol);
@@ -1491,21 +1467,18 @@ static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
 	struct net_device *net_dev = dpaa_bp->priv->net_dev;
 	struct bm_buffer bmb[8];
 	dma_addr_t addr;
-	void *new_buf;
+	struct page *p;
 	u8 i;
 
 	for (i = 0; i < 8; i++) {
-		new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
-		if (unlikely(!new_buf)) {
-			netdev_err(net_dev,
-				   "netdev_alloc_frag() failed, size %zu\n",
-				   dpaa_bp->raw_size);
+		p = dev_alloc_pages(0);
+		if (unlikely(!p)) {
+			netdev_err(net_dev, "dev_alloc_pages() failed\n");
 			goto release_previous_buffs;
 		}
-		new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);
 
-		addr = dma_map_single(dpaa_bp->priv->rx_dma_dev, new_buf,
-				      dpaa_bp->size, DMA_FROM_DEVICE);
+		addr = dma_map_page(dpaa_bp->priv->rx_dma_dev, p, 0,
+				    DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
 		if (unlikely(dma_mapping_error(dpaa_bp->priv->rx_dma_dev,
 					       addr))) {
 			netdev_err(net_dev, "DMA map failed\n");
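Note: dev_alloc_pages(0) hands back a single order-0 page, which is why the PTR_ALIGN() step could be dropped: page_address() of such a page is PAGE_SIZE aligned, which trivially satisfies the old SMP_CACHE_BYTES requirement, and the full DPAA_BP_RAW_SIZE page can be mapped. A sketch of the invariant the Rx path now relies on (illustrative, not driver code):

	static void *rx_buf_virt(struct page *p)
	{
		void *va = page_address(p);

		/* PAGE_SIZE alignment implies SMP_CACHE_BYTES alignment */
		WARN_ON(!IS_ALIGNED((unsigned long)va, SMP_CACHE_BYTES));
		return va;
	}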
@@ -1583,17 +1556,16 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
 {
 	struct dpaa_bp *dpaa_bp;
 	int *countptr;
-	int res, i;
+	int res;
+
+	dpaa_bp = priv->dpaa_bp;
+	if (!dpaa_bp)
+		return -EINVAL;
+	countptr = this_cpu_ptr(dpaa_bp->percpu_count);
+	res = dpaa_eth_refill_bpool(dpaa_bp, countptr);
+	if (res)
+		return res;
 
-	for (i = 0; i < DPAA_BPS_NUM; i++) {
-		dpaa_bp = priv->dpaa_bps[i];
-		if (!dpaa_bp)
-			return -EINVAL;
-		countptr = this_cpu_ptr(dpaa_bp->percpu_count);
-		res = dpaa_eth_refill_bpool(dpaa_bp, countptr);
-		if (res)
-			return res;
-	}
 	return 0;
 }
@@ -1602,30 +1574,62 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
  * Skb freeing is not handled here.
  *
  * This function may be called on error paths in the Tx function, so guard
- * against cases when not all fd relevant fields were filled in.
+ * against cases when not all fd relevant fields were filled in. To avoid
+ * reading the invalid transmission timestamp for the error paths set ts to
+ * false.
  *
  * Return the skb backpointer, since for S/G frames the buffer containing it
  * gets freed here.
  */
 static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
-					  const struct qm_fd *fd)
+					  const struct qm_fd *fd, bool ts)
 {
 	const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
 	struct device *dev = priv->net_dev->dev.parent;
 	struct skb_shared_hwtstamps shhwtstamps;
 	dma_addr_t addr = qm_fd_addr(fd);
+	void *vaddr = phys_to_virt(addr);
 	const struct qm_sg_entry *sgt;
-	struct sk_buff **skbh, *skb;
-	int nr_frags, i;
+	struct sk_buff *skb;
 	u64 ns;
+	int i;
 
-	skbh = (struct sk_buff **)phys_to_virt(addr);
-	skb = *skbh;
+	if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
+		dma_unmap_page(priv->tx_dma_dev, addr,
+			       qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
+			       dma_dir);
 
-	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+		/* The sgt buffer has been allocated with netdev_alloc_frag(),
+		 * it's from lowmem.
+		 */
+		sgt = vaddr + qm_fd_get_offset(fd);
+
+		/* sgt[0] is from lowmem, was dma_map_single()-ed */
+		dma_unmap_single(priv->tx_dma_dev, qm_sg_addr(&sgt[0]),
+				 qm_sg_entry_get_len(&sgt[0]), dma_dir);
+
+		/* remaining pages were mapped with skb_frag_dma_map() */
+		for (i = 1; (i < DPAA_SGT_MAX_ENTRIES) &&
+		     !qm_sg_entry_is_final(&sgt[i - 1]); i++) {
+			WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
+
+			dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[i]),
+				       qm_sg_entry_get_len(&sgt[i]), dma_dir);
+		}
+	} else {
+		dma_unmap_single(priv->tx_dma_dev, addr,
+				 priv->tx_headroom + qm_fd_get_length(fd),
+				 dma_dir);
+	}
+
+	skb = *(struct sk_buff **)vaddr;
+
+	/* DMA unmapping is required before accessing the HW provided info */
+	if (ts && priv->tx_tstamp &&
+	    skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
 		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
 
-		if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
+		if (!fman_port_get_tstamp(priv->mac_dev->port[TX], vaddr,
 					  &ns)) {
 			shhwtstamps.hwtstamp = ns_to_ktime(ns);
 			skb_tstamp_tx(skb, &shhwtstamps);
@@ -1634,35 +1638,9 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 		}
 	}
 
-	if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
-		nr_frags = skb_shinfo(skb)->nr_frags;
-		dma_unmap_single(priv->tx_dma_dev, addr,
-				 qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
-				 dma_dir);
-
-		/* The sgt buffer has been allocated with netdev_alloc_frag(),
-		 * it's from lowmem.
-		 */
-		sgt = phys_to_virt(addr + qm_fd_get_offset(fd));
-
-		/* sgt[0] is from lowmem, was dma_map_single()-ed */
-		dma_unmap_single(priv->tx_dma_dev, qm_sg_addr(&sgt[0]),
-				 qm_sg_entry_get_len(&sgt[0]), dma_dir);
-
-		/* remaining pages were mapped with skb_frag_dma_map() */
-		for (i = 1; i <= nr_frags; i++) {
-			WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
-
-			dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[i]),
-				       qm_sg_entry_get_len(&sgt[i]), dma_dir);
-		}
-
-		/* Free the page frag that we allocated on Tx */
-		skb_free_frag(phys_to_virt(addr));
-	} else {
-		dma_unmap_single(priv->tx_dma_dev, addr,
-				 skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
-	}
+	if (qm_fd_get_format(fd) == qm_fd_sg)
+		/* Free the page that we allocated on Tx for the SGT */
+		free_pages((unsigned long)vaddr, 0);
 
 	return skb;
 }
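Note: the "skb backpointer" mentioned in the function comment is literally the first word of the Tx buffer: the fd build paths store the skb pointer at the start of the headroom, and dpaa_cleanup_tx_fd() reads it back once the buffer is unmapped. Reduced to the two lines that implement the convention (taken from this patch; on the contiguous path buff_start == skb->data - priv->tx_headroom):

	/* store, at fd build time */
	*(struct sk_buff **)buff_start = skb;

	/* load, at confirmation/cleanup time, only after the DMA unmap */
	skb = *(struct sk_buff **)phys_to_virt(qm_fd_addr(fd));

Only dpaa_tx_conf() below passes ts == true; the xmit error path, dpaa_tx_error() and egress_ern() all pass false, so a timestamp is never read from a frame that was not actually transmitted.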
@@ -1717,7 +1695,7 @@ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
 	return skb;
 
 free_buffer:
-	skb_free_frag(vaddr);
+	free_pages((unsigned long)vaddr, 0);
 	return NULL;
 }
 
@@ -1764,8 +1742,8 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
 			goto free_buffers;
 
 		count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
-		dma_unmap_single(dpaa_bp->priv->rx_dma_dev, sg_addr,
-				 dpaa_bp->size, DMA_FROM_DEVICE);
+		dma_unmap_page(priv->rx_dma_dev, sg_addr,
+			       DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
 		if (!skb) {
 			sz = dpaa_bp->size +
 			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -1817,7 +1795,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
 	WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
 
 	/* free the SG table buffer */
-	skb_free_frag(vaddr);
+	free_pages((unsigned long)vaddr, 0);
 
 	return skb;
 
@@ -1834,7 +1812,7 @@ free_buffers:
 	for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
 		sg_addr = qm_sg_addr(&sgt[i]);
 		sg_vaddr = phys_to_virt(sg_addr);
-		skb_free_frag(sg_vaddr);
+		free_pages((unsigned long)sg_vaddr, 0);
 		dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
 		if (dpaa_bp) {
 			count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
@@ -1845,7 +1823,7 @@ free_buffers:
 			break;
 	}
 	/* free the SGT fragment */
-	skb_free_frag(vaddr);
+	free_pages((unsigned long)vaddr, 0);
 
 	return NULL;
 }
@@ -1856,7 +1834,7 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
 {
 	struct net_device *net_dev = priv->net_dev;
 	enum dma_data_direction dma_dir;
-	unsigned char *buffer_start;
+	unsigned char *buff_start;
 	struct sk_buff **skbh;
 	dma_addr_t addr;
 	int err;
@@ -1865,10 +1843,10 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
 	 * available, so just use that for offset.
 	 */
 	fd->bpid = FSL_DPAA_BPID_INV;
-	buffer_start = skb->data - priv->tx_headroom;
+	buff_start = skb->data - priv->tx_headroom;
 	dma_dir = DMA_TO_DEVICE;
 
-	skbh = (struct sk_buff **)buffer_start;
+	skbh = (struct sk_buff **)buff_start;
 	*skbh = skb;
 
 	/* Enable L3/L4 hardware checksum computation.
@@ -1877,7 +1855,7 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
 	 * need to write into the skb.
 	 */
 	err = dpaa_enable_tx_csum(priv, skb, fd,
-				  ((char *)skbh) + DPAA_TX_PRIV_DATA_SIZE);
+				  buff_start + DPAA_TX_PRIV_DATA_SIZE);
 	if (unlikely(err < 0)) {
 		if (net_ratelimit())
 			netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
@@ -1890,8 +1868,8 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
 	fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
 
 	/* Map the entire buffer size that may be seen by FMan, but no more */
-	addr = dma_map_single(priv->tx_dma_dev, skbh,
-			      skb_tail_pointer(skb) - buffer_start, dma_dir);
+	addr = dma_map_single(priv->tx_dma_dev, buff_start,
+			      priv->tx_headroom + skb->len, dma_dir);
 	if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
 		if (net_ratelimit())
 			netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
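Note: the two mapping lengths are equal for the linear skbs this path handles; the new expression just states the intent directly. A worked check (nr_frags == 0 here, so skb_headlen(skb) == skb->len and the tail pointer is skb->data + skb->len):

	/*
	 *   skb_tail_pointer(skb) - buff_start
	 * == (skb->data + skb_headlen(skb)) - (skb->data - priv->tx_headroom)
	 * == priv->tx_headroom + skb->len
	 */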
@@ -1910,21 +1888,20 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 	struct net_device *net_dev = priv->net_dev;
 	struct qm_sg_entry *sgt;
 	struct sk_buff **skbh;
-	int i, j, err, sz;
-	void *buffer_start;
+	void *buff_start;
 	skb_frag_t *frag;
 	dma_addr_t addr;
 	size_t frag_len;
-	void *sgt_buf;
+	struct page *p;
+	int i, j, err;
 
-	/* get a page frag to store the SGTable */
-	sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
-	sgt_buf = netdev_alloc_frag(sz);
-	if (unlikely(!sgt_buf)) {
-		netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
-			   sz);
+	/* get a page to store the SGTable */
+	p = dev_alloc_pages(0);
+	if (unlikely(!p)) {
+		netdev_err(net_dev, "dev_alloc_pages() failed\n");
 		return -ENOMEM;
 	}
+	buff_start = page_address(p);
 
 	/* Enable L3/L4 hardware checksum computation.
 	 *
@@ -1932,7 +1909,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 	 * need to write into the skb.
 	 */
 	err = dpaa_enable_tx_csum(priv, skb, fd,
-				  sgt_buf + DPAA_TX_PRIV_DATA_SIZE);
+				  buff_start + DPAA_TX_PRIV_DATA_SIZE);
 	if (unlikely(err < 0)) {
 		if (net_ratelimit())
 			netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
@@ -1941,7 +1918,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 	}
 
 	/* SGT[0] is used by the linear part */
-	sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
+	sgt = (struct qm_sg_entry *)(buff_start + priv->tx_headroom);
 	frag_len = skb_headlen(skb);
 	qm_sg_entry_set_len(&sgt[0], frag_len);
 	sgt[0].bpid = FSL_DPAA_BPID_INV;
@@ -1979,15 +1956,15 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 	/* Set the final bit in the last used entry of the SGT */
 	qm_sg_entry_set_f(&sgt[nr_frags], frag_len);
 
 	/* set fd offset to priv->tx_headroom */
 	qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
 
 	/* DMA map the SGT page */
-	buffer_start = (void *)sgt - priv->tx_headroom;
-	skbh = (struct sk_buff **)buffer_start;
+	skbh = (struct sk_buff **)buff_start;
 	*skbh = skb;
 
-	addr = dma_map_single(priv->tx_dma_dev, buffer_start,
-			      priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
+	addr = dma_map_page(priv->tx_dma_dev, p, 0,
+			    priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
 	if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
 		netdev_err(priv->net_dev, "DMA mapping failed\n");
 		err = -EINVAL;
@@ -2007,7 +1984,7 @@ sg_map_failed:
 			 qm_sg_entry_get_len(&sgt[j]), dma_dir);
 sg0_map_failed:
 csum_failed:
-	skb_free_frag(sgt_buf);
+	free_pages((unsigned long)buff_start, 0);
 
 	return err;
 }
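Note: after this change one order-0 page carries both the Tx private area and the SG table for an S/G frame; only the first priv->tx_headroom + DPAA_SGT_SIZE bytes are ever DMA mapped. The resulting layout, as a sketch assembled from this patch:

	/*
	 * buff_start = page_address(p)
	 *
	 * +----------------------+  buff_start
	 * | struct sk_buff *     |  backpointer stored via *skbh = skb
	 * | private data         |  buff_start + DPAA_TX_PRIV_DATA_SIZE:
	 * |                      |  parse results for dpaa_enable_tx_csum()
	 * +----------------------+  buff_start + priv->tx_headroom
	 * | struct qm_sg_entry   |  sgt[0] = linear part of the skb,
	 * | sgt[] (DPAA_SGT_SIZE |  sgt[1..nr_frags] = page frags,
	 * | bytes)               |  final bit set on the last used entry
	 * +----------------------+
	 * | unused               |
	 * +----------------------+  buff_start + PAGE_SIZE
	 */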
@@ -2114,7 +2091,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
 	if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
 		return NETDEV_TX_OK;
 
-	dpaa_cleanup_tx_fd(priv, &fd);
+	dpaa_cleanup_tx_fd(priv, &fd, false);
 skb_to_fd_failed:
 enomem:
 	percpu_stats->tx_errors++;
@@ -2160,7 +2137,7 @@ static void dpaa_tx_error(struct net_device *net_dev,
 
 	percpu_priv->stats.tx_errors++;
 
-	skb = dpaa_cleanup_tx_fd(priv, fd);
+	skb = dpaa_cleanup_tx_fd(priv, fd, false);
 	dev_kfree_skb(skb);
 }
 
@@ -2200,7 +2177,7 @@ static void dpaa_tx_conf(struct net_device *net_dev,
 
 	percpu_priv->tx_confirm++;
 
-	skb = dpaa_cleanup_tx_fd(priv, fd);
+	skb = dpaa_cleanup_tx_fd(priv, fd, true);
 
 	consume_skb(skb);
 }
@@ -2304,8 +2281,8 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
 		return qman_cb_dqrr_consume;
 	}
 
-	dma_unmap_single(dpaa_bp->priv->rx_dma_dev, addr, dpaa_bp->size,
-			 DMA_FROM_DEVICE);
+	dma_unmap_page(dpaa_bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
+		       DMA_FROM_DEVICE);
 
 	/* prefetch the first 64 bytes of the frame or the SGT start */
 	vaddr = phys_to_virt(addr);
@@ -2427,7 +2404,7 @@ static void egress_ern(struct qman_portal *portal,
 	percpu_priv->stats.tx_fifo_errors++;
 	count_ern(percpu_priv, msg);
 
-	skb = dpaa_cleanup_tx_fd(priv, fd);
+	skb = dpaa_cleanup_tx_fd(priv, fd, false);
 	dev_kfree_skb_any(skb);
 }
 
@@ -2660,7 +2637,8 @@ static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
 {
 	dma_addr_t addr = bm_buf_addr(bmb);
 
-	dma_unmap_single(bp->priv->rx_dma_dev, addr, bp->size, DMA_FROM_DEVICE);
+	dma_unmap_page(bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
+		       DMA_FROM_DEVICE);
 
 	skb_free_frag(phys_to_virt(addr));
 }
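Note: skb_free_frag() remains correct here even though the buffer is now a full page: it resolves to page_frag_free(), which just drops the last page reference. For an order-0 page holding a single reference it should be equivalent to the more explicit form used elsewhere in this patch (a hedged equivalence, not something the patch itself asserts):

	free_pages((unsigned long)phys_to_virt(addr), 0);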
@@ -2761,13 +2739,13 @@ static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
 
 static int dpaa_eth_probe(struct platform_device *pdev)
 {
-	struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL};
 	struct net_device *net_dev = NULL;
+	struct dpaa_bp *dpaa_bp = NULL;
 	struct dpaa_fq *dpaa_fq, *tmp;
 	struct dpaa_priv *priv = NULL;
 	struct fm_port_fqs port_fqs;
 	struct mac_device *mac_dev;
-	int err = 0, i, channel;
+	int err = 0, channel;
 	struct device *dev;
 
 	dev = &pdev->dev;
@@ -2856,23 +2834,21 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 	priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
 
 	/* bp init */
-	for (i = 0; i < DPAA_BPS_NUM; i++) {
-		dpaa_bps[i] = dpaa_bp_alloc(dev);
-		if (IS_ERR(dpaa_bps[i])) {
-			err = PTR_ERR(dpaa_bps[i]);
-			goto free_dpaa_bps;
-		}
-		/* the raw size of the buffers used for reception */
-		dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM);
-		/* avoid runtime computations by keeping the usable size here */
-		dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size);
-		dpaa_bps[i]->priv = priv;
-
-		err = dpaa_bp_alloc_pool(dpaa_bps[i]);
-		if (err < 0)
-			goto free_dpaa_bps;
-		priv->dpaa_bps[i] = dpaa_bps[i];
-	}
+	dpaa_bp = dpaa_bp_alloc(dev);
+	if (IS_ERR(dpaa_bp)) {
+		err = PTR_ERR(dpaa_bp);
+		goto free_dpaa_bps;
+	}
+	/* the raw size of the buffers used for reception */
+	dpaa_bp->raw_size = DPAA_BP_RAW_SIZE;
+	/* avoid runtime computations by keeping the usable size here */
+	dpaa_bp->size = dpaa_bp_size(dpaa_bp->raw_size);
+	dpaa_bp->priv = priv;
+
+	err = dpaa_bp_alloc_pool(dpaa_bp);
+	if (err < 0)
+		goto free_dpaa_bps;
+	priv->dpaa_bp = dpaa_bp;
 
 	INIT_LIST_HEAD(&priv->dpaa_fq_list);
@@ -2898,7 +2874,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 	/* Walk the CPUs with affine portals
 	 * and add this pool channel to each's dequeue mask.
 	 */
-	dpaa_eth_add_channel(priv->channel);
+	dpaa_eth_add_channel(priv->channel, &pdev->dev);
 
 	dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
 
@@ -2930,7 +2906,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 	priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);
 
 	/* All real interfaces need their ports initialized */
-	err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
+	err = dpaa_eth_init_ports(mac_dev, dpaa_bp, &port_fqs,
 				  &priv->buf_layout[0], dev);
 	if (err)
 		goto free_dpaa_fqs;