Merge branch 'liquidio-adding-support-for-ethtool-set-ring-feature'
Intiyaz Basha says:

====================
liquidio: adding support for ethtool --set-ring feature

Code reorganization is required for adding the ethtool --set-ring feature.
The first seven patches are for code reorganization. The last patch adds
the feature itself.

Change Log:
V1 -> V2
  Only patch #8 was changed: unnecessary parentheses were removed in two
  if-statements in lio_ethtool_set_ringparam().
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit 864a62596f
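For context: the feature this merge adds is driven from userspace through ethtool's ringparam interface ("ethtool -G ethX rx N tx N"), which reaches the driver's new .set_ringparam handler shown below. A minimal sketch of that userspace path using the standard SIOCETHTOOL ioctl (the interface name "eth0" and the requested counts are illustrative assumptions, not part of this commit):

/* Sketch only: resize rx/tx rings the way "ethtool -G" does. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
    struct ethtool_ringparam ering = { .cmd = ETHTOOL_GRINGPARAM };
    struct ifreq ifr = { 0 };
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    if (fd < 0)
        return 1;
    strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);   /* assumed name */
    ifr.ifr_data = (void *)&ering;

    if (ioctl(fd, SIOCETHTOOL, &ifr))              /* read current limits */
        return 1;
    printf("rx max %u, tx max %u\n", ering.rx_max_pending, ering.tx_max_pending);

    ering.cmd = ETHTOOL_SRINGPARAM;                /* request new sizes */
    ering.rx_pending = 1024;                       /* illustrative values */
    ering.tx_pending = 1024;
    return ioctl(fd, SIOCETHTOOL, &ifr) ? 1 : 0;   /* lands in .set_ringparam */
}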
@@ -36,8 +36,6 @@ struct octeon_cn23xx_vf {

#define CN23XX_MAILBOX_MSGPARAM_SIZE 6

-#define MAX_VF_IP_OP_PENDING_PKT_COUNT 100
-
void cn23xx_vf_ask_pf_to_do_flr(struct octeon_device *oct);

int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct);
@@ -364,3 +364,427 @@ void cleanup_rx_oom_poll_fn(struct net_device *netdev)
        destroy_workqueue(lio->rxq_status_wq.wq);
    }
}

/* Runs in interrupt context. */
static void lio_update_txq_status(struct octeon_device *oct, int iq_num)
{
    struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
    struct net_device *netdev;
    struct lio *lio;

    netdev = oct->props[iq->ifidx].netdev;

    /* This is needed because the first IQ does not have
     * a netdev associated with it.
     */
    if (!netdev)
        return;

    lio = GET_LIO(netdev);
    if (netif_is_multiqueue(netdev)) {
        if (__netif_subqueue_stopped(netdev, iq->q_index) &&
            lio->linfo.link.s.link_up &&
            (!octnet_iq_is_full(oct, iq_num))) {
            netif_wake_subqueue(netdev, iq->q_index);
            INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
                                      tx_restart, 1);
        }
    } else if (netif_queue_stopped(netdev) &&
               lio->linfo.link.s.link_up &&
               (!octnet_iq_is_full(oct, lio->txq))) {
        INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
                                  tx_restart, 1);
        netif_wake_queue(netdev);
    }
}

/**
 * \brief Setup output queue
 * @param oct octeon device
 * @param q_no which queue
 * @param num_descs how many descriptors
 * @param desc_size size of each descriptor
 * @param app_ctx application context
 */
static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
                             int desc_size, void *app_ctx)
{
    int ret_val;

    dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
    /* droq creation and local register settings. */
    ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
    if (ret_val < 0)
        return ret_val;

    if (ret_val == 1) {
        dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
        return 0;
    }

    /* Enable the droq queues */
    octeon_set_droq_pkt_op(oct, q_no, 1);

    /* Send Credit for Octeon Output queues. Credits are always
     * sent after the output queue is enabled.
     */
    writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg);

    return ret_val;
}

/** Routine to push packets arriving on Octeon interface upto network layer.
 * @param oct_id  - octeon device id.
 * @param skbuff  - skbuff struct to be passed to network layer.
 * @param len     - size of total data received.
 * @param rh      - Control header associated with the packet
 * @param param   - additional control data with the packet
 * @param arg     - farg registered in droq_ops
 */
static void
liquidio_push_packet(u32 octeon_id __attribute__((unused)),
                     void *skbuff,
                     u32 len,
                     union octeon_rh *rh,
                     void *param,
                     void *arg)
{
    struct net_device *netdev = (struct net_device *)arg;
    struct octeon_droq *droq =
        container_of(param, struct octeon_droq, napi);
    struct sk_buff *skb = (struct sk_buff *)skbuff;
    struct skb_shared_hwtstamps *shhwtstamps;
    struct napi_struct *napi = param;
    u16 vtag = 0;
    u32 r_dh_off;
    u64 ns;

    if (netdev) {
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;
        int packet_was_received;

        /* Do not proceed if the interface is not in RUNNING state. */
        if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
            recv_buffer_free(skb);
            droq->stats.rx_dropped++;
            return;
        }

        skb->dev = netdev;

        skb_record_rx_queue(skb, droq->q_no);
        if (likely(len > MIN_SKB_SIZE)) {
            struct octeon_skb_page_info *pg_info;
            unsigned char *va;

            pg_info = ((struct octeon_skb_page_info *)(skb->cb));
            if (pg_info->page) {
                /* For Paged allocation use the frags */
                va = page_address(pg_info->page) +
                     pg_info->page_offset;
                memcpy(skb->data, va, MIN_SKB_SIZE);
                skb_put(skb, MIN_SKB_SIZE);
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                pg_info->page,
                                pg_info->page_offset +
                                    MIN_SKB_SIZE,
                                len - MIN_SKB_SIZE,
                                LIO_RXBUFFER_SZ);
            }
        } else {
            struct octeon_skb_page_info *pg_info =
                ((struct octeon_skb_page_info *)(skb->cb));
            skb_copy_to_linear_data(skb, page_address(pg_info->page)
                                         + pg_info->page_offset, len);
            skb_put(skb, len);
            put_page(pg_info->page);
        }

        r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;

        if (oct->ptp_enable) {
            if (rh->r_dh.has_hwtstamp) {
                /* timestamp is included from the hardware at
                 * the beginning of the packet.
                 */
                if (ifstate_check
                        (lio,
                         LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
                    /* Nanoseconds are in the first 64-bits
                     * of the packet.
                     */
                    memcpy(&ns, (skb->data + r_dh_off),
                           sizeof(ns));
                    r_dh_off -= BYTES_PER_DHLEN_UNIT;
                    shhwtstamps = skb_hwtstamps(skb);
                    shhwtstamps->hwtstamp =
                        ns_to_ktime(ns +
                                    lio->ptp_adjust);
                }
            }
        }

        if (rh->r_dh.has_hash) {
            __be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
            u32 hash = be32_to_cpu(*hash_be);

            skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
            r_dh_off -= BYTES_PER_DHLEN_UNIT;
        }

        skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
        skb->protocol = eth_type_trans(skb, skb->dev);

        if ((netdev->features & NETIF_F_RXCSUM) &&
            (((rh->r_dh.encap_on) &&
              (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
             (!(rh->r_dh.encap_on) &&
              (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
            /* checksum has already been verified */
            skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
            skb->ip_summed = CHECKSUM_NONE;

        /* Setting Encapsulation field on basis of status received
         * from the firmware
         */
        if (rh->r_dh.encap_on) {
            skb->encapsulation = 1;
            skb->csum_level = 1;
            droq->stats.rx_vxlan++;
        }

        /* inbound VLAN tag */
        if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
            rh->r_dh.vlan) {
            u16 priority = rh->r_dh.priority;
            u16 vid = rh->r_dh.vlan;

            vtag = (priority << VLAN_PRIO_SHIFT) | vid;
            __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
        }

        packet_was_received = (napi_gro_receive(napi, skb) != GRO_DROP);

        if (packet_was_received) {
            droq->stats.rx_bytes_received += len;
            droq->stats.rx_pkts_received++;
        } else {
            droq->stats.rx_dropped++;
            netif_info(lio, rx_err, lio->netdev,
                       "droq:%d error rx_dropped:%llu\n",
                       droq->q_no, droq->stats.rx_dropped);
        }

    } else {
        recv_buffer_free(skb);
    }
}

/**
 * \brief wrapper for calling napi_schedule
 * @param param parameters to pass to napi_schedule
 *
 * Used when scheduling on different CPUs
 */
static void napi_schedule_wrapper(void *param)
{
    struct napi_struct *napi = param;

    napi_schedule(napi);
}

/**
 * \brief callback when receive interrupt occurs and we are in NAPI mode
 * @param arg pointer to octeon output queue
 */
static void liquidio_napi_drv_callback(void *arg)
{
    struct octeon_device *oct;
    struct octeon_droq *droq = arg;
    int this_cpu = smp_processor_id();

    oct = droq->oct_dev;

    if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct) ||
        droq->cpu_id == this_cpu) {
        napi_schedule_irqoff(&droq->napi);
    } else {
        struct call_single_data *csd = &droq->csd;

        csd->func = napi_schedule_wrapper;
        csd->info = &droq->napi;
        csd->flags = 0;

        smp_call_function_single_async(droq->cpu_id, csd);
    }
}

/**
 * \brief Entry point for NAPI polling
 * @param napi NAPI structure
 * @param budget maximum number of items to process
 */
static int liquidio_napi_poll(struct napi_struct *napi, int budget)
{
    struct octeon_instr_queue *iq;
    struct octeon_device *oct;
    struct octeon_droq *droq;
    int tx_done = 0, iq_no;
    int work_done;

    droq = container_of(napi, struct octeon_droq, napi);
    oct = droq->oct_dev;
    iq_no = droq->q_no;

    /* Handle Droq descriptors */
    work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
                                             POLL_EVENT_PROCESS_PKTS,
                                             budget);

    /* Flush the instruction queue */
    iq = oct->instr_queue[iq_no];
    if (iq) {
        /* TODO: move this check to inside octeon_flush_iq,
         * once check_db_timeout is removed
         */
        if (atomic_read(&iq->instr_pending))
            /* Process iq buffers with in the budget limits */
            tx_done = octeon_flush_iq(oct, iq, budget);
        else
            tx_done = 1;
        /* Update iq read-index rather than waiting for next interrupt.
         * Return back if tx_done is false.
         */
        /* sub-queue status update */
        lio_update_txq_status(oct, iq_no);
    } else {
        dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
                __func__, iq_no);
    }

#define MAX_REG_CNT 2000000U
    /* force enable interrupt if reg cnts are high to avoid wraparound */
    if (((work_done < budget) && (tx_done)) ||
        (iq->pkt_in_done >= MAX_REG_CNT) ||
        (droq->pkt_count >= MAX_REG_CNT)) {
        tx_done = 1;
        napi_complete_done(napi, work_done);

        octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
                                     POLL_EVENT_ENABLE_INTR, 0);
        return 0;
    }

    return (!tx_done) ? (budget) : (work_done);
}

/**
 * \brief Setup input and output queues
 * @param octeon_dev octeon device
 * @param ifidx Interface index
 *
 * Note: Queues are with respect to the octeon device. Thus
 * an input queue is for egress packets, and output queues
 * are for ingress packets.
 */
int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx)
{
    struct octeon_droq_ops droq_ops;
    struct net_device *netdev;
    struct octeon_droq *droq;
    struct napi_struct *napi;
    int cpu_id_modulus;
    int num_tx_descs;
    struct lio *lio;
    int retval = 0;
    int q, q_no;
    int cpu_id;

    netdev = octeon_dev->props[ifidx].netdev;

    lio = GET_LIO(netdev);

    memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));

    droq_ops.fptr = liquidio_push_packet;
    droq_ops.farg = netdev;

    droq_ops.poll_mode = 1;
    droq_ops.napi_fn = liquidio_napi_drv_callback;
    cpu_id = 0;
    cpu_id_modulus = num_present_cpus();

    /* set up DROQs. */
    for (q = 0; q < lio->linfo.num_rxpciq; q++) {
        q_no = lio->linfo.rxpciq[q].s.q_no;
        dev_dbg(&octeon_dev->pci_dev->dev,
                "%s index:%d linfo.rxpciq.s.q_no:%d\n",
                __func__, q, q_no);
        retval = octeon_setup_droq(
            octeon_dev, q_no,
            CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev),
                                        lio->ifidx),
            CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev),
                                           lio->ifidx),
            NULL);
        if (retval) {
            dev_err(&octeon_dev->pci_dev->dev,
                    "%s : Runtime DROQ(RxQ) creation failed.\n",
                    __func__);
            return 1;
        }

        droq = octeon_dev->droq[q_no];
        napi = &droq->napi;
        dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n",
                (u64)netdev, (u64)octeon_dev);
        netif_napi_add(netdev, napi, liquidio_napi_poll, 64);

        /* designate a CPU for this droq */
        droq->cpu_id = cpu_id;
        cpu_id++;
        if (cpu_id >= cpu_id_modulus)
            cpu_id = 0;

        octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
    }

    if (OCTEON_CN23XX_PF(octeon_dev) || OCTEON_CN23XX_VF(octeon_dev)) {
        /* 23XX PF/VF can send/recv control messages (via the first
         * PF/VF-owned droq) from the firmware even if the ethX
         * interface is down, so that's why poll_mode must be off
         * for the first droq.
         */
        octeon_dev->droq[0]->ops.poll_mode = 0;
    }

    /* set up IQs. */
    for (q = 0; q < lio->linfo.num_txpciq; q++) {
        num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(
            octeon_get_conf(octeon_dev), lio->ifidx);
        retval = octeon_setup_iq(octeon_dev, ifidx, q,
                                 lio->linfo.txpciq[q], num_tx_descs,
                                 netdev_get_tx_queue(netdev, q));
        if (retval) {
            dev_err(&octeon_dev->pci_dev->dev,
                    " %s : Runtime IQ(TxQ) creation failed.\n",
                    __func__);
            return 1;
        }

        /* XPS */
        if (!OCTEON_CN23XX_VF(octeon_dev) && octeon_dev->msix_on &&
            octeon_dev->ioq_vector) {
            struct octeon_ioq_vector *ioq_vector;

            ioq_vector = &octeon_dev->ioq_vector[q];
            netif_set_xps_queue(netdev,
                                &ioq_vector->affinity_mask,
                                ioq_vector->iq_index);
        }
    }

    return 0;
}
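A note for readers outside the driver: liquidio_napi_poll() above follows the standard NAPI contract. It processes at most budget packets per call; if it finishes early it calls napi_complete_done() and re-enables interrupts, while returning the full budget keeps the queue in polling mode. A generic skeleton of that contract (illustration only; example_rx_process() and example_enable_rx_irq() are hypothetical stand-ins, not LiquidIO functions):

/* Generic NAPI poll skeleton (illustration, not driver code). */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
    /* Consume up to budget rx packets (hypothetical helper). */
    int work_done = example_rx_process(napi, budget);

    if (work_done < budget) {
        /* All pending work finished: leave polling mode and
         * re-enable the device interrupt (hypothetical helper).
         */
        napi_complete_done(napi, work_done);
        example_enable_rx_irq(napi);
    }
    /* Returning budget keeps this context on the poll list. */
    return work_done;
}

The LiquidIO version additionally flushes the instruction (tx) queue within the same budget and force-completes when hardware packet counters approach wraparound.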
@@ -637,6 +637,9 @@ lio_ethtool_get_ringparam(struct net_device *netdev,
    u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
        rx_pending = 0;

+   if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
+       return;
+
    if (OCTEON_CN6XXX(oct)) {
        struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);

@@ -661,6 +664,126 @@ lio_ethtool_get_ringparam(struct net_device *netdev,
    ering->rx_jumbo_max_pending = 0;
}

static int lio_reset_queues(struct net_device *netdev)
{
    struct lio *lio = GET_LIO(netdev);
    struct octeon_device *oct = lio->oct_dev;
    struct napi_struct *napi, *n;
    int i;

    dev_dbg(&oct->pci_dev->dev, "%s:%d ifidx %d\n",
            __func__, __LINE__, lio->ifidx);

    if (wait_for_pending_requests(oct))
        dev_err(&oct->pci_dev->dev, "There were pending requests\n");

    if (lio_wait_for_instr_fetch(oct))
        dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

    if (octeon_set_io_queues_off(oct)) {
        dev_err(&oct->pci_dev->dev, "setting io queues off failed\n");
        return -1;
    }

    /* Disable the input and output queues now. No more packets will
     * arrive from Octeon.
     */
    oct->fn_list.disable_io_queues(oct);
    /* Delete NAPI */
    list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
        netif_napi_del(napi);

    for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
        if (!(oct->io_qmask.oq & BIT_ULL(i)))
            continue;
        octeon_delete_droq(oct, i);
    }

    for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
        if (!(oct->io_qmask.iq & BIT_ULL(i)))
            continue;
        octeon_delete_instr_queue(oct, i);
    }

    if (oct->fn_list.setup_device_regs(oct)) {
        dev_err(&oct->pci_dev->dev, "Failed to configure device registers\n");
        return -1;
    }

    if (liquidio_setup_io_queues(oct, 0)) {
        dev_err(&oct->pci_dev->dev, "IO queues initialization failed\n");
        return -1;
    }

    /* Enable the input and output queues for this Octeon device */
    if (oct->fn_list.enable_io_queues(oct)) {
        dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues");
        return -1;
    }

    return 0;
}

static int lio_ethtool_set_ringparam(struct net_device *netdev,
                                     struct ethtool_ringparam *ering)
{
    u32 rx_count, tx_count, rx_count_old, tx_count_old;
    struct lio *lio = GET_LIO(netdev);
    struct octeon_device *oct = lio->oct_dev;
    int stopped = 0;

    if (!OCTEON_CN23XX_PF(oct) && !OCTEON_CN23XX_VF(oct))
        return -EINVAL;

    if (ering->rx_mini_pending || ering->rx_jumbo_pending)
        return -EINVAL;

    rx_count = clamp_t(u32, ering->rx_pending, CN23XX_MIN_OQ_DESCRIPTORS,
                       CN23XX_MAX_OQ_DESCRIPTORS);
    tx_count = clamp_t(u32, ering->tx_pending, CN23XX_MIN_IQ_DESCRIPTORS,
                       CN23XX_MAX_IQ_DESCRIPTORS);

    rx_count_old = oct->droq[0]->max_count;
    tx_count_old = oct->instr_queue[0]->max_count;

    if (rx_count == rx_count_old && tx_count == tx_count_old)
        return 0;

    ifstate_set(lio, LIO_IFSTATE_RESETTING);

    if (netif_running(netdev)) {
        netdev->netdev_ops->ndo_stop(netdev);
        stopped = 1;
    }

    /* Change RX/TX DESCS count */
    if (tx_count != tx_count_old)
        CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
                                    tx_count);
    if (rx_count != rx_count_old)
        CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
                                    rx_count);

    if (lio_reset_queues(netdev))
        goto err_lio_reset_queues;

    if (stopped)
        netdev->netdev_ops->ndo_open(netdev);

    ifstate_reset(lio, LIO_IFSTATE_RESETTING);

    return 0;

err_lio_reset_queues:
    if (tx_count != tx_count_old)
        CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
                                    tx_count_old);
    if (rx_count != rx_count_old)
        CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
                                    rx_count_old);
    return -EINVAL;
}

static u32 lio_get_msglevel(struct net_device *netdev)
{
    struct lio *lio = GET_LIO(netdev);

@@ -779,6 +902,9 @@ lio_get_ethtool_stats(struct net_device *netdev,
    struct net_device_stats *netstats = &netdev->stats;
    int i = 0, j;

+   if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
+       return;
+
    netdev->netdev_ops->ndo_get_stats(netdev);
    octnet_get_link_stats(netdev);

@@ -1043,6 +1169,9 @@ static void lio_vf_get_ethtool_stats(struct net_device *netdev,
    struct octeon_device *oct_dev = lio->oct_dev;
    int i = 0, j, vj;

+   if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
+       return;
+
    netdev->netdev_ops->ndo_get_stats(netdev);
    /* sum of oct->droq[oq_no]->stats->rx_pkts_received */
    data[i++] = CVM_CAST64(netstats->rx_packets);
@@ -2574,6 +2703,7 @@ static const struct ethtool_ops lio_ethtool_ops = {
    .get_link = ethtool_op_get_link,
    .get_drvinfo = lio_get_drvinfo,
    .get_ringparam = lio_ethtool_get_ringparam,
+   .set_ringparam = lio_ethtool_set_ringparam,
    .get_channels = lio_ethtool_get_channels,
    .set_phys_id = lio_set_phys_id,
    .get_eeprom_len = lio_get_eeprom_len,
@@ -2599,6 +2729,7 @@ static const struct ethtool_ops lio_vf_ethtool_ops = {
    .get_link = ethtool_op_get_link,
    .get_drvinfo = lio_get_vf_drvinfo,
    .get_ringparam = lio_ethtool_get_ringparam,
+   .set_ringparam = lio_ethtool_set_ringparam,
    .get_channels = lio_ethtool_get_channels,
    .get_strings = lio_vf_get_strings,
    .get_ethtool_stats = lio_vf_get_ethtool_stats,
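Two details of the new handler are worth calling out. First, out-of-range requests are not rejected: they are clamped to the CN23XX limits (128 to 2048 descriptors). Second, the new counts take effect because lio_reset_queues() tears the queues down and rebuilds them through liquidio_setup_io_queues(), which re-reads the values stored by the CFG_SET_NUM_RX/TX_DESCS_NIC_IF() macros. A standalone illustration of the clamping (the macro below merely mimics the kernel's clamp_t(); the request values are examples):

#include <stdio.h>

/* User-space stand-in for the kernel's clamp_t(type, val, lo, hi). */
#define clamp_t(type, val, lo, hi) \
    ((type)(val) < (type)(lo) ? (type)(lo) : \
     ((type)(val) > (type)(hi) ? (type)(hi) : (type)(val)))

int main(void)
{
    /* CN23XX_MIN_OQ_DESCRIPTORS = 128, CN23XX_MAX_OQ_DESCRIPTORS = 2048 */
    printf("%u\n", clamp_t(unsigned int, 4096, 128, 2048)); /* -> 2048 */
    printf("%u\n", clamp_t(unsigned int,   64, 128, 2048)); /* -> 128  */
    printf("%u\n", clamp_t(unsigned int,  512, 128, 2048)); /* -> 512  */
    return 0;
}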
@@ -275,32 +275,6 @@ static void force_io_queues_off(struct octeon_device *oct)
    }
}

/**
 * \brief wait for all pending requests to complete
 * @param oct Pointer to Octeon device
 *
 * Called during shutdown sequence
 */
static int wait_for_pending_requests(struct octeon_device *oct)
{
    int i, pcount = 0;

    for (i = 0; i < 100; i++) {
        pcount =
            atomic_read(&oct->response_list
                [OCTEON_ORDERED_SC_LIST].pending_req_count);
        if (pcount)
            schedule_timeout_uninterruptible(HZ / 10);
        else
            break;
    }

    if (pcount)
        return 1;

    return 0;
}

/**
 * \brief Cause device to go quiet so it can be safely removed/reset/etc
 * @param oct Pointer to Octeon device
@@ -843,7 +817,8 @@ static void print_link_info(struct net_device *netdev)
{
    struct lio *lio = GET_LIO(netdev);

-   if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) {
+   if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
+       ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
        struct oct_link_info *linfo = &lio->linfo;

        if (linfo->link.s.link_up) {
@@ -932,39 +907,6 @@ static inline void update_link_status(struct net_device *netdev,
    }
}

/* Runs in interrupt context. */
static void update_txq_status(struct octeon_device *oct, int iq_num)
{
    struct net_device *netdev;
    struct lio *lio;
    struct octeon_instr_queue *iq = oct->instr_queue[iq_num];

    netdev = oct->props[iq->ifidx].netdev;

    /* This is needed because the first IQ does not have
     * a netdev associated with it.
     */
    if (!netdev)
        return;

    lio = GET_LIO(netdev);
    if (netif_is_multiqueue(netdev)) {
        if (__netif_subqueue_stopped(netdev, iq->q_index) &&
            lio->linfo.link.s.link_up &&
            (!octnet_iq_is_full(oct, iq_num))) {
            INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
                                      tx_restart, 1);
            netif_wake_subqueue(netdev, iq->q_index);
        }
    } else if (netif_queue_stopped(netdev) &&
               lio->linfo.link.s.link_up &&
               (!octnet_iq_is_full(oct, lio->txq))) {
        INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
                                  lio->txq, tx_restart, 1);
        netif_wake_queue(netdev);
    }
}

static
int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
{
@@ -2257,43 +2199,6 @@ static int load_firmware(struct octeon_device *oct)
    return ret;
}

/**
 * \brief Setup output queue
 * @param oct octeon device
 * @param q_no which queue
 * @param num_descs how many descriptors
 * @param desc_size size of each descriptor
 * @param app_ctx application context
 */
static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
                             int desc_size, void *app_ctx)
{
    int ret_val = 0;

    dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
    /* droq creation and local register settings. */
    ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
    if (ret_val < 0)
        return ret_val;

    if (ret_val == 1) {
        dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
        return 0;
    }
    /* tasklet creation for the droq */

    /* Enable the droq queues */
    octeon_set_droq_pkt_op(oct, q_no, 1);

    /* Send Credit for Octeon Output queues. Credits are always
     * sent after the output queue is enabled.
     */
    writel(oct->droq[q_no]->max_count,
           oct->droq[q_no]->pkts_credit_reg);

    return ret_val;
}

/**
 * \brief Callback for getting interface configuration
 * @param status status of request
@@ -2327,350 +2232,6 @@ static void if_cfg_callback(struct octeon_device *oct,
    wake_up_interruptible(&ctx->wc);
}

/** Routine to push packets arriving on Octeon interface upto network layer.
 * @param oct_id  - octeon device id.
 * @param skbuff  - skbuff struct to be passed to network layer.
 * @param len     - size of total data received.
 * @param rh      - Control header associated with the packet
 * @param param   - additional control data with the packet
 * @param arg     - farg registered in droq_ops
 */
static void
liquidio_push_packet(u32 octeon_id __attribute__((unused)),
                     void *skbuff,
                     u32 len,
                     union octeon_rh *rh,
                     void *param,
                     void *arg)
{
    struct napi_struct *napi = param;
    struct sk_buff *skb = (struct sk_buff *)skbuff;
    struct skb_shared_hwtstamps *shhwtstamps;
    u64 ns;
    u16 vtag = 0;
    u32 r_dh_off;
    struct net_device *netdev = (struct net_device *)arg;
    struct octeon_droq *droq = container_of(param, struct octeon_droq,
                                            napi);
    if (netdev) {
        int packet_was_received;
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;

        /* Do not proceed if the interface is not in RUNNING state. */
        if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
            recv_buffer_free(skb);
            droq->stats.rx_dropped++;
            return;
        }

        skb->dev = netdev;

        skb_record_rx_queue(skb, droq->q_no);
        if (likely(len > MIN_SKB_SIZE)) {
            struct octeon_skb_page_info *pg_info;
            unsigned char *va;

            pg_info = ((struct octeon_skb_page_info *)(skb->cb));
            if (pg_info->page) {
                /* For Paged allocation use the frags */
                va = page_address(pg_info->page) +
                     pg_info->page_offset;
                memcpy(skb->data, va, MIN_SKB_SIZE);
                skb_put(skb, MIN_SKB_SIZE);
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                pg_info->page,
                                pg_info->page_offset +
                                    MIN_SKB_SIZE,
                                len - MIN_SKB_SIZE,
                                LIO_RXBUFFER_SZ);
            }
        } else {
            struct octeon_skb_page_info *pg_info =
                ((struct octeon_skb_page_info *)(skb->cb));
            skb_copy_to_linear_data(skb, page_address(pg_info->page)
                                         + pg_info->page_offset, len);
            skb_put(skb, len);
            put_page(pg_info->page);
        }

        r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;

        if (oct->ptp_enable) {
            if (rh->r_dh.has_hwtstamp) {
                /* timestamp is included from the hardware at
                 * the beginning of the packet.
                 */
                if (ifstate_check
                        (lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
                    /* Nanoseconds are in the first 64-bits
                     * of the packet.
                     */
                    memcpy(&ns, (skb->data + r_dh_off),
                           sizeof(ns));
                    r_dh_off -= BYTES_PER_DHLEN_UNIT;
                    shhwtstamps = skb_hwtstamps(skb);
                    shhwtstamps->hwtstamp =
                        ns_to_ktime(ns +
                                    lio->ptp_adjust);
                }
            }
        }

        if (rh->r_dh.has_hash) {
            __be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
            u32 hash = be32_to_cpu(*hash_be);

            skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
            r_dh_off -= BYTES_PER_DHLEN_UNIT;
        }

        skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);

        skb->protocol = eth_type_trans(skb, skb->dev);
        if ((netdev->features & NETIF_F_RXCSUM) &&
            (((rh->r_dh.encap_on) &&
              (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
             (!(rh->r_dh.encap_on) &&
              (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
            /* checksum has already been verified */
            skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
            skb->ip_summed = CHECKSUM_NONE;

        /* Setting Encapsulation field on basis of status received
         * from the firmware
         */
        if (rh->r_dh.encap_on) {
            skb->encapsulation = 1;
            skb->csum_level = 1;
            droq->stats.rx_vxlan++;
        }

        /* inbound VLAN tag */
        if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
            (rh->r_dh.vlan != 0)) {
            u16 vid = rh->r_dh.vlan;
            u16 priority = rh->r_dh.priority;

            vtag = priority << 13 | vid;
            __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
        }

        packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP;

        if (packet_was_received) {
            droq->stats.rx_bytes_received += len;
            droq->stats.rx_pkts_received++;
        } else {
            droq->stats.rx_dropped++;
            netif_info(lio, rx_err, lio->netdev,
                       "droq:%d error rx_dropped:%llu\n",
                       droq->q_no, droq->stats.rx_dropped);
        }

    } else {
        recv_buffer_free(skb);
    }
}

/**
 * \brief wrapper for calling napi_schedule
 * @param param parameters to pass to napi_schedule
 *
 * Used when scheduling on different CPUs
 */
static void napi_schedule_wrapper(void *param)
{
    struct napi_struct *napi = param;

    napi_schedule(napi);
}

/**
 * \brief callback when receive interrupt occurs and we are in NAPI mode
 * @param arg pointer to octeon output queue
 */
static void liquidio_napi_drv_callback(void *arg)
{
    struct octeon_device *oct;
    struct octeon_droq *droq = arg;
    int this_cpu = smp_processor_id();

    oct = droq->oct_dev;

    if (OCTEON_CN23XX_PF(oct) || droq->cpu_id == this_cpu) {
        napi_schedule_irqoff(&droq->napi);
    } else {
        struct call_single_data *csd = &droq->csd;

        csd->func = napi_schedule_wrapper;
        csd->info = &droq->napi;
        csd->flags = 0;

        smp_call_function_single_async(droq->cpu_id, csd);
    }
}

/**
 * \brief Entry point for NAPI polling
 * @param napi NAPI structure
 * @param budget maximum number of items to process
 */
static int liquidio_napi_poll(struct napi_struct *napi, int budget)
{
    struct octeon_droq *droq;
    int work_done;
    int tx_done = 0, iq_no;
    struct octeon_instr_queue *iq;
    struct octeon_device *oct;

    droq = container_of(napi, struct octeon_droq, napi);
    oct = droq->oct_dev;
    iq_no = droq->q_no;
    /* Handle Droq descriptors */
    work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
                                             POLL_EVENT_PROCESS_PKTS,
                                             budget);

    /* Flush the instruction queue */
    iq = oct->instr_queue[iq_no];
    if (iq) {
        if (atomic_read(&iq->instr_pending))
            /* Process iq buffers with in the budget limits */
            tx_done = octeon_flush_iq(oct, iq, budget);
        else
            tx_done = 1;
        /* Update iq read-index rather than waiting for next interrupt.
         * Return back if tx_done is false.
         */
        update_txq_status(oct, iq_no);
    } else {
        dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
                __func__, iq_no);
    }

    /* force enable interrupt if reg cnts are high to avoid wraparound */
    if ((work_done < budget && tx_done) ||
        (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
        (droq->pkt_count >= MAX_REG_CNT)) {
        tx_done = 1;
        napi_complete_done(napi, work_done);
        octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
                                     POLL_EVENT_ENABLE_INTR, 0);
        return 0;
    }

    return (!tx_done) ? (budget) : (work_done);
}

/**
 * \brief Setup input and output queues
 * @param octeon_dev octeon device
 * @param ifidx Interface Index
 *
 * Note: Queues are with respect to the octeon device. Thus
 * an input queue is for egress packets, and output queues
 * are for ingress packets.
 */
static inline int setup_io_queues(struct octeon_device *octeon_dev,
                                  int ifidx)
{
    struct octeon_droq_ops droq_ops;
    struct net_device *netdev;
    int cpu_id;
    int cpu_id_modulus;
    struct octeon_droq *droq;
    struct napi_struct *napi;
    int q, q_no, retval = 0;
    struct lio *lio;
    int num_tx_descs;

    netdev = octeon_dev->props[ifidx].netdev;

    lio = GET_LIO(netdev);

    memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));

    droq_ops.fptr = liquidio_push_packet;
    droq_ops.farg = (void *)netdev;

    droq_ops.poll_mode = 1;
    droq_ops.napi_fn = liquidio_napi_drv_callback;
    cpu_id = 0;
    cpu_id_modulus = num_present_cpus();

    /* set up DROQs. */
    for (q = 0; q < lio->linfo.num_rxpciq; q++) {
        q_no = lio->linfo.rxpciq[q].s.q_no;
        dev_dbg(&octeon_dev->pci_dev->dev,
                "setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n",
                q, q_no);
        retval = octeon_setup_droq(octeon_dev, q_no,
                                   CFG_GET_NUM_RX_DESCS_NIC_IF
                                   (octeon_get_conf(octeon_dev),
                                    lio->ifidx),
                                   CFG_GET_NUM_RX_BUF_SIZE_NIC_IF
                                   (octeon_get_conf(octeon_dev),
                                    lio->ifidx), NULL);
        if (retval) {
            dev_err(&octeon_dev->pci_dev->dev,
                    "%s : Runtime DROQ(RxQ) creation failed.\n",
                    __func__);
            return 1;
        }

        droq = octeon_dev->droq[q_no];
        napi = &droq->napi;
        dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx pf_num:%d\n",
                (u64)netdev, (u64)octeon_dev, octeon_dev->pf_num);
        netif_napi_add(netdev, napi, liquidio_napi_poll, 64);

        /* designate a CPU for this droq */
        droq->cpu_id = cpu_id;
        cpu_id++;
        if (cpu_id >= cpu_id_modulus)
            cpu_id = 0;

        octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
    }

    if (OCTEON_CN23XX_PF(octeon_dev)) {
        /* 23XX PF can receive control messages (via the first PF-owned
         * droq) from the firmware even if the ethX interface is down,
         * so that's why poll_mode must be off for the first droq.
         */
        octeon_dev->droq[0]->ops.poll_mode = 0;
    }

    /* set up IQs. */
    for (q = 0; q < lio->linfo.num_txpciq; q++) {
        num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf
                                                   (octeon_dev),
                                                   lio->ifidx);
        retval = octeon_setup_iq(octeon_dev, ifidx, q,
                                 lio->linfo.txpciq[q], num_tx_descs,
                                 netdev_get_tx_queue(netdev, q));
        if (retval) {
            dev_err(&octeon_dev->pci_dev->dev,
                    " %s : Runtime IQ(TxQ) creation failed.\n",
                    __func__);
            return 1;
        }

        if (octeon_dev->ioq_vector) {
            struct octeon_ioq_vector *ioq_vector;

            ioq_vector = &octeon_dev->ioq_vector[q];
            netif_set_xps_queue(netdev,
                                &ioq_vector->affinity_mask,
                                ioq_vector->iq_index);
        }
    }

    return 0;
}

/**
 * \brief Poll routine for checking transmit queue status
 * @param work work_struct data structure
@@ -2960,6 +2521,9 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)

    oct = lio->oct_dev;

+   if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
+       return stats;
+
    for (i = 0; i < lio->linfo.num_txpciq; i++) {
        iq_no = lio->linfo.txpciq[i].s.q_no;
        iq_stats = &oct->instr_queue[iq_no]->stats;
@@ -4231,7 +3795,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
     */
    lio->txq = lio->linfo.txpciq[0].s.q_no;
    lio->rxq = lio->linfo.rxpciq[0].s.q_no;
-   if (setup_io_queues(octeon_dev, i)) {
+   if (liquidio_setup_io_queues(octeon_dev, i)) {
        dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
        goto setup_nic_dev_fail;
    }
@@ -123,7 +123,7 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
    struct octeon_device_priv *oct_priv =
        (struct octeon_device_priv *)oct->priv;
-   int retry = MAX_VF_IP_OP_PENDING_PKT_COUNT;
+   int retry = MAX_IO_PENDING_PKT_COUNT;
    int pkt_cnt = 0, pending_pkts;
    int i;

@@ -147,32 +147,6 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct)
    return pkt_cnt;
}

/**
 * \brief wait for all pending requests to complete
 * @param oct Pointer to Octeon device
 *
 * Called during shutdown sequence
 */
static int wait_for_pending_requests(struct octeon_device *oct)
{
    int i, pcount = 0;

    for (i = 0; i < MAX_VF_IP_OP_PENDING_PKT_COUNT; i++) {
        pcount = atomic_read(
            &oct->response_list[OCTEON_ORDERED_SC_LIST]
            .pending_req_count);
        if (pcount)
            schedule_timeout_uninterruptible(HZ / 10);
        else
            break;
    }

    if (pcount)
        return 1;

    return 0;
}

/**
 * \brief Cause device to go quiet so it can be safely removed/reset/etc
 * @param oct Pointer to Octeon device
@@ -574,7 +548,8 @@ static void print_link_info(struct net_device *netdev)
{
    struct lio *lio = GET_LIO(netdev);

-   if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) {
+   if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
+       ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
        struct oct_link_info *linfo = &lio->linfo;

        if (linfo->link.s.link_up) {
@@ -673,30 +648,6 @@ static void update_link_status(struct net_device *netdev,
    }
}

static void update_txq_status(struct octeon_device *oct, int iq_num)
{
    struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
    struct net_device *netdev;
    struct lio *lio;

    netdev = oct->props[iq->ifidx].netdev;
    lio = GET_LIO(netdev);
    if (netif_is_multiqueue(netdev)) {
        if (__netif_subqueue_stopped(netdev, iq->q_index) &&
            lio->linfo.link.s.link_up &&
            (!octnet_iq_is_full(oct, iq_num))) {
            netif_wake_subqueue(netdev, iq->q_index);
            INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
                                      tx_restart, 1);
        }
    } else if (netif_queue_stopped(netdev) && lio->linfo.link.s.link_up &&
               (!octnet_iq_is_full(oct, lio->txq))) {
        INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
                                  lio->txq, tx_restart, 1);
        netif_wake_queue(netdev);
    }
}

static
int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
{
@@ -1394,41 +1345,6 @@ static void free_netsgbuf_with_resp(void *buf)
    check_txq_state(lio, skb);
}

/**
 * \brief Setup output queue
 * @param oct octeon device
 * @param q_no which queue
 * @param num_descs how many descriptors
 * @param desc_size size of each descriptor
 * @param app_ctx application context
 */
static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
                             int desc_size, void *app_ctx)
{
    int ret_val;

    dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
    /* droq creation and local register settings. */
    ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
    if (ret_val < 0)
        return ret_val;

    if (ret_val == 1) {
        dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
        return 0;
    }

    /* Enable the droq queues */
    octeon_set_droq_pkt_op(oct, q_no, 1);

    /* Send Credit for Octeon Output queues. Credits are always
     * sent after the output queue is enabled.
     */
    writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg);

    return ret_val;
}

/**
 * \brief Callback for getting interface configuration
 * @param status status of request
@@ -1461,290 +1377,6 @@ static void if_cfg_callback(struct octeon_device *oct,
    wake_up_interruptible(&ctx->wc);
}

/** Routine to push packets arriving on Octeon interface upto network layer.
 * @param oct_id  - octeon device id.
 * @param skbuff  - skbuff struct to be passed to network layer.
 * @param len     - size of total data received.
 * @param rh      - Control header associated with the packet
 * @param param   - additional control data with the packet
 * @param arg     - farg registered in droq_ops
 */
static void
liquidio_push_packet(u32 octeon_id __attribute__((unused)),
                     void *skbuff,
                     u32 len,
                     union octeon_rh *rh,
                     void *param,
                     void *arg)
{
    struct napi_struct *napi = param;
    struct octeon_droq *droq =
        container_of(param, struct octeon_droq, napi);
    struct net_device *netdev = (struct net_device *)arg;
    struct sk_buff *skb = (struct sk_buff *)skbuff;
    u16 vtag = 0;
    u32 r_dh_off;

    if (netdev) {
        struct lio *lio = GET_LIO(netdev);
        int packet_was_received;

        /* Do not proceed if the interface is not in RUNNING state. */
        if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
            recv_buffer_free(skb);
            droq->stats.rx_dropped++;
            return;
        }

        skb->dev = netdev;

        skb_record_rx_queue(skb, droq->q_no);
        if (likely(len > MIN_SKB_SIZE)) {
            struct octeon_skb_page_info *pg_info;
            unsigned char *va;

            pg_info = ((struct octeon_skb_page_info *)(skb->cb));
            if (pg_info->page) {
                /* For Paged allocation use the frags */
                va = page_address(pg_info->page) +
                     pg_info->page_offset;
                memcpy(skb->data, va, MIN_SKB_SIZE);
                skb_put(skb, MIN_SKB_SIZE);
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                pg_info->page,
                                pg_info->page_offset +
                                    MIN_SKB_SIZE,
                                len - MIN_SKB_SIZE,
                                LIO_RXBUFFER_SZ);
            }
        } else {
            struct octeon_skb_page_info *pg_info =
                ((struct octeon_skb_page_info *)(skb->cb));
            skb_copy_to_linear_data(skb,
                                    page_address(pg_info->page) +
                                    pg_info->page_offset, len);
            skb_put(skb, len);
            put_page(pg_info->page);
        }

        r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;

        if (rh->r_dh.has_hwtstamp)
            r_dh_off -= BYTES_PER_DHLEN_UNIT;

        if (rh->r_dh.has_hash) {
            __be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
            u32 hash = be32_to_cpu(*hash_be);

            skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
            r_dh_off -= BYTES_PER_DHLEN_UNIT;
        }

        skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
        skb->protocol = eth_type_trans(skb, skb->dev);

        if ((netdev->features & NETIF_F_RXCSUM) &&
            (((rh->r_dh.encap_on) &&
              (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
             (!(rh->r_dh.encap_on) &&
              (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
            /* checksum has already been verified */
            skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
            skb->ip_summed = CHECKSUM_NONE;

        /* Setting Encapsulation field on basis of status received
         * from the firmware
         */
        if (rh->r_dh.encap_on) {
            skb->encapsulation = 1;
            skb->csum_level = 1;
            droq->stats.rx_vxlan++;
        }

        /* inbound VLAN tag */
        if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
            rh->r_dh.vlan) {
            u16 priority = rh->r_dh.priority;
            u16 vid = rh->r_dh.vlan;

            vtag = (priority << VLAN_PRIO_SHIFT) | vid;
            __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
        }

        packet_was_received = (napi_gro_receive(napi, skb) != GRO_DROP);

        if (packet_was_received) {
            droq->stats.rx_bytes_received += len;
            droq->stats.rx_pkts_received++;
        } else {
            droq->stats.rx_dropped++;
            netif_info(lio, rx_err, lio->netdev,
                       "droq:%d error rx_dropped:%llu\n",
                       droq->q_no, droq->stats.rx_dropped);
        }

    } else {
        recv_buffer_free(skb);
    }
}

/**
 * \brief callback when receive interrupt occurs and we are in NAPI mode
 * @param arg pointer to octeon output queue
 */
static void liquidio_vf_napi_drv_callback(void *arg)
{
    struct octeon_droq *droq = arg;

    napi_schedule_irqoff(&droq->napi);
}

/**
 * \brief Entry point for NAPI polling
 * @param napi NAPI structure
 * @param budget maximum number of items to process
 */
static int liquidio_napi_poll(struct napi_struct *napi, int budget)
{
    struct octeon_instr_queue *iq;
    struct octeon_device *oct;
    struct octeon_droq *droq;
    int tx_done = 0, iq_no;
    int work_done;

    droq = container_of(napi, struct octeon_droq, napi);
    oct = droq->oct_dev;
    iq_no = droq->q_no;

    /* Handle Droq descriptors */
    work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
                                             POLL_EVENT_PROCESS_PKTS,
                                             budget);

    /* Flush the instruction queue */
    iq = oct->instr_queue[iq_no];
    if (iq) {
        if (atomic_read(&iq->instr_pending))
            /* Process iq buffers with in the budget limits */
            tx_done = octeon_flush_iq(oct, iq, budget);
        else
            tx_done = 1;

        /* Update iq read-index rather than waiting for next interrupt.
         * Return back if tx_done is false.
         */
        update_txq_status(oct, iq_no);
    } else {
        dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
                __func__, iq_no);
    }

    /* force enable interrupt if reg cnts are high to avoid wraparound */
    if ((work_done < budget && tx_done) ||
        (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
        (droq->pkt_count >= MAX_REG_CNT)) {
        tx_done = 1;
        napi_complete_done(napi, work_done);
        octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
                                     POLL_EVENT_ENABLE_INTR, 0);
        return 0;
    }

    return (!tx_done) ? (budget) : (work_done);
}

/**
 * \brief Setup input and output queues
 * @param octeon_dev octeon device
 * @param ifidx Interface index
 *
 * Note: Queues are with respect to the octeon device. Thus
 * an input queue is for egress packets, and output queues
 * are for ingress packets.
 */
static int setup_io_queues(struct octeon_device *octeon_dev, int ifidx)
{
    struct octeon_droq_ops droq_ops;
    struct net_device *netdev;
    int cpu_id_modulus;
    struct octeon_droq *droq;
    struct napi_struct *napi;
    int cpu_id;
    int num_tx_descs;
    struct lio *lio;
    int retval = 0;
    int q, q_no;

    netdev = octeon_dev->props[ifidx].netdev;

    lio = GET_LIO(netdev);

    memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));

    droq_ops.fptr = liquidio_push_packet;
    droq_ops.farg = netdev;

    droq_ops.poll_mode = 1;
    droq_ops.napi_fn = liquidio_vf_napi_drv_callback;
    cpu_id = 0;
    cpu_id_modulus = num_present_cpus();

    /* set up DROQs. */
    for (q = 0; q < lio->linfo.num_rxpciq; q++) {
        q_no = lio->linfo.rxpciq[q].s.q_no;

        retval = octeon_setup_droq(
            octeon_dev, q_no,
            CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev),
                                        lio->ifidx),
            CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev),
                                           lio->ifidx),
            NULL);
        if (retval) {
            dev_err(&octeon_dev->pci_dev->dev,
                    "%s : Runtime DROQ(RxQ) creation failed.\n",
                    __func__);
            return 1;
        }

        droq = octeon_dev->droq[q_no];
        napi = &droq->napi;
        netif_napi_add(netdev, napi, liquidio_napi_poll, 64);

        /* designate a CPU for this droq */
        droq->cpu_id = cpu_id;
        cpu_id++;
        if (cpu_id >= cpu_id_modulus)
            cpu_id = 0;

        octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
    }

    /* 23XX VF can send/recv control messages (via the first VF-owned
     * droq) from the firmware even if the ethX interface is down,
     * so that's why poll_mode must be off for the first droq.
     */
    octeon_dev->droq[0]->ops.poll_mode = 0;

    /* set up IQs. */
    for (q = 0; q < lio->linfo.num_txpciq; q++) {
        num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(
            octeon_get_conf(octeon_dev), lio->ifidx);
        retval = octeon_setup_iq(octeon_dev, ifidx, q,
                                 lio->linfo.txpciq[q], num_tx_descs,
                                 netdev_get_tx_queue(netdev, q));
        if (retval) {
            dev_err(&octeon_dev->pci_dev->dev,
                    " %s : Runtime IQ(TxQ) creation failed.\n",
                    __func__);
            return 1;
        }
    }

    return 0;
}

/**
 * \brief Net device open for LiquidIO
 * @param netdev network device
@@ -2002,6 +1634,9 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)

    oct = lio->oct_dev;

+   if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
+       return stats;
+
    for (i = 0; i < lio->linfo.num_txpciq; i++) {
        iq_no = lio->linfo.txpciq[i].s.q_no;
        iq_stats = &oct->instr_queue[iq_no]->stats;
@@ -2973,7 +2608,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
    /* Copy MAC Address to OS network device structure */
    ether_addr_copy(netdev->dev_addr, mac);

-   if (setup_io_queues(octeon_dev, i)) {
+   if (liquidio_setup_io_queues(octeon_dev, i)) {
        dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
        goto setup_nic_dev_fail;
    }
@@ -71,13 +71,17 @@
#define CN23XX_MAX_RINGS_PER_VF 8

#define CN23XX_MAX_INPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
-#define CN23XX_MAX_IQ_DESCRIPTORS 512
+#define CN23XX_MAX_IQ_DESCRIPTORS 2048
+#define CN23XX_DEFAULT_IQ_DESCRIPTORS 512
+#define CN23XX_MIN_IQ_DESCRIPTORS 128
#define CN23XX_DB_MIN 1
#define CN23XX_DB_MAX 8
#define CN23XX_DB_TIMEOUT 1

#define CN23XX_MAX_OUTPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
-#define CN23XX_MAX_OQ_DESCRIPTORS 512
+#define CN23XX_MAX_OQ_DESCRIPTORS 2048
+#define CN23XX_DEFAULT_OQ_DESCRIPTORS 512
+#define CN23XX_MIN_OQ_DESCRIPTORS 128
#define CN23XX_OQ_BUF_SIZE 1664
#define CN23XX_OQ_PKTSPER_INTR 128
/*#define CAVIUM_ONLY_CN23XX_RX_PERF*/
@@ -163,6 +167,11 @@
        ((cfg)->misc.oct_link_query_interval)
#define CFG_GET_IS_SLI_BP_ON(cfg) ((cfg)->misc.enable_sli_oq_bp)

+#define CFG_SET_NUM_RX_DESCS_NIC_IF(cfg, idx, value) \
+       ((cfg)->nic_if_cfg[idx].num_rx_descs = value)
+#define CFG_SET_NUM_TX_DESCS_NIC_IF(cfg, idx, value) \
+       ((cfg)->nic_if_cfg[idx].num_tx_descs = value)
+
/* Max IOQs per OCTEON Link */
#define MAX_IOQS_PER_NICIF 64

@@ -418,7 +418,7 @@ static struct octeon_config default_cn23xx_conf = {
    /** IQ attributes */
    .iq = {
        .max_iqs = CN23XX_CFG_IO_QUEUES,
-       .pending_list_size = (CN23XX_MAX_IQ_DESCRIPTORS *
+       .pending_list_size = (CN23XX_DEFAULT_IQ_DESCRIPTORS *
                              CN23XX_CFG_IO_QUEUES),
        .instr_type = OCTEON_64BYTE_INSTR,
        .db_min = CN23XX_DB_MIN,
@@ -436,8 +436,8 @@ static struct octeon_config default_cn23xx_conf = {
    },

    .num_nic_ports = DEFAULT_NUM_NIC_PORTS_23XX,
-   .num_def_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS,
-   .num_def_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS,
+   .num_def_rx_descs = CN23XX_DEFAULT_OQ_DESCRIPTORS,
+   .num_def_tx_descs = CN23XX_DEFAULT_IQ_DESCRIPTORS,
    .def_rx_buf_size = CN23XX_OQ_BUF_SIZE,

    /* For ethernet interface 0: Port cfg Attributes */
@@ -455,10 +455,10 @@ static struct octeon_config default_cn23xx_conf = {
    .num_rxqs = DEF_RXQS_PER_INTF,

    /* Num of desc for rx rings */
-   .num_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS,
+   .num_rx_descs = CN23XX_DEFAULT_OQ_DESCRIPTORS,

    /* Num of desc for tx rings */
-   .num_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS,
+   .num_tx_descs = CN23XX_DEFAULT_IQ_DESCRIPTORS,

    /* SKB size, We need not change buf size even for Jumbo frames.
     * Octeon can send jumbo frames in 4 consecutive descriptors,
@@ -484,10 +484,10 @@ static struct octeon_config default_cn23xx_conf = {
    .num_rxqs = DEF_RXQS_PER_INTF,

    /* Num of desc for rx rings */
-   .num_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS,
+   .num_rx_descs = CN23XX_DEFAULT_OQ_DESCRIPTORS,

    /* Num of desc for tx rings */
-   .num_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS,
+   .num_tx_descs = CN23XX_DEFAULT_IQ_DESCRIPTORS,

    /* SKB size, We need not change buf size even for Jumbo frames.
     * Octeon can send jumbo frames in 4 consecutive descriptors,
@@ -571,6 +571,8 @@ struct octeon_device {
#define CHIP_CONF(oct, TYPE) \
        (((struct octeon_ ## TYPE *)((oct)->chip))->conf)

+#define MAX_IO_PENDING_PKT_COUNT 100
+
/*------------------ Function Prototypes ----------------------*/

/** Initialize device list memory */
@@ -33,6 +33,7 @@
#define LIO_IFSTATE_REGISTERED 0x02
#define LIO_IFSTATE_RUNNING 0x04
#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
+#define LIO_IFSTATE_RESETTING 0x10

struct oct_nic_stats_resp {
    u64 rh;
@@ -166,6 +167,8 @@ void cleanup_rx_oom_poll_fn(struct net_device *netdev);
 */
void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);

+int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx);
+
/**
 * \brief Register ethtool operations
 * @param netdev pointer to network device
@@ -448,4 +451,30 @@ static inline void ifstate_reset(struct lio *lio, int state_flag)
    atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
}

/**
 * \brief wait for all pending requests to complete
 * @param oct Pointer to Octeon device
 *
 * Called during shutdown sequence
 */
static inline int wait_for_pending_requests(struct octeon_device *oct)
{
    int i, pcount = 0;

    for (i = 0; i < MAX_IO_PENDING_PKT_COUNT; i++) {
        pcount = atomic_read(
            &oct->response_list[OCTEON_ORDERED_SC_LIST]
            .pending_req_count);
        if (pcount)
            schedule_timeout_uninterruptible(HZ / 10);
        else
            break;
    }

    if (pcount)
        return 1;

    return 0;
}

#endif
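The wait_for_pending_requests() helper above was moved into this header so the PF and VF drivers share one copy. It polls the ordered response list up to MAX_IO_PENDING_PKT_COUNT (100) times, sleeping HZ/10 jiffies (about 100 ms) per try, so the worst-case wait is roughly 10 seconds. A user-space analogue of that loop, for illustration only:

#include <stdio.h>
#include <unistd.h>

/* Stand-in for the response-list counter polled by the kernel helper. */
static int pending_req_count;

int main(void)
{
    int i, pcount = 0;

    for (i = 0; i < 100; i++) {            /* MAX_IO_PENDING_PKT_COUNT */
        pcount = pending_req_count;
        if (pcount)
            usleep(100 * 1000);            /* ~HZ/10, i.e. ~100 ms */
        else
            break;
    }
    printf(pcount ? "timed out, requests still pending\n"
                  : "all requests completed\n");
    return 0;
}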