linux/drivers/net/ethernet/cavium/liquidio/response_manager.c
Raghu Vatsavayi f21fb3ed36 Add support of Cavium Liquidio ethernet adapters
The following patch (V8) adds support for Cavium LiquidIO PCI Express
based 10-Gigabit Ethernet adapters.
1) Consolidated all debug macros to call the dev_* or netdev_* macros
   directly (see the sketch after this list), per feedback on the
   previous patch.
2) Changed soft commands to avoid a crash when running
   in interrupt context.
3) Fixed link status not reflecting the correct state when NetworkManager
   is running. Added MODULE_FIRMWARE declarations.
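
A minimal sketch of items 1 and 3, not the driver's actual code: dev_* is
used for device-level messages, netdev_* for per-interface messages, and
MODULE_FIRMWARE declares the firmware images. The firmware file name and
function name below are placeholders.

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>

/* Placeholder firmware name; the real names come from the driver. */
MODULE_FIRMWARE("liquidio/example_nic.bin");

static void lio_report_status_sketch(struct pci_dev *pdev,
				     struct net_device *netdev)
{
	dev_info(&pdev->dev, "board initialized\n");	/* device-level */
	netdev_info(netdev, "link is up\n");		/* per-interface */
}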

The following were the previous patches.
Patch V7:
1) Addressed minor comments from the V6 release regarding debug statements.
2) Fix for large multicast lists.
3) Fixed a lockup issue if port initialization fails.
4) Enabled MSI by default (see the sketch after this list).
https://patchwork.ozlabs.org/patch/464441/
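
A minimal sketch of item 4, enabling MSI with a fallback to legacy INTx
interrupts; the helper name is illustrative, not the driver's actual
function:

#include <linux/pci.h>

static void lio_enable_msi_sketch(struct pci_dev *pdev)
{
	/* Prefer MSI; if it cannot be enabled, the device keeps using
	 * legacy INTx interrupts.
	 */
	if (pci_enable_msi(pdev))
		dev_warn(&pdev->dev, "MSI unavailable, using legacy interrupts\n");
}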

Patch V6:
1) Addressed the uint64 vs u64 issue, feedback from previous patch.
2) Consolidated some receive processing routines.
3) Removed link status polling method.
https://patchwork.ozlabs.org/patch/459514/

Patch V5:
Based on feedback from earlier patches regarding consolidation of
common functions such as device init and register programming for
cn66xx and cn68xx devices.
https://patchwork.ozlabs.org/patch/438979/

Patch V4:
The following changes were made based on feedback from the earlier patch:
1) Added mmiowb while synchronizing queue updates and other hw
   interactions.
2) Statistics are now incremented non-atomically per ring.
   liquidio_get_stats adds up the per-ring stats when reporting the
   total statistics counts.
3) Modified liquidio_ioctl to return proper return codes.
4) Modified device naming to use standard Ethernet naming.
5) Global function names in the driver carry a lio_/liquidio_/octeon_
   prefix.
6) Ethtool-related changes:
   Removed redundant stats and jiffies.
   Use the default ethtool handler for link status.
   Speed setting makes use of ethtool_cmd_speed_set.
7) Added checks for pci_map_* return codes.
8) Check for signals while waiting in interruptible mode (see the
   sketch after this list).
https://patchwork.ozlabs.org/patch/435073/
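
A minimal sketch of item 8, assuming a hypothetical helper that waits for
a response flag; the names are illustrative, not the driver's actual code:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

/* Wait for *done to become non-zero, but give up on a signal or timeout. */
static int lio_wait_sketch(wait_queue_head_t *wq, int *done,
			   unsigned long timeout_ms)
{
	long left;

	left = wait_event_interruptible_timeout(*wq, READ_ONCE(*done),
						msecs_to_jiffies(timeout_ms));
	if (left == -ERESTARTSYS)
		return -EINTR;		/* interrupted by a signal */
	if (left == 0)
		return -ETIMEDOUT;	/* timed out without a response */
	return 0;
}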

Patch v3:
Implemented feedback from the previous patch:
Removed the NAPI config and DEBUG config options, and added BQL and
xmit_more support (see the sketch below).
https://patchwork.ozlabs.org/patch/422749/
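
A minimal sketch of the BQL/xmit_more pattern in a transmit handler; the
descriptor-posting and doorbell helpers are placeholders, not the driver's
actual functions:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void lio_ring_doorbell_sketch(void) { /* placeholder */ }

static netdev_tx_t lio_xmit_sketch(struct sk_buff *skb,
				   struct net_device *netdev)
{
	struct netdev_queue *txq =
		netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));

	/* ...post the descriptor for skb to the hardware ring here... */

	/* BQL: account for the bytes now in flight; the completion path
	 * reports them back with netdev_tx_completed_queue().
	 */
	netdev_tx_sent_queue(txq, skb->len);

	/* Only ring the doorbell when the stack has no more packets for
	 * us, or when the queue had to be stopped.
	 */
	if (!skb->xmit_more || netif_xmit_stopped(txq))
		lio_ring_doorbell_sketch();

	return NETDEV_TX_OK;
}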

Patch V2:
Implemented feedback from previous patch.
https://patchwork.ozlabs.org/patch/413539/

First Patch:
https://patchwork.ozlabs.org/patch/412946/

Signed-off-by: Derek Chickles <derek.chickles@caviumnetworks.com>
Signed-off-by: Satanand Burla <satananda.burla@caviumnetworks.com>
Signed-off-by: Felix Manlunas <felix.manlunas@caviumnetworks.com>
Signed-off-by: Robert Richter <Robert.Richter@caviumnetworks.com>
Signed-off-by: Aleksey Makarov <Aleksey.Makarov@caviumnetworks.com>
Signed-off-by: Raghu Vatsavayi <raghu.vatsavayi@caviumnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2015-06-10 22:44:08 -07:00

/**********************************************************************
* Author: Cavium, Inc.
*
* Contact: support@cavium.com
*          Please include "LiquidIO" in the subject.
*
* Copyright (c) 2003-2015 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"

static void oct_poll_req_completion(struct work_struct *work);
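
/* Initialize the per-device response lists and start the delayed work
 * that polls for completed soft commands every 100 ms.
 */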
int octeon_setup_response_list(struct octeon_device *oct)
{
	int i, ret = 0;
	struct cavium_wq *cwq;

	for (i = 0; i < MAX_RESPONSE_LISTS; i++) {
		INIT_LIST_HEAD(&oct->response_list[i].head);
		spin_lock_init(&oct->response_list[i].lock);
		atomic_set(&oct->response_list[i].pending_req_count, 0);
	}

	oct->dma_comp_wq.wq = create_workqueue("dma-comp");
	if (!oct->dma_comp_wq.wq) {
		dev_err(&oct->pci_dev->dev, "failed to create wq thread\n");
		return -ENOMEM;
	}

	cwq = &oct->dma_comp_wq;
	INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion);
	cwq->wk.ctxptr = oct;
	queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100));

	return ret;
}

void octeon_delete_response_list(struct octeon_device *oct)
{
	cancel_delayed_work_sync(&oct->dma_comp_wq.wk.work);
	flush_workqueue(oct->dma_comp_wq.wq);
	destroy_workqueue(oct->dma_comp_wq.wq);
}
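
/* Walk the ORDERED_SC list and complete any soft commands whose response
 * has been DMA'd back by the Octeon, or which have timed out.  Returns 1
 * if the list was empty, 0 otherwise.
 */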
int lio_process_ordered_list(struct octeon_device *octeon_dev,
			     u32 force_quit)
{
	struct octeon_response_list *ordered_sc_list;
	struct octeon_soft_command *sc;
	int request_complete = 0;
	int resp_to_process = MAX_ORD_REQS_TO_PROCESS;
	u32 status;
	u64 status64;
	struct octeon_instr_rdp *rdp;

	ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST];

	do {
		spin_lock_bh(&ordered_sc_list->lock);

		if (ordered_sc_list->head.next == &ordered_sc_list->head) {
			/* ordered_sc_list is empty; there is
			 * nothing to process
			 */
			spin_unlock_bh(&ordered_sc_list->lock);
			return 1;
		}

		sc = (struct octeon_soft_command *)ordered_sc_list->head.next;
		rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;

		status = OCTEON_REQUEST_PENDING;

		/* check if octeon has finished DMA'ing a response
		 * to where rptr is pointing to
		 */
		dma_sync_single_for_cpu(&octeon_dev->pci_dev->dev,
					sc->cmd.rptr, rdp->rlen,
					DMA_FROM_DEVICE);
		status64 = *sc->status_word;

		if (status64 != COMPLETION_WORD_INIT) {
			if ((status64 & 0xff) != 0xff) {
				octeon_swap_8B_data(&status64, 1);
				if ((status64 & 0xff) != 0xff)
					status = (u32)(status64 &
						       0xffffffffULL);
			}
		} else if (force_quit || (sc->timeout &&
			   time_after(jiffies,
				      (unsigned long)sc->timeout))) {
			status = OCTEON_REQUEST_TIMEOUT;
		}

		if (status != OCTEON_REQUEST_PENDING) {
			/* we have received a response or we have timed out */
			/* remove node from linked list */
			list_del(&sc->node);
			atomic_dec(&octeon_dev->response_list
				   [OCTEON_ORDERED_SC_LIST].pending_req_count);
			spin_unlock_bh(&ordered_sc_list->lock);

			if (sc->callback)
				sc->callback(octeon_dev, status,
					     sc->callback_arg);

			request_complete++;
		} else {
			/* no response yet */
			request_complete = 0;
			spin_unlock_bh(&ordered_sc_list->lock);
		}

		/* If we hit the max ordered requests to process every loop,
		 * we quit and let this function be invoked the next time the
		 * poll thread runs to process the remaining requests.  This
		 * function can take up the entire CPU if there is no upper
		 * limit to the requests processed.
		 */
		if (request_complete >= resp_to_process)
			break;
	} while (request_complete);

	return 0;
}
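
/* Delayed-work handler: process the ordered soft-command list and re-arm
 * itself so completions are polled every 100 ms.
 */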
static void oct_poll_req_completion(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
	struct cavium_wq *cwq = &oct->dma_comp_wq;

	lio_process_ordered_list(oct, 0);
	queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100));
}