Staging: benet: remove driver now that it is merged in drivers/net/

The benet driver is now in the proper place in drivers/net/benet, so we
can remove the staging version.

Acked-by: Sathya Perla <sathyap@serverengines.com>
Cc: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
This commit is contained in:
Greg Kroah-Hartman 2009-03-18 09:22:17 -07:00
parent 8144737def
commit d0573facf2
39 changed files with 0 additions and 12237 deletions

View File

@ -73,8 +73,6 @@ source "drivers/staging/rt2860/Kconfig"
source "drivers/staging/rt2870/Kconfig"
source "drivers/staging/benet/Kconfig"
source "drivers/staging/comedi/Kconfig"
source "drivers/staging/asus_oled/Kconfig"

View File

@ -19,7 +19,6 @@ obj-$(CONFIG_AGNX) += agnx/
obj-$(CONFIG_OTUS) += otus/
obj-$(CONFIG_RT2860) += rt2860/
obj-$(CONFIG_RT2870) += rt2870/
obj-$(CONFIG_BENET) += benet/
obj-$(CONFIG_COMEDI) += comedi/
obj-$(CONFIG_ASUS_OLED) += asus_oled/
obj-$(CONFIG_PANEL) += panel/

View File

@ -1,7 +0,0 @@
# ServerEngines BladeEngine 10Gb NIC.  INET_LRO is selected because the
# driver's RX path uses the inet_lro library (lro_receive_frags etc.).
config BENET
	tristate "ServerEngines 10Gb NIC - BladeEngine"
	depends on PCI && INET
	select INET_LRO
	help
	  This driver implements the NIC functionality for ServerEngines
	  10Gb network adapter BladeEngine (EC 3210).

View File

@ -1,6 +0,0 @@
SERVER ENGINES 10Gbe NIC - BLADE-ENGINE
P: Subbu Seetharaman
M: subbus@serverengines.com
L: netdev@vger.kernel.org
W: http://www.serverengines.com
S: Supported

View File

@ -1,14 +0,0 @@
#
# Makefile to build the network driver for ServerEngine's BladeEngine
#
# benet.o is linked from the per-feature objects below.
obj-$(CONFIG_BENET) += benet.o
benet-y := be_init.o \
	be_int.o \
	be_netif.o \
	be_ethtool.o \
	funcobj.o \
	cq.o \
	eq.o \
	mpu.o \
	eth.o

View File

@ -1,6 +0,0 @@
TODO:
- remove wrappers around common iowrite functions
- full netdev audit of common problems/issues
Please send all patches and questions to Subbu Seetharaman
<subbus@serverengines.com> and Greg Kroah-Hartman <greg@kroah.com>

View File

@ -1,82 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __asyncmesg_amap_h__
#define __asyncmesg_amap_h__
#include "fwcmd_common.h"
/* --- ASYNC_EVENT_CODES --- */
#define ASYNC_EVENT_CODE_LINK_STATE (1)
#define ASYNC_EVENT_CODE_ISCSI (2)
/* --- ASYNC_LINK_STATES --- */
#define ASYNC_EVENT_LINK_DOWN (0) /* Link Down on a port */
#define ASYNC_EVENT_LINK_UP (1) /* Link Up on a port */
/*
 * The last 4 bytes of the async events have this common format. It allows
 * the driver to distinguish [link]MCC_CQ_ENTRY[/link] structs from
 * asynchronous events. Both arrive on the same completion queue. This
 * structure also contains the common fields used to decode the async event.
 *
 * AMAP convention: each u8 element below describes ONE BIT of the
 * hardware layout; the widths sum to 32 bits, matching the one-dword
 * shadow struct ASYNC_EVENT_TRAILER_AMAP.
 */
struct BE_ASYNC_EVENT_TRAILER_AMAP {
	u8 rsvd0[8];		/* DWORD 0 - reserved */
	u8 event_code[8];	/* DWORD 0 - ASYNC_EVENT_CODE_* value */
	u8 event_type[8];	/* DWORD 0 - event-code specific subtype */
	u8 rsvd1[6];		/* DWORD 0 - reserved */
	u8 async_event;		/* DWORD 0 - marks entry as an async event */
	u8 valid;		/* DWORD 0 - entry carries valid data */
} __packed;
/* Raw one-dword overlay of the trailer. */
struct ASYNC_EVENT_TRAILER_AMAP {
	u32 dw[1];
};
/*
 * Applicable in Initiator, Target and NIC modes.
 * A link state async event is seen by all device drivers as soon they
 * create an MCC ring. Thereafter, anytime the link status changes the
 * drivers will receive a link state async event. Notifications continue to
 * be sent until a driver destroys its MCC ring. A link down event is
 * reported when either port loses link. A link up event is reported
 * when either port regains link. When BE's failover mechanism is enabled, a
 * link down on the active port causes traffic to be diverted to the standby
 * port by the BE's ARM firmware (assuming the standby port has link). In
 * this case, the standby port assumes the active status. Note: when link is
 * restored on the failed port, traffic continues on the currently active
 * port. The ARM firmware does not attempt to 'fail back' traffic to
 * the restored port.
 *
 * AMAP convention: each u8 array element is one bit of the hardware layout.
 */
struct BE_ASYNC_EVENT_LINK_STATE_AMAP {
	u8 port0_link_status[8];	/* ASYNC_EVENT_LINK_UP / _DOWN */
	u8 port1_link_status[8];
	u8 active_port[8];
	u8 rsvd0[8];	/* DWORD 0 */
	u8 port0_duplex[8];
	u8 port0_speed[8];
	u8 port1_duplex[8];
	u8 port1_speed[8];
	u8 port0_fault[8];
	u8 port1_fault[8];
	u8 rsvd1[2][8];	/* DWORD 2 */
	struct BE_ASYNC_EVENT_TRAILER_AMAP trailer;	/* common decode trailer */
} __packed;
/* Raw four-dword overlay of the event. */
struct ASYNC_EVENT_LINK_STATE_AMAP {
	u32 dw[4];
};
#endif /* __asyncmesg_amap_h__ */

View File

@ -1,134 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __be_cm_amap_h__
#define __be_cm_amap_h__
#include "be_common.h"
#include "etx_context.h"
#include "mpu_context.h"
/*
* --- CEV_WATERMARK_ENUM ---
* CQ/EQ Watermark Encodings. Encoded as number of free entries in
* Queue when Watermark is reached.
*/
#define CEV_WMARK_0 (0) /* Watermark when Queue full */
#define CEV_WMARK_16 (1) /* Watermark at 16 free entries */
#define CEV_WMARK_32 (2) /* Watermark at 32 free entries */
#define CEV_WMARK_48 (3) /* Watermark at 48 free entries */
#define CEV_WMARK_64 (4) /* Watermark at 64 free entries */
#define CEV_WMARK_80 (5) /* Watermark at 80 free entries */
#define CEV_WMARK_96 (6) /* Watermark at 96 free entries */
#define CEV_WMARK_112 (7) /* Watermark at 112 free entries */
#define CEV_WMARK_128 (8) /* Watermark at 128 free entries */
#define CEV_WMARK_144 (9) /* Watermark at 144 free entries */
#define CEV_WMARK_160 (10) /* Watermark at 160 free entries */
#define CEV_WMARK_176 (11) /* Watermark at 176 free entries */
#define CEV_WMARK_192 (12) /* Watermark at 192 free entries */
#define CEV_WMARK_208 (13) /* Watermark at 208 free entries */
#define CEV_WMARK_224 (14) /* Watermark at 224 free entries */
#define CEV_WMARK_240 (15) /* Watermark at 240 free entries */
/*
* --- CQ_CNT_ENUM ---
* Completion Queue Count Encodings.
*/
#define CEV_CQ_CNT_256 (0) /* CQ has 256 entries */
#define CEV_CQ_CNT_512 (1) /* CQ has 512 entries */
#define CEV_CQ_CNT_1024 (2) /* CQ has 1024 entries */
/*
* --- EQ_CNT_ENUM ---
* Event Queue Count Encodings.
*/
#define CEV_EQ_CNT_256 (0) /* EQ has 256 entries (16-byte EQEs only) */
#define CEV_EQ_CNT_512 (1) /* EQ has 512 entries (16-byte EQEs only) */
#define CEV_EQ_CNT_1024 (2) /* EQ has 1024 entries (4-byte or */
/* 16-byte EQEs only) */
#define CEV_EQ_CNT_2048 (3) /* EQ has 2048 entries (4-byte or */
/* 16-byte EQEs only) */
#define CEV_EQ_CNT_4096 (4) /* EQ has 4096 entries (4-byte EQEs only) */
/*
* --- EQ_SIZE_ENUM ---
* Event Queue Entry Size Encoding.
*/
#define CEV_EQ_SIZE_4 (0) /* EQE is 4 bytes */
#define CEV_EQ_SIZE_16 (1) /* EQE is 16 bytes */
/*
 * Completion Queue Context Table Entry. Contains the state of a CQ.
 * Located in RAM within the CEV block.
 *
 * AMAP convention: each u8 element is one bit; the widths sum to
 * 64 bits, matching the two-dword shadow struct CQ_CONTEXT_AMAP.
 */
struct BE_CQ_CONTEXT_AMAP {
	u8 Cidx[11];	/* DWORD 0 - consumer index */
	u8 Watermark[4];	/* DWORD 0 - CEV_WMARK_* encoding */
	u8 NoDelay;	/* DWORD 0 */
	u8 EPIdx[11];	/* DWORD 0 */
	u8 Count[2];	/* DWORD 0 - CEV_CQ_CNT_* encoding */
	u8 valid;	/* DWORD 0 */
	u8 SolEvent;	/* DWORD 0 */
	u8 Eventable;	/* DWORD 0 */
	u8 Pidx[11];	/* DWORD 1 - producer index */
	u8 PD[10];	/* DWORD 1 */
	u8 EQID[7];	/* DWORD 1 - associated event queue */
	u8 Func;	/* DWORD 1 */
	u8 WME;	/* DWORD 1 */
	u8 Stalled;	/* DWORD 1 */
	u8 Armed;	/* DWORD 1 */
} __packed;
/* Raw two-dword overlay of the CQ context. */
struct CQ_CONTEXT_AMAP {
	u32 dw[2];
};
/*
 * Event Queue Context Table Entry. Contains the state of an EQ.
 * Located in RAM in the CEV block.
 *
 * AMAP convention: each u8 element is one bit; the widths sum to
 * 128 bits, matching the four-dword shadow struct EQ_CONTEXT_AMAP.
 */
struct BE_EQ_CONTEXT_AMAP {
	u8 Cidx[13];	/* DWORD 0 - consumer index */
	u8 rsvd0[2];	/* DWORD 0 */
	u8 Func;	/* DWORD 0 */
	u8 EPIdx[13];	/* DWORD 0 */
	u8 valid;	/* DWORD 0 */
	u8 rsvd1;	/* DWORD 0 */
	u8 Size;	/* DWORD 0 - CEV_EQ_SIZE_* encoding */
	u8 Pidx[13];	/* DWORD 1 - producer index */
	u8 rsvd2[3];	/* DWORD 1 */
	u8 PD[10];	/* DWORD 1 */
	u8 Count[3];	/* DWORD 1 - CEV_EQ_CNT_* encoding */
	u8 SolEvent;	/* DWORD 1 */
	u8 Stalled;	/* DWORD 1 */
	u8 Armed;	/* DWORD 1 */
	u8 Watermark[4];	/* DWORD 2 - CEV_WMARK_* encoding */
	u8 WME;	/* DWORD 2 */
	u8 rsvd3[3];	/* DWORD 2 */
	u8 EventVect[6];	/* DWORD 2 */
	u8 rsvd4[2];	/* DWORD 2 */
	u8 Delay[8];	/* DWORD 2 - interrupt delay */
	u8 rsvd5[6];	/* DWORD 2 */
	u8 TMR;	/* DWORD 2 */
	u8 rsvd6;	/* DWORD 2 */
	u8 rsvd7[32];	/* DWORD 3 */
} __packed;
/* Raw four-dword overlay of the EQ context. */
struct EQ_CONTEXT_AMAP {
	u32 dw[4];
};
#endif /* __be_cm_amap_h__ */

View File

@ -1,53 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __be_common_amap_h__
#define __be_common_amap_h__
/* Physical Address.  AMAP convention: each u8 element is one bit, so
 * lo/hi together describe a 64-bit address in two dwords. */
struct BE_PHYS_ADDR_AMAP {
	u8 lo[32];	/* DWORD 0 - low 32 bits */
	u8 hi[32];	/* DWORD 1 - high 32 bits */
} __packed;
struct PHYS_ADDR_AMAP {
	u32 dw[2];
};
/* Virtual Address.  Same two-dword split as BE_PHYS_ADDR_AMAP. */
struct BE_VIRT_ADDR_AMAP {
	u8 lo[32];	/* DWORD 0 */
	u8 hi[32];	/* DWORD 1 */
} __packed;
struct VIRT_ADDR_AMAP {
	u32 dw[2];
};
/* Scatter gather element. */
struct BE_SGE_AMAP {
	u8 addr_hi[32];	/* DWORD 0 */
	u8 addr_lo[32];	/* DWORD 1 */
	u8 rsvd0[32];	/* DWORD 2 */
	u8 len[16];	/* DWORD 3 - buffer length in bytes */
	u8 rsvd1[16];	/* DWORD 3 */
} __packed;
struct SGE_AMAP {
	u32 dw[4];
};
#endif /* __be_common_amap_h__ */

View File

@ -1,348 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* be_ethtool.c
*
* This file contains various functions that ethtool can use
* to talk to the driver and the BE H/W.
*/
#include "benet.h"
#include <linux/ethtool.h>
/*
 * Names reported via ethtool -S (get_strings).  The order here MUST
 * match the order values are stored in be_get_ethtool_stats(): first
 * the generic net_device_stats counters, then the BE driver counters.
 */
static const char benet_gstrings_stats[][ETH_GSTRING_LEN] = {
	/* net_device_stats */
	"rx_packets",
	"tx_packets",
	"rx_bytes",
	"tx_bytes",
	"rx_errors",
	"tx_errors",
	"rx_dropped",
	"tx_dropped",
	"multicast",
	"collisions",
	"rx_length_errors",
	"rx_over_errors",
	"rx_crc_errors",
	"rx_frame_errors",
	"rx_fifo_errors",
	"rx_missed_errors",
	"tx_aborted_errors",
	"tx_carrier_errors",
	"tx_fifo_errors",
	"tx_heartbeat_errors",
	"tx_window_errors",
	"rx_compressed",
	/* Bug fix: mirrors net_device_stats.tx_compressed; was previously
	 * misspelled "tc_compressed". */
	"tx_compressed",
	/* BE driver Stats */
	"bes_tx_reqs",
	"bes_tx_fails",
	"bes_fwd_reqs",
	"bes_tx_wrbs",
	"bes_interrupts",
	"bes_events",
	"bes_tx_events",
	"bes_rx_events",
	"bes_tx_compl",
	"bes_rx_compl",
	"bes_ethrx_post_fail",
	"bes_802_3_dropped_frames",
	"bes_802_3_malformed_frames",
	"bes_rx_misc_pkts",
	"bes_eth_tx_rate",
	"bes_eth_rx_rate",
	"Num Packets collected",
	"Num Times Flushed",
};

/* Number of unsigned-long counters in struct net_device_stats. */
#define NET_DEV_STATS_LEN \
	(sizeof(struct net_device_stats)/sizeof(unsigned long))

/* Total number of strings/counters reported to ethtool. */
#define BENET_STATS_LEN  ARRAY_SIZE(benet_gstrings_stats)
/*
 * ethtool get_drvinfo (ethtool -i): report driver name, version and
 * firmware version strings.
 * Bug fix: strncpy() does not NUL-terminate when the source fills the
 * 32-byte limit, so terminate the fixed-size fields explicitly.
 */
static void
be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct be_net_object *pnob = netdev_priv(netdev);
	struct be_adapter *adapter = pnob->adapter;

	strncpy(drvinfo->driver, be_driver_name, 32);
	drvinfo->driver[31] = '\0';
	strncpy(drvinfo->version, be_drvr_ver, 32);
	drvinfo->version[31] = '\0';
	strncpy(drvinfo->fw_version, be_fw_ver, 32);
	drvinfo->fw_version[31] = '\0';
	strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
	drvinfo->testinfo_len = 0;
	drvinfo->regdump_len = 0;
	drvinfo->eedump_len = 0;
}
/*
 * ethtool get_coalesce: report interrupt-coalescing settings.  RX and
 * TX share a single event-queue delay (cur_eqd) and a single adaptive
 * flag (enable_aic), so both directions return the same values.
 */
static int
be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
{
	struct be_net_object *pnob = netdev_priv(netdev);
	struct be_adapter *adapter = pnob->adapter;

	coalesce->rx_max_coalesced_frames = adapter->max_rx_coal;

	coalesce->rx_coalesce_usecs = adapter->cur_eqd;
	coalesce->tx_coalesce_usecs = adapter->cur_eqd;

	coalesce->rx_coalesce_usecs_high = adapter->max_eqd;
	coalesce->tx_coalesce_usecs_high = adapter->max_eqd;

	coalesce->rx_coalesce_usecs_low = adapter->min_eqd;
	coalesce->tx_coalesce_usecs_low = adapter->min_eqd;

	coalesce->use_adaptive_rx_coalesce = adapter->enable_aic;
	coalesce->use_adaptive_tx_coalesce = adapter->enable_aic;

	return 0;
}
/*
 * This routine is used to set interrupt coalescing delay *as well as*
 * the number of pkts to coalesce for LRO.
 */
static int
be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
{
	struct be_net_object *pnob = netdev_priv(netdev);
	struct be_adapter *adapter = pnob->adapter;
	struct be_eq_object *eq_objectp;
	u32 max, min, cur;
	int status;

	/* Clamp the LRO packet-coalescing count to the supported maximum. */
	adapter->max_rx_coal = coalesce->rx_max_coalesced_frames;
	if (adapter->max_rx_coal >= BE_LRO_MAX_PKTS)
		adapter->max_rx_coal = BE_LRO_MAX_PKTS;

	if (adapter->enable_aic == 0 &&
	    coalesce->use_adaptive_rx_coalesce == 1) {
		/* if AIC is being turned on now, start with an EQD of 0 */
		adapter->cur_eqd = 0;
	}
	adapter->enable_aic = coalesce->use_adaptive_rx_coalesce;

	/* round off to nearest multiple of 8 */
	max = (((coalesce->rx_coalesce_usecs_high + 4) >> 3) << 3);
	min = (((coalesce->rx_coalesce_usecs_low + 4) >> 3) << 3);
	cur = (((coalesce->rx_coalesce_usecs + 4) >> 3) << 3);

	if (adapter->enable_aic) {
		/* accept low and high if AIC is enabled */
		if (max > MAX_EQD)
			max = MAX_EQD;
		if (min > max)
			min = max;
		adapter->max_eqd = max;
		adapter->min_eqd = min;
		if (adapter->cur_eqd > max)
			adapter->cur_eqd = max;
		if (adapter->cur_eqd < min)
			adapter->cur_eqd = min;
	} else {
		/* accept specified coalesce_usecs only if AIC is disabled */
		if (cur > MAX_EQD)
			cur = MAX_EQD;
		eq_objectp = &pnob->event_q_obj;
		/* Program the new delay into the event queue; remember it
		 * only if the firmware accepted it. */
		status =
		    be_eq_modify_delay(&pnob->fn_obj, 1, &eq_objectp, &cur,
				       NULL, NULL, NULL);
		if (status == BE_SUCCESS)
			adapter->cur_eqd = cur;
	}
	return 0;
}
/* ethtool get_rx_csum: report whether RX checksum offload is enabled. */
static u32 be_get_rx_csum(struct net_device *netdev)
{
	struct be_net_object *pnob = netdev_priv(netdev);

	return pnob->adapter->rx_csum;
}
/* ethtool set_rx_csum: enable/disable the RX checksum-offload flag. */
static int be_set_rx_csum(struct net_device *netdev, uint32_t data)
{
	struct be_net_object *pnob = netdev_priv(netdev);

	pnob->adapter->rx_csum = data ? 1 : 0;
	return 0;
}
/*
 * ethtool get_strings: copy out the statistics name table.  Only the
 * ETH_SS_STATS string set is supported; other sets are ignored.
 */
static void
be_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, *benet_gstrings_stats,
		       sizeof(benet_gstrings_stats));
}
/* ethtool get_stats_count: number of u64 counters reported by
 * be_get_ethtool_stats() (one per entry in benet_gstrings_stats). */
static int be_get_stats_count(struct net_device *netdev)
{
	return BENET_STATS_LEN;
}
/*
 * ethtool get_ethtool_stats: fill data[] with the counters named in
 * benet_gstrings_stats[], in the same order -- first the generic
 * net_device_stats values, then the BE driver statistics.
 */
static void
be_get_ethtool_stats(struct net_device *netdev,
		     struct ethtool_stats *stats, uint64_t *data)
{
	struct be_net_object *pnob = netdev_priv(netdev);
	struct be_adapter *adapter = pnob->adapter;
	int i;

	benet_get_stats(netdev);

	/*
	 * Bug fix: the loop previously ran with "<=", reading one element
	 * past the end of benet_stats; NET_DEV_STATS_LEN counters occupy
	 * indices 0..NET_DEV_STATS_LEN-1.
	 */
	for (i = 0; i < NET_DEV_STATS_LEN; i++)
		data[i] = ((unsigned long *)&adapter->benet_stats)[i];

	/* Bug fix: the first driver stat was stored without a post-
	 * increment, so bes_tx_fails immediately overwrote bes_tx_reqs
	 * and every following stat was shifted by one slot. */
	data[i++] = adapter->be_stat.bes_tx_reqs;
	data[i++] = adapter->be_stat.bes_tx_fails;
	data[i++] = adapter->be_stat.bes_fwd_reqs;
	data[i++] = adapter->be_stat.bes_tx_wrbs;
	data[i++] = adapter->be_stat.bes_ints;
	data[i++] = adapter->be_stat.bes_events;
	data[i++] = adapter->be_stat.bes_tx_events;
	data[i++] = adapter->be_stat.bes_rx_events;
	data[i++] = adapter->be_stat.bes_tx_compl;
	data[i++] = adapter->be_stat.bes_rx_compl;
	data[i++] = adapter->be_stat.bes_ethrx_post_fail;
	data[i++] = adapter->be_stat.bes_802_3_dropped_frames;
	data[i++] = adapter->be_stat.bes_802_3_malformed_frames;
	data[i++] = adapter->be_stat.bes_rx_misc_pkts;
	data[i++] = adapter->be_stat.bes_eth_tx_rate;
	data[i++] = adapter->be_stat.bes_eth_rx_rate;
	data[i++] = adapter->be_stat.bes_rx_coal;
	data[i++] = adapter->be_stat.bes_rx_flush;
}
/*
 * ethtool get_settings: the BladeEngine link is a fixed 10Gb/s
 * full-duplex connection, so report constants and no autonegotiation.
 */
static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	ecmd->autoneg = AUTONEG_DISABLE;
	ecmd->duplex = DUPLEX_FULL;
	ecmd->speed = SPEED_10000;
	return 0;
}
/*
 * ethtool get_ringparam: report queue depths from the net object.
 * There are no mini/jumbo rings on this hardware.
 */
static void
be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
	struct be_net_object *pnob = netdev_priv(netdev);

	/* Maximum supported queue depths */
	ring->rx_max_pending = pnob->rx_q_len;
	ring->tx_max_pending = pnob->tx_q_len;
	/* Bug fix: these were self-assignments (ring->x = ring->x), no-ops
	 * that only looked correct because the ethtool core pre-zeroes the
	 * structure.  Report the unsupported rings as 0 explicitly. */
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;

	/* Current hardware settings */
	ring->rx_pending = atomic_read(&pnob->rx_q_posted);
	ring->tx_pending = atomic_read(&pnob->tx_q_used);
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}
/*
 * ethtool get_pauseparam: query the firmware for the current TX/RX
 * flow-control state.  On failure, report conservative defaults
 * (RX pause on, TX pause off).
 */
static void
be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
{
	struct be_net_object *pnob = netdev_priv(netdev);
	bool rxfc, txfc;

	if (be_eth_get_flow_control(&pnob->fn_obj, &txfc, &rxfc) !=
	    BE_SUCCESS) {
		dev_info(&netdev->dev, "Unable to get pause frame settings\n");
		/* return defaults */
		ecmd->rx_pause = 1;
		ecmd->tx_pause = 0;
		ecmd->autoneg = AUTONEG_ENABLE;
		return;
	}

	ecmd->tx_pause = txfc ? 1 : 0;
	ecmd->rx_pause = rxfc ? 1 : 0;
	ecmd->autoneg = AUTONEG_ENABLE;
}
/*
 * ethtool set_pauseparam: push the requested TX/RX flow-control state
 * to the firmware.  Only the autoneg-enabled form is accepted.
 */
static int
be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
{
	struct be_net_object *pnob = netdev_priv(netdev);
	bool txfc, rxfc;
	int status;

	if (ecmd->autoneg != AUTONEG_ENABLE)
		return -EINVAL;

	txfc = ecmd->tx_pause ? true : false;
	rxfc = ecmd->rx_pause ? true : false;

	status = be_eth_set_flow_control(&pnob->fn_obj, txfc, rxfc);
	if (status != BE_SUCCESS) {
		dev_info(&netdev->dev, "Unable to set pause frame settings\n");
		return -1;
	}
	return 0;
}
/*
 * ethtool entry points for the benet driver.  Checksum, SG and TSO
 * handlers are the generic ethtool core helpers; everything else is
 * implemented in this file.
 */
struct ethtool_ops be_ethtool_ops = {
	.get_settings = be_get_settings,
	.get_drvinfo = be_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_coalesce = be_get_coalesce,
	.set_coalesce = be_set_coalesce,
	.get_ringparam = be_get_ringparam,
	.get_pauseparam = be_get_pauseparam,
	.set_pauseparam = be_set_pauseparam,
	.get_rx_csum = be_get_rx_csum,
	.set_rx_csum = be_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = ethtool_op_set_tso,
	.get_strings = be_get_strings,
	.get_stats_count = be_get_stats_count,
	.get_ethtool_stats = be_get_ethtool_stats,
};

File diff suppressed because it is too large Load Diff

View File

@ -1,863 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
#include <linux/if_vlan.h>
#include <linux/inet_lro.h>
#include "benet.h"
/* number of bytes of RX frame that are copied to skb->data */
#define BE_HDR_LEN 64
#define NETIF_RX(skb) netif_receive_skb(skb)
#define VLAN_ACCEL_RX(skb, pnob, vt) \
vlan_hwaccel_rx(skb, pnob->vlan_grp, vt)
/*
 * This function notifies BladeEngine of the number of completion
 * entries processed from the specified completion queue by writing
 * the number of popped entries to the door bell.
 *
 * pnob   - Pointer to the NetObject structure
 * n      - Number of completion entries processed
 * cq_id  - Queue ID of the completion queue for which notification
 *          is being done.
 * re_arm - 1 - rearm the completion ring to generate an event.
 *        - 0 - dont rearm the completion ring to generate an event
 */
void be_notify_cmpl(struct be_net_object *pnob, int n, int cq_id, int re_arm)
{
	struct CQ_DB_AMAP cqdb;

	/* Build the doorbell dword and write it to the CQ_DB register. */
	cqdb.dw[0] = 0;
	AMAP_SET_BITS_PTR(CQ_DB, qid, &cqdb, cq_id);
	AMAP_SET_BITS_PTR(CQ_DB, rearm, &cqdb, re_arm);
	AMAP_SET_BITS_PTR(CQ_DB, num_popped, &cqdb, n);
	PD_WRITE(&pnob->fn_obj, cq_db, cqdb.dw[0]);
}
/*
 * adds additional receive frags indicated by BE starting from given
 * frag index (fi) to specified skb's frag list
 */
static void
add_skb_frags(struct be_net_object *pnob, struct sk_buff *skb,
	      u32 nresid, u32 fi)
{
	struct be_adapter *adapter = pnob->adapter;
	u32 sk_frag_idx, n;
	struct be_rx_page_info *rx_page_info;
	u32 frag_sz = pnob->rx_buf_size;

	sk_frag_idx = skb_shinfo(skb)->nr_frags;
	/* Consume RX-queue entries until all residual bytes are attached. */
	while (nresid) {
		index_inc(&fi, pnob->rx_q_len);

		rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
		pnob->rx_ctxt[fi] = NULL;
		/* Unmap only when we own the whole page or this is the
		 * second (last) half of a shared page. */
		if ((rx_page_info->page_offset) ||
		    (pnob->rx_pg_shared == false)) {
			pci_unmap_page(adapter->pdev,
				       pci_unmap_addr(rx_page_info, bus),
				       frag_sz, PCI_DMA_FROMDEVICE);
		}

		n = min(nresid, frag_sz);
		skb_shinfo(skb)->frags[sk_frag_idx].page = rx_page_info->page;
		skb_shinfo(skb)->frags[sk_frag_idx].page_offset
		    = rx_page_info->page_offset;
		skb_shinfo(skb)->frags[sk_frag_idx].size = n;

		sk_frag_idx++;
		skb->len += n;
		skb->data_len += n;
		skb_shinfo(skb)->nr_frags++;
		nresid -= n;

		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
		atomic_dec(&pnob->rx_q_posted);
	}
}
/*
 * This function processes incoming nic packets over various Rx queues.
 * This function takes the adapter, the current Rx status descriptor
 * entry and the Rx completion queue ID as argument.
 */
static inline int process_nic_rx_completion(struct be_net_object *pnob,
					    struct ETH_RX_COMPL_AMAP *rxcp)
{
	struct be_adapter *adapter = pnob->adapter;
	struct sk_buff *skb;
	int udpcksm, tcpcksm;
	int n;
	u32 nresid, fi;
	u32 frag_sz = pnob->rx_buf_size;
	u8 *va;
	struct be_rx_page_info *rx_page_info;
	u32 numfrags, vtp, vtm, vlan_tag, pktsize;

	fi = AMAP_GET_BITS_PTR(ETH_RX_COMPL, fragndx, rxcp);
	/* NOTE(review): fi is u32, so "fi < 0" can never fire and the
	 * (int) cast in the first check mixes signedness. */
	BUG_ON(fi >= (int)pnob->rx_q_len);
	BUG_ON(fi < 0);

	rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
	BUG_ON(!rx_page_info->page);
	pnob->rx_ctxt[fi] = NULL;

	/*
	 * If one page is used per fragment or if this is the second half
	 * of the page, unmap the page here
	 */
	if ((rx_page_info->page_offset) || (pnob->rx_pg_shared == false)) {
		pci_unmap_page(adapter->pdev,
			       pci_unmap_addr(rx_page_info, bus), frag_sz,
			       PCI_DMA_FROMDEVICE);
	}

	atomic_dec(&pnob->rx_q_posted);
	udpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, udpcksm, rxcp);
	tcpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, tcpcksm, rxcp);
	pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp);
	/*
	 * get rid of RX flush completions first.
	 */
	if ((tcpcksm) && (udpcksm) && (pktsize == 32)) {
		put_page(rx_page_info->page);
		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
		return 0;
	}

	/* Allocate a small skb for the header; the rest of the frame is
	 * attached as page fragments. */
	skb = netdev_alloc_skb(pnob->netdev, BE_HDR_LEN + NET_IP_ALIGN);
	if (skb == NULL) {
		dev_info(&pnob->netdev->dev, "alloc_skb() failed\n");
		put_page(rx_page_info->page);
		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
		goto free_frags;
	}
	skb_reserve(skb, NET_IP_ALIGN);

	skb->dev = pnob->netdev;

	n = min(pktsize, frag_sz);

	va = page_address(rx_page_info->page) + rx_page_info->page_offset;
	prefetch(va);

	skb->len = n;
	skb->data_len = n;
	if (n <= BE_HDR_LEN) {
		/* Entire frame fits in the header copy; the page can go. */
		memcpy(skb->data, va, n);
		put_page(rx_page_info->page);
		skb->data_len -= n;
		skb->tail += n;
	} else {
		/* Setup the SKB with page buffer information */
		skb_shinfo(skb)->frags[0].page = rx_page_info->page;
		skb_shinfo(skb)->nr_frags++;

		/* Copy the header into the skb_data */
		memcpy(skb->data, va, BE_HDR_LEN);
		skb_shinfo(skb)->frags[0].page_offset =
		    rx_page_info->page_offset + BE_HDR_LEN;
		skb_shinfo(skb)->frags[0].size = n - BE_HDR_LEN;
		skb->data_len -= BE_HDR_LEN;
		skb->tail += BE_HDR_LEN;
	}
	memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
	nresid = pktsize - n;

	skb->protocol = eth_type_trans(skb, pnob->netdev);

	if ((tcpcksm || udpcksm) && adapter->rx_csum)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
	/*
	 * if we have more bytes left, the frame has been
	 * given to us in multiple fragments. This happens
	 * with Jumbo frames. Add the remaining fragments to
	 * skb->frags[] array.
	 */
	if (nresid)
		add_skb_frags(pnob, skb, nresid, fi);

	/* update the true size of the skb. */
	skb->truesize = skb->len + sizeof(struct sk_buff);

	/*
	 * If a 802.3 frame or 802.2 LLC frame
	 * (i.e) contains length field in MAC Hdr
	 * and frame len is greater than 64 bytes
	 */
	if (((skb->protocol == ntohs(ETH_P_802_2)) ||
	     (skb->protocol == ntohs(ETH_P_802_3)))
	    && (pktsize > BE_HDR_LEN)) {
		/*
		 * If the length given in Mac Hdr is less than frame size
		 * Erraneous frame, Drop it
		 */
		if ((ntohs(*(u16 *) (va + 12)) + ETH_HLEN) < pktsize) {
			/* Increment Non Ether type II frames dropped */
			adapter->be_stat.bes_802_3_dropped_frames++;

			kfree_skb(skb);
			return 0;
		}
		/*
		 * else if the length given in Mac Hdr is greater than
		 * frame size, should not be seeing this sort of frames
		 * dump the pkt and pass to stack
		 */
		else if ((ntohs(*(u16 *) (va + 12)) + ETH_HLEN) > pktsize) {
			/* Increment Non Ether type II frames malformed */
			adapter->be_stat.bes_802_3_malformed_frames++;
		}
	}

	vtp = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtp, rxcp);
	vtm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtm, rxcp);
	if (vtp && vtm) {
		/* Vlan tag present in pkt and BE found
		 * that the tag matched an entry in VLAN table
		 */
		if (!pnob->vlan_grp || pnob->num_vlans == 0) {
			/* But we have no VLANs configured.
			 * This should never happen. Drop the packet.
			 */
			dev_info(&pnob->netdev->dev,
				 "BladeEngine: Unexpected vlan tagged packet\n");
			kfree_skb(skb);
			return 0;
		}
		/* pass the VLAN packet to stack */
		vlan_tag = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vlan_tag, rxcp);
		VLAN_ACCEL_RX(skb, pnob, be16_to_cpu(vlan_tag));
	} else {
		NETIF_RX(skb);
	}
	return 0;

free_frags:
	/* free all frags associated with the current rxcp */
	numfrags = AMAP_GET_BITS_PTR(ETH_RX_COMPL, numfrags, rxcp);
	while (numfrags-- > 1) {
		index_inc(&fi, pnob->rx_q_len);

		rx_page_info = (struct be_rx_page_info *)
		    pnob->rx_ctxt[fi];
		pnob->rx_ctxt[fi] = (void *)NULL;
		if (rx_page_info->page_offset || !pnob->rx_pg_shared) {
			pci_unmap_page(adapter->pdev,
				       pci_unmap_addr(rx_page_info, bus),
				       frag_sz, PCI_DMA_FROMDEVICE);
		}

		put_page(rx_page_info->page);
		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
		atomic_dec(&pnob->rx_q_posted);
	}
	return -ENOMEM;
}
/*
 * LRO-aware RX completion handler: collects the page fragments of one
 * received frame and hands them to the inet_lro manager for possible
 * aggregation.  Falls back to process_nic_rx_completion() for error
 * completions or when coalescing is disabled.
 */
static void process_nic_rx_completion_lro(struct be_net_object *pnob,
					  struct ETH_RX_COMPL_AMAP *rxcp)
{
	struct be_adapter *adapter = pnob->adapter;
	struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
	unsigned int udpcksm, tcpcksm;
	u32 numfrags, vlanf, vtm, vlan_tag, nresid;
	u16 vlant;
	unsigned int fi, idx, n;
	struct be_rx_page_info *rx_page_info;
	u32 frag_sz = pnob->rx_buf_size, pktsize;
	bool rx_coal = (adapter->max_rx_coal <= 1) ? 0 : 1;
	u8 err, *va;
	__wsum csum = 0;

	if (AMAP_GET_BITS_PTR(ETH_RX_COMPL, ipsec, rxcp)) {
		/* Drop the pkt and move to the next completion. */
		adapter->be_stat.bes_rx_misc_pkts++;
		return;
	}
	err = AMAP_GET_BITS_PTR(ETH_RX_COMPL, err, rxcp);
	if (err || !rx_coal) {
		/* We won't coalesce Rx pkts if the err bit set.
		 * take the path of normal completion processing */
		process_nic_rx_completion(pnob, rxcp);
		return;
	}

	fi = AMAP_GET_BITS_PTR(ETH_RX_COMPL, fragndx, rxcp);
	/* NOTE(review): fi is unsigned, so "fi < 0" can never trigger. */
	BUG_ON(fi >= (int)pnob->rx_q_len);
	BUG_ON(fi < 0);
	rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
	BUG_ON(!rx_page_info->page);
	pnob->rx_ctxt[fi] = (void *)NULL;

	/* If one page is used per fragment or if this is the
	 * second half of the page, unmap the page here
	 */
	if (rx_page_info->page_offset || !pnob->rx_pg_shared) {
		pci_unmap_page(adapter->pdev,
			       pci_unmap_addr(rx_page_info, bus),
			       frag_sz, PCI_DMA_FROMDEVICE);
	}

	numfrags = AMAP_GET_BITS_PTR(ETH_RX_COMPL, numfrags, rxcp);
	udpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, udpcksm, rxcp);
	tcpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, tcpcksm, rxcp);
	vlan_tag = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vlan_tag, rxcp);
	vlant = be16_to_cpu(vlan_tag);
	vlanf = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtp, rxcp);
	vtm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtm, rxcp);
	pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp);

	atomic_dec(&pnob->rx_q_posted);

	if (tcpcksm && udpcksm && pktsize == 32) {
		/* flush completion entries */
		put_page(rx_page_info->page);
		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
		return;
	}
	/* Only one of udpcksum and tcpcksum can be set */
	BUG_ON(udpcksm && tcpcksm);

	/* jumbo frames could come in multiple fragments */
	BUG_ON(numfrags != ((pktsize + (frag_sz - 1)) / frag_sz));
	n = min(pktsize, frag_sz);
	nresid = pktsize - n;	/* will be useful for jumbo pkts */
	idx = 0;

	va = page_address(rx_page_info->page) + rx_page_info->page_offset;
	prefetch(va);

	/* First fragment of the frame. */
	rx_frags[idx].page = rx_page_info->page;
	rx_frags[idx].page_offset = (rx_page_info->page_offset);
	rx_frags[idx].size = n;
	memset(rx_page_info, 0, sizeof(struct be_rx_page_info));

	/* If we got multiple fragments, we have more data. */
	while (nresid) {
		idx++;
		index_inc(&fi, pnob->rx_q_len);

		rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
		pnob->rx_ctxt[fi] = (void *)NULL;
		if (rx_page_info->page_offset || !pnob->rx_pg_shared) {
			pci_unmap_page(adapter->pdev,
				       pci_unmap_addr(rx_page_info, bus),
				       frag_sz, PCI_DMA_FROMDEVICE);
		}

		n = min(nresid, frag_sz);
		rx_frags[idx].page = rx_page_info->page;
		rx_frags[idx].page_offset = (rx_page_info->page_offset);
		rx_frags[idx].size = n;

		nresid -= n;
		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
		atomic_dec(&pnob->rx_q_posted);
	}

	if (likely(!(vlanf && vtm))) {
		lro_receive_frags(&pnob->lro_mgr, rx_frags,
				  pktsize, pktsize,
				  (void *)(unsigned long)csum, csum);
	} else {
		/* Vlan tag present in pkt and BE found
		 * that the tag matched an entry in VLAN table
		 */
		if (unlikely(!pnob->vlan_grp || pnob->num_vlans == 0)) {
			/* But we have no VLANs configured.
			 * This should never happen. Drop the packet.
			 */
			dev_info(&pnob->netdev->dev,
				 "BladeEngine: Unexpected vlan tagged packet\n");
			return;
		}
		/* pass the VLAN packet to stack */
		lro_vlan_hwaccel_receive_frags(&pnob->lro_mgr,
					       rx_frags, pktsize, pktsize,
					       pnob->vlan_grp, vlant,
					       (void *)(unsigned long)csum,
					       csum);
	}

	adapter->be_stat.bes_rx_coal++;
}
struct ETH_RX_COMPL_AMAP *be_get_rx_cmpl(struct be_net_object *pnob)
{
struct ETH_RX_COMPL_AMAP *rxcp = &pnob->rx_cq[pnob->rx_cq_tl];
u32 valid, ct;
valid = AMAP_GET_BITS_PTR(ETH_RX_COMPL, valid, rxcp);
if (valid == 0)
return NULL;
ct = AMAP_GET_BITS_PTR(ETH_RX_COMPL, ct, rxcp);
if (ct != 0) {
/* Invalid chute #. treat as error */
AMAP_SET_BITS_PTR(ETH_RX_COMPL, err, rxcp, 1);
}
be_adv_rxcq_tl(pnob);
AMAP_SET_BITS_PTR(ETH_RX_COMPL, valid, rxcp, 0);
return rxcp;
}
/*
 * Recompute the RX throughput estimate (in megabits/sec) at most once
 * every two seconds, then reset the byte counter for the next window.
 */
static void update_rx_rate(struct be_adapter *adapter)
{
	/* update the rate once in two seconds */
	if ((jiffies - adapter->eth_rx_jiffies) > 2 * (HZ)) {
		u32 r = adapter->eth_rx_bytes /
			((jiffies - adapter->eth_rx_jiffies) / (HZ));
		/* bytes/sec -> MB/sec -> Mbit/sec */
		adapter->be_stat.bes_eth_rx_rate = (r / 1000000) * 8;
		adapter->eth_rx_jiffies = jiffies;
		adapter->eth_rx_bytes = 0;
	}
}
/*
 * Drain up to max_work entries from the RX completion queue, flush any
 * LRO-held packets, and replenish the RX buffer ring if it is running
 * low.  Returns the number of completions processed.
 */
static int process_rx_completions(struct be_net_object *pnob, int max_work)
{
	struct be_adapter *adapter = pnob->adapter;
	struct ETH_RX_COMPL_AMAP *rxcp;
	u32 nc = 0;
	unsigned int pktsize;

	while (max_work && (rxcp = be_get_rx_cmpl(pnob))) {
		prefetch(rxcp);
		pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp);
		process_nic_rx_completion_lro(pnob, rxcp);
		adapter->eth_rx_bytes += pktsize;
		update_rx_rate(adapter);
		nc++;
		max_work--;
		adapter->be_stat.bes_rx_compl++;
	}
	/* When coalescing is active, push any held-back LRO sessions up. */
	if (likely(adapter->max_rx_coal > 1)) {
		adapter->be_stat.bes_rx_flush++;
		lro_flush_all(&pnob->lro_mgr);
	}

	/* Refill the queue */
	/* NOTE(review): 900 appears to be a low-water mark on posted RX
	 * buffers -- confirm against rx_q_len before tuning. */
	if (atomic_read(&pnob->rx_q_posted) < 900)
		be_post_eth_rx_buffs(pnob);

	return nc;
}
static struct ETH_TX_COMPL_AMAP *be_get_tx_cmpl(struct be_net_object *pnob)
{
struct ETH_TX_COMPL_AMAP *txcp = &pnob->tx_cq[pnob->tx_cq_tl];
u32 valid;
valid = AMAP_GET_BITS_PTR(ETH_TX_COMPL, valid, txcp);
if (valid == 0)
return NULL;
AMAP_SET_BITS_PTR(ETH_TX_COMPL, valid, txcp, 0);
be_adv_txcq_tl(pnob);
return txcp;
}
/*
 * Reclaim all WRBs belonging to one transmitted skb.  @end_idx is the
 * index of the skb's last WRB as reported by the TX completion entry;
 * it must match the index recorded at transmit time (BUG otherwise).
 * Unmaps every fragment, frees the skb and advances the TX queue tail.
 */
void process_one_tx_compl(struct be_net_object *pnob, u32 end_idx)
{
	struct be_adapter *adapter = pnob->adapter;
	int cur_index, tx_wrbs_completed = 0;
	struct sk_buff *skb;
	u64 busaddr, pa, pa_lo, pa_hi;
	struct ETH_WRB_AMAP *wrb;
	u32 frag_len, last_index, j;

	last_index = tx_compl_lastwrb_idx_get(pnob);
	BUG_ON(last_index != end_idx);
	/* Clear the wrb-count slot written by be_tx_wrb_info_remember(). */
	pnob->tx_ctxt[pnob->tx_q_tl] = NULL;
	do {
		cur_index = pnob->tx_q_tl;
		wrb = &pnob->tx_q[cur_index];
		/* Recover the DMA address/length from the WRB itself. */
		pa_hi = AMAP_GET_BITS_PTR(ETH_WRB, frag_pa_hi, wrb);
		pa_lo = AMAP_GET_BITS_PTR(ETH_WRB, frag_pa_lo, wrb);
		frag_len = AMAP_GET_BITS_PTR(ETH_WRB, frag_len, wrb);
		busaddr = (pa_hi << 32) | pa_lo;
		if (busaddr != 0) {
			pa = le64_to_cpu(busaddr);
			pci_unmap_single(adapter->pdev, pa,
					 frag_len, PCI_DMA_TODEVICE);
		}
		if (cur_index == last_index) {
			/* Last WRB of the skb: its context slot holds the
			 * skb pointer stored at transmit time. */
			skb = (struct sk_buff *)pnob->tx_ctxt[cur_index];
			BUG_ON(!skb);
			for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
				struct skb_frag_struct *frag;
				frag = &skb_shinfo(skb)->frags[j];
				/* NOTE(review): passing the page pointer as
				 * the DMA address looks wrong -- pci_unmap_page
				 * expects the bus address that pci_map_page
				 * returned in copy_skb_to_txq().  Confirm. */
				pci_unmap_page(adapter->pdev,
					       (ulong) frag->page, frag->size,
					       PCI_DMA_TODEVICE);
			}
			kfree_skb(skb);
			pnob->tx_ctxt[cur_index] = NULL;
		} else {
			/* Intermediate WRBs must not own a context pointer. */
			BUG_ON(pnob->tx_ctxt[cur_index]);
		}
		tx_wrbs_completed++;
		be_adv_txq_tl(pnob);
	} while (cur_index != last_index);
	atomic_sub(tx_wrbs_completed, &pnob->tx_q_used);
}
/* there is no need to take an SMP lock here since currently
 * we have only one instance of the tasklet that does completion
 * processing.
 */
/*
 * Drain the TX completion queue, reclaiming WRBs for every completed
 * skb, ack the completions, and wake the netdev queue if it had been
 * stopped for lack of WRBs and the ring is now at most half full.
 */
static void process_nic_tx_completions(struct be_net_object *pnob)
{
	struct be_adapter *adapter = pnob->adapter;
	struct ETH_TX_COMPL_AMAP *txcp;
	struct net_device *netdev = pnob->netdev;
	u32 end_idx, num_processed = 0;

	adapter->be_stat.bes_tx_events++;

	while ((txcp = be_get_tx_cmpl(pnob))) {
		end_idx = AMAP_GET_BITS_PTR(ETH_TX_COMPL, wrb_index, txcp);
		process_one_tx_compl(pnob, end_idx);
		num_processed++;
		adapter->be_stat.bes_tx_compl++;
	}
	/* Ack the consumed completions and re-arm the TX CQ. */
	be_notify_cmpl(pnob, num_processed, pnob->tx_cq_id, 1);
	/*
	 * We got Tx completions and have usable WRBs.
	 * If the netdev's queue has been stopped
	 * because we had run out of WRBs, wake it now.
	 */
	spin_lock(&adapter->txq_lock);
	if (netif_queue_stopped(netdev)
	    && atomic_read(&pnob->tx_q_used) < pnob->tx_q_len / 2) {
		netif_wake_queue(netdev);
	}
	spin_unlock(&adapter->txq_lock);
}
/*
 * Move up to 255 receive buffers from list @rxbl onto the RX descriptor
 * ring and ring the RQ doorbell once for the whole batch.  Stops early
 * when the ring is full (the context slot at the head is still in use).
 * Returns the number of descriptors actually posted.
 */
static u32 post_rx_buffs(struct be_net_object *pnob, struct list_head *rxbl)
{
	u32 nposted = 0;
	struct ETH_RX_D_AMAP *rxd = NULL;
	struct be_recv_buffer *rxbp;
	void **rx_ctxp;
	struct RQ_DB_AMAP rqdb;

	rx_ctxp = pnob->rx_ctxt;
	/* A non-NULL context slot means the descriptor is still in use. */
	while (!list_empty(rxbl) &&
	       (rx_ctxp[pnob->rx_q_hd] == NULL) && nposted < 255) {
		rxbp = list_first_entry(rxbl, struct be_recv_buffer, rxb_list);
		list_del(&rxbp->rxb_list);
		rxd = pnob->rx_q + pnob->rx_q_hd;
		AMAP_SET_BITS_PTR(ETH_RX_D, fragpa_lo, rxd, rxbp->rxb_pa_lo);
		AMAP_SET_BITS_PTR(ETH_RX_D, fragpa_hi, rxd, rxbp->rxb_pa_hi);
		rx_ctxp[pnob->rx_q_hd] = rxbp->rxb_ctxt;
		be_adv_rxq_hd(pnob);
		nposted++;
	}
	if (nposted) {
		/* Now press the door bell to notify BladeEngine. */
		rqdb.dw[0] = 0;
		AMAP_SET_BITS_PTR(RQ_DB, numPosted, &rqdb, nposted);
		AMAP_SET_BITS_PTR(RQ_DB, rq, &rqdb, pnob->rx_q_id);
		PD_WRITE(&pnob->fn_obj, erx_rq_db, rqdb.dw[0]);
	}
	atomic_add(nposted, &pnob->rx_q_posted);
	return nposted;
}
/*
 * Allocate up to 64 receive buffers (each pnob->rx_buf_size bytes; two
 * buffers may share one page when rx_pg_shared is set), link them into
 * a local list and post them to the RX ring via post_rx_buffs().
 * Allocation failures are counted and the loop stops early.
 */
void be_post_eth_rx_buffs(struct be_net_object *pnob)
{
	struct be_adapter *adapter = pnob->adapter;
	u32 num_bufs, r;
	u64 busaddr = 0, tmp_pa;
	u32 max_bufs, pg_hd;
	u32 frag_size;
	struct be_recv_buffer *rxbp;
	struct list_head rxbl;
	struct be_rx_page_info *rx_page_info;
	struct page *page = NULL;
	u32 page_order = 0;
	gfp_t alloc_flags = GFP_ATOMIC;

	BUG_ON(!adapter);

	max_bufs = 64;	/* should be even # <= 255. */

	frag_size = pnob->rx_buf_size;
	page_order = get_order(frag_size);

	/* Multi-page allocations need __GFP_COMP for get_page() sharing. */
	if (frag_size == 8192)
		alloc_flags |= (gfp_t) __GFP_COMP;
	/*
	 * Form a linked list of RECV_BUFFFER structure to be be posted.
	 * We will post even number of buffer so that pages can be
	 * shared.
	 */
	INIT_LIST_HEAD(&rxbl);

	/* Stop when the page-info ring wraps onto an entry still in use. */
	for (num_bufs = 0; num_bufs < max_bufs &&
	     !pnob->rx_page_info[pnob->rx_pg_info_hd].page; ++num_bufs) {

		rxbp = &pnob->eth_rx_bufs[num_bufs];
		pg_hd = pnob->rx_pg_info_hd;
		rx_page_info = &pnob->rx_page_info[pg_hd];

		if (!page) {
			page = alloc_pages(alloc_flags, page_order);
			if (unlikely(page == NULL)) {
				adapter->be_stat.bes_ethrx_post_fail++;
				pnob->rxbuf_post_fail++;
				break;
			}
			pnob->rxbuf_post_fail = 0;
			/* NOTE(review): mapping result is not checked with
			 * pci_dma_mapping_error() -- confirm acceptable. */
			busaddr = pci_map_page(adapter->pdev, page, 0,
					       frag_size, PCI_DMA_FROMDEVICE);
			rx_page_info->page_offset = 0;
			rx_page_info->page = page;
			/*
			 * If we are sharing a page among two skbs,
			 * alloc a new one on the next iteration
			 */
			if (pnob->rx_pg_shared == false)
				page = NULL;
		} else {
			/* Second half of a shared page: bump the refcount
			 * and reuse the mapping at the next offset. */
			get_page(page);
			rx_page_info->page_offset += frag_size;
			rx_page_info->page = page;
			/*
			 * We are finished with the alloced page,
			 * Alloc a new one on the next iteration
			 */
			page = NULL;
		}
		rxbp->rxb_ctxt = (void *)rx_page_info;
		index_inc(&pnob->rx_pg_info_hd, pnob->rx_q_len);

		pci_unmap_addr_set(rx_page_info, bus, busaddr);
		tmp_pa = busaddr + rx_page_info->page_offset;
		rxbp->rxb_pa_lo = (tmp_pa & 0xFFFFFFFF);
		rxbp->rxb_pa_hi = (tmp_pa >> 32);
		rxbp->rxb_len = frag_size;
		list_add_tail(&rxbp->rxb_list, &rxbl);
	}	/* End of for */

	r = post_rx_buffs(pnob, &rxbl);
	BUG_ON(r != num_bufs);
	return;
}
/*
 * Hard interrupt handler for the network function.  Reads and latches
 * the interrupt status word, then defers all completion processing to
 * the status-handler tasklet.
 */
irqreturn_t be_int(int irq, void *dev)
{
	struct net_device *netdev = dev;
	struct be_net_object *pnob = netdev_priv(netdev);
	struct be_adapter *adapter = pnob->adapter;
	u32 status;

	status = CSR_READ(&pnob->fn_obj, cev.isr1);
	if (unlikely(status == 0))
		return IRQ_NONE;	/* shared line; not our interrupt */

	/* Latch the status bits for the tasklet under the int lock. */
	spin_lock(&adapter->int_lock);
	adapter->isr |= status;
	spin_unlock(&adapter->int_lock);

	adapter->be_stat.bes_ints++;
	tasklet_schedule(&adapter->sts_handler);
	return IRQ_HANDLED;
}
/*
 * NAPI poll callback with a work budget.  Processes as many RX
 * completions as the budget allows; when the queue drains before the
 * budget is spent, completes NAPI and re-arms the RX completion queue
 * so interrupts resume.  Returns the number of completions processed.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_net_object *pnob =
		container_of(napi, struct be_net_object, napi);
	u32 work_done;

	pnob->adapter->be_stat.bes_polls++;
	work_done = process_rx_completions(pnob, budget);
	BUG_ON(work_done > budget);

	/* All consumed */
	if (work_done < budget) {
		netif_rx_complete(napi);
		/* enable intr */
		be_notify_cmpl(pnob, work_done, pnob->rx_cq_id, 1);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_notify_cmpl(pnob, work_done, pnob->rx_cq_id, 0);
	}
	return work_done;
}
/*
 * Return the next valid event-queue entry and advance the EQ tail, or
 * NULL when the event queue holds no valid entries.
 */
static struct EQ_ENTRY_AMAP *get_event(struct be_net_object *pnob)
{
	struct EQ_ENTRY_AMAP *entry = &pnob->event_q[pnob->event_q_tl];

	if (AMAP_GET_BITS_PTR(EQ_ENTRY, Valid, entry) == 0)
		return NULL;

	be_adv_eq_tl(pnob);
	return entry;
}
/*
 * Processes all valid events in the event ring associated with given
 * NetObject.  RX events schedule NAPI, TX and MCC events are handled
 * inline.  Returns the number of events consumed; the caller notifies
 * BE via the EQ doorbell.
 */
static inline u32 process_events(struct be_net_object *pnob)
{
	struct be_adapter *adapter = pnob->adapter;
	struct EQ_ENTRY_AMAP *eqp;
	u32 rid, num_events = 0;
	struct net_device *netdev = pnob->netdev;

	while ((eqp = get_event(pnob)) != NULL) {
		adapter->be_stat.bes_events++;
		/* Dispatch on the completion queue the event refers to. */
		rid = AMAP_GET_BITS_PTR(EQ_ENTRY, ResourceID, eqp);
		if (rid == pnob->rx_cq_id) {
			/* RX work is deferred to the NAPI poll loop. */
			adapter->be_stat.bes_rx_events++;
			netif_rx_schedule(&pnob->napi);
		} else if (rid == pnob->tx_cq_id) {
			process_nic_tx_completions(pnob);
		} else if (rid == pnob->mcc_cq_id) {
			be_mcc_process_cq(&pnob->mcc_q_obj, 1);
		} else {
			dev_info(&netdev->dev,
				 "Invalid EQ ResourceID %d\n", rid);
		}
		/* Scrub the entry so hardware can reuse it. */
		AMAP_SET_BITS_PTR(EQ_ENTRY, Valid, eqp, 0);
		AMAP_SET_BITS_PTR(EQ_ENTRY, ResourceID, eqp, 0);
		num_events++;
	}
	return num_events;
}
/*
 * Adaptive interrupt coalescing: once a second, measure the interrupt
 * rate and nudge the event-queue delay up (rate above IPS_HI_WM) or
 * down (rate below IPS_LO_WM) in steps of 8, bounded by min_eqd and
 * max_eqd.  Only applied when adaptive coalescing (enable_aic) is on.
 */
static void update_eqd(struct be_adapter *adapter, struct be_net_object *pnob)
{
	int status;
	struct be_eq_object *eq_objectp;

	/* update once a second */
	if ((jiffies - adapter->ips_jiffies) > 1 * (HZ)) {
		/* One second elapsed since last update */
		u32 r, new_eqd = -1;	/* wrapped -1 == "no change" */
		r = adapter->be_stat.bes_ints - adapter->be_stat.bes_prev_ints;
		r = r / ((jiffies - adapter->ips_jiffies) / (HZ));
		adapter->be_stat.bes_ips = r;
		adapter->ips_jiffies = jiffies;
		adapter->be_stat.bes_prev_ints = adapter->be_stat.bes_ints;
		if (r > IPS_HI_WM && adapter->cur_eqd < adapter->max_eqd)
			new_eqd = (adapter->cur_eqd + 8);
		if (r < IPS_LO_WM && adapter->cur_eqd > adapter->min_eqd)
			new_eqd = (adapter->cur_eqd - 8);
		if (adapter->enable_aic && new_eqd != -1) {
			eq_objectp = &pnob->event_q_obj;
			status = be_eq_modify_delay(&pnob->fn_obj, 1,
						    &eq_objectp, &new_eqd, NULL,
						    NULL, NULL);
			if (status == BE_SUCCESS)
				adapter->cur_eqd = new_eqd;
		}
	}
}
/*
This function notifies BladeEngine of how many events were processed
from the event queue by ringing the corresponding door bell and
optionally re-arms the event queue.
n - number of events processed
re_arm - 1 - re-arm the EQ, 0 - do not re-arm the EQ
*/
static void be_notify_event(struct be_net_object *pnob, int n, int re_arm)
{
struct CQ_DB_AMAP eqdb;
eqdb.dw[0] = 0;
AMAP_SET_BITS_PTR(CQ_DB, qid, &eqdb, pnob->event_q_id);
AMAP_SET_BITS_PTR(CQ_DB, rearm, &eqdb, re_arm);
AMAP_SET_BITS_PTR(CQ_DB, event, &eqdb, 1);
AMAP_SET_BITS_PTR(CQ_DB, num_popped, &eqdb, n);
/*
* Under some situations we see an interrupt and no valid
* EQ entry. To keep going, we need to ring the DB even if
* numPOsted is 0.
*/
PD_WRITE(&pnob->fn_obj, cq_db, eqdb.dw[0]);
return;
}
/*
 * Called from the tasklet scheduled by ISR. All real interrupt processing
 * is done here.
 */
void be_process_intr(unsigned long context)
{
	struct be_adapter *adapter = (struct be_adapter *)context;
	struct be_net_object *pnob = adapter->net_obj;
	u32 isr, n;
	ulong flags = 0;

	isr = adapter->isr;
	/*
	 * we create only one NIC event queue in Linux. Event is
	 * expected only in the first event queue
	 */
	BUG_ON(isr & 0xfffffffe);
	if ((isr & 1) == 0)
		return;		/* not our interrupt */
	n = process_events(pnob);
	/*
	 * Clear the event bit. adapter->isr is set by
	 * hard interrupt. Prevent race with lock.
	 */
	spin_lock_irqsave(&adapter->int_lock, flags);
	adapter->isr &= ~1;
	spin_unlock_irqrestore(&adapter->int_lock, flags);
	/* Ack the processed events and re-arm the event queue. */
	be_notify_event(pnob, n, 1);
	/*
	 * If previous allocation attempts had failed and
	 * BE has used up all posted buffers, post RX buffers here
	 */
	if (pnob->rxbuf_post_fail && atomic_read(&pnob->rx_q_posted) == 0)
		be_post_eth_rx_buffs(pnob);
	update_eqd(adapter, pnob);
	return;
}

View File

@ -1,705 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* be_netif.c
*
* This file contains various entry points of drivers seen by tcp/ip stack.
*/
#include <linux/if_vlan.h>
#include <linux/in.h>
#include "benet.h"
#include <linux/ip.h>
#include <linux/inet_lro.h>
/* Strings to print Link properties */
/* Indexed by speed code 1..4; index 0 is the invalid/unknown fallback. */
static const char *link_speed[] = {
	"Invalid link Speed Value",
	"10 Mbps",
	"100 Mbps",
	"1 Gbps",
	"10 Gbps"
};

/* Indexed by duplex code 1..2; index 0 is the invalid fallback. */
static const char *link_duplex[] = {
	"Invalid Duplex Value",
	"Half Duplex",
	"Full Duplex"
};

/* Suffix printed after the speed/duplex of the active port. */
static const char *link_state[] = {
	"",
	"(active)"
};
/*
 * Print the speed, duplex and active state of both MAC ports to the
 * kernel log.  A port with zero speed or duplex is reported as Down.
 * Speed/duplex codes outside the string tables print the index-0
 * "Invalid" fallback.
 */
void be_print_link_info(struct BE_LINK_STATUS *lnk_status)
{
	u16 si, di, ai;

	/* Port 0 */
	if (lnk_status->mac0_speed && lnk_status->mac0_duplex) {
		/* Port is up and running */
		si = (lnk_status->mac0_speed < 5) ? lnk_status->mac0_speed : 0;
		di = (lnk_status->mac0_duplex < 3) ?
		     lnk_status->mac0_duplex : 0;
		ai = (lnk_status->active_port == 0) ? 1 : 0;
		printk(KERN_INFO "PortNo. 0: Speed - %s %s %s\n",
		       link_speed[si], link_duplex[di], link_state[ai]);
	} else
		printk(KERN_INFO "PortNo. 0: Down\n");

	/* Port 1 */
	if (lnk_status->mac1_speed && lnk_status->mac1_duplex) {
		/* Port is up and running */
		si = (lnk_status->mac1_speed < 5) ? lnk_status->mac1_speed : 0;
		di = (lnk_status->mac1_duplex < 3) ?
		     lnk_status->mac1_duplex : 0;
		/*
		 * Fix: must test for port 1 here.  The original tested
		 * active_port == 0 (copy-paste from the port 0 branch),
		 * so port 1 was never reported "(active)".
		 */
		ai = (lnk_status->active_port == 1) ? 1 : 0;
		printk(KERN_INFO "PortNo. 1: Speed - %s %s %s\n",
		       link_speed[si], link_duplex[di], link_state[ai]);
	} else
		printk(KERN_INFO "PortNo. 1: Down\n");

	return;
}
/*
 * LRO callback: locate the MAC, IP and TCP headers inside the first
 * fragment of a received frame.  Returns 0 for IPv4/TCP frames (with
 * *hdr_flags set to LRO_IPV4 | LRO_TCP) and -1 for anything else,
 * which tells inet_lro not to aggregate the packet.
 */
static int
be_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
		   void **ip_hdr, void **tcpudp_hdr,
		   u64 *hdr_flags, void *priv)
{
	u8 *va = page_address(frag->page) + frag->page_offset;
	struct ethhdr *eh = (struct ethhdr *)va;
	struct iphdr *iph;
	unsigned long hdr_len = ETH_HLEN;

	prefetch(va);
	*mac_hdr = eh;

	/* Accept plain IPv4 or a single 802.1Q tag wrapping IPv4. */
	if (eh->h_proto != htons(ETH_P_IP)) {
		struct vlan_ethhdr *veh;

		if (eh->h_proto != htons(ETH_P_8021Q))
			return -1;
		veh = (struct vlan_ethhdr *)va;
		if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
			return -1;
		hdr_len += VLAN_HLEN;
	}

	*hdr_flags = LRO_IPV4;
	iph = (struct iphdr *)(va + hdr_len);
	*ip_hdr = iph;
	if (iph->protocol != IPPROTO_TCP)
		return -1;

	*hdr_flags |= LRO_TCP;
	*tcpudp_hdr = (u8 *)iph + (iph->ihl << 2);
	return 0;
}
/*
 * net_device open entry point: configures the software LRO manager,
 * reads the current link state, and enables NAPI plus adapter
 * interrupts.  Returns -EAGAIN when the adapter has not finished
 * initialization.
 */
static int benet_open(struct net_device *netdev)
{
	struct be_net_object *pnob = netdev_priv(netdev);
	struct be_adapter *adapter = pnob->adapter;
	struct net_lro_mgr *lro_mgr;

	if (adapter->dev_state < BE_DEV_STATE_INIT)
		return -EAGAIN;

	/* Set up LRO aggregation over the NAPI receive path. */
	lro_mgr = &pnob->lro_mgr;
	lro_mgr->dev = netdev;
	lro_mgr->features = LRO_F_NAPI;
	lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
	lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
	lro_mgr->max_desc = BE_MAX_LRO_DESCRIPTORS;
	lro_mgr->lro_arr = pnob->lro_desc;
	lro_mgr->get_frag_header = be_get_frag_header;
	lro_mgr->max_aggr = adapter->max_rx_coal;
	lro_mgr->frag_align_pad = 2;
	if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
		lro_mgr->max_aggr = MAX_SKB_FRAGS;

	/* NOTE(review): max_rx_coal is overwritten right after max_aggr
	 * was derived from it -- confirm this ordering is intentional. */
	adapter->max_rx_coal = BE_LRO_MAX_PKTS;

	be_update_link_status(adapter);

	/*
	 * Set carrier on only if Physical Link up
	 * Either of the port link status up signifies this
	 */
	if ((adapter->port0_link_sts == BE_PORT_LINK_UP) ||
	    (adapter->port1_link_sts == BE_PORT_LINK_UP)) {
		netif_start_queue(netdev);
		netif_carrier_on(netdev);
	}

	adapter->dev_state = BE_DEV_STATE_OPEN;
	napi_enable(&pnob->napi);
	be_enable_intr(pnob);
	be_enable_eq_intr(pnob);
	/*
	 * RX completion queue may be in dis-armed state. Arm it.
	 */
	be_notify_cmpl(pnob, 0, pnob->rx_cq_id, 1);
	return 0;
}
/*
 * net_device stop entry point: quiesce TX, wait for in-flight TX
 * completions, then mark both ports down and disable interrupts and
 * NAPI.  Ordering matters -- the queue is stopped and the IRQ
 * synchronized before the state transitions.
 */
static int benet_close(struct net_device *netdev)
{
	struct be_net_object *pnob = netdev_priv(netdev);
	struct be_adapter *adapter = pnob->adapter;

	netif_stop_queue(netdev);
	synchronize_irq(netdev->irq);

	/* Let all outstanding transmits complete before tearing down. */
	be_wait_nic_tx_cmplx_cmpl(pnob);
	adapter->dev_state = BE_DEV_STATE_INIT;
	netif_carrier_off(netdev);

	adapter->port0_link_sts = BE_PORT_LINK_DOWN;
	adapter->port1_link_sts = BE_PORT_LINK_DOWN;
	be_disable_intr(pnob);
	be_disable_eq_intr(pnob);
	napi_disable(&pnob->napi);
	return 0;
}
/*
 * net_device entry point for changing the interface MAC address.
 * The new address is programmed into both MAC ports: with
 * Active-Passive failover, both ports must always carry matching
 * addresses.  Always returns 0.
 */
static int benet_set_mac_addr(struct net_device *netdev, void *p)
{
	struct be_net_object *pnob = netdev_priv(netdev);
	struct sockaddr *sa = p;

	memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
	be_rxf_mac_address_read_write(&pnob->fn_obj, 0, 0, false, true, false,
				      netdev->dev_addr, NULL, NULL);
	be_rxf_mac_address_read_write(&pnob->fn_obj, 1, 0, false, true, false,
				      netdev->dev_addr, NULL, NULL);
	return 0;
}
/*
 * Guard-timer callback for a statistics query: if the MCC completion
 * callback has not already released the waiter (get_stat_flag still
 * set), release it here so benet_get_stats() never blocks forever on a
 * lost firmware response.
 */
void be_get_stats_timer_handler(unsigned long context)
{
	struct be_timer_ctxt *ctxt = (struct be_timer_ctxt *)context;

	if (atomic_read(&ctxt->get_stat_flag)) {
		atomic_dec(&ctxt->get_stat_flag);
		up((void *)ctxt->get_stat_sem_addr);
	}
	del_timer(&ctxt->get_stats_timer);
}
/*
 * MCC completion callback for a statistics query.  Cancels the guard
 * timer and, if the timer handler has not already done so (flag still
 * set), releases the thread blocked in benet_get_stats().
 */
void be_get_stat_cb(void *context, int status,
		    struct MCC_WRB_AMAP *optional_wrb)
{
	struct be_timer_ctxt *ctxt = context;

	del_timer(&ctxt->get_stats_timer);
	if (atomic_read(&ctxt->get_stat_flag)) {
		atomic_dec(&ctxt->get_stat_flag);
		up((void *)ctxt->get_stat_sem_addr);
	}
}
/*
 * net_device get_stats entry point.  Fires an asynchronous firmware
 * query for both ports' ethernet statistics and blocks on a semaphore
 * until either the MCC completion callback or a 2-second guard timer
 * releases it, then folds the port0 + port1 counters into
 * adapter->benet_stats.  If the device is not open, the counters from
 * the previous successful query are returned unchanged.
 */
struct net_device_stats *benet_get_stats(struct net_device *dev)
{
	struct be_net_object *pnob = netdev_priv(dev);
	struct be_adapter *adapter = pnob->adapter;
	u64 pa;
	struct be_timer_ctxt *ctxt = &adapter->timer_ctxt;

	if (adapter->dev_state != BE_DEV_STATE_OPEN) {
		/* Return previously read stats */
		return &(adapter->benet_stats);
	}
	/* Get Physical Addr */
	pa = pci_map_single(adapter->pdev, adapter->eth_statsp,
			    sizeof(struct FWCMD_ETH_GET_STATISTICS),
			    PCI_DMA_FROMDEVICE);
	ctxt->get_stat_sem_addr = (unsigned long)&adapter->get_eth_stat_sem;
	atomic_inc(&ctxt->get_stat_flag);

	be_rxf_query_eth_statistics(&pnob->fn_obj, adapter->eth_statsp,
				    cpu_to_le64(pa), be_get_stat_cb, ctxt,
				    NULL);

	/* Guard timer: be_get_stats_timer_handler() unblocks us if the
	 * firmware response never arrives. */
	ctxt->get_stats_timer.data = (unsigned long)ctxt;
	mod_timer(&ctxt->get_stats_timer, (jiffies + (HZ * 2)));
	down((void *)ctxt->get_stat_sem_addr);	/* callback will unblock us */

	/* Adding port0 and port1 stats. */
	adapter->benet_stats.rx_packets =
	    adapter->eth_statsp->params.response.p0recvdtotalframes +
	    adapter->eth_statsp->params.response.p1recvdtotalframes;
	adapter->benet_stats.tx_packets =
	    adapter->eth_statsp->params.response.p0xmitunicastframes +
	    adapter->eth_statsp->params.response.p1xmitunicastframes;
	adapter->benet_stats.tx_bytes =
	    adapter->eth_statsp->params.response.p0xmitbyteslsd +
	    adapter->eth_statsp->params.response.p1xmitbyteslsd;
	adapter->benet_stats.rx_errors =
	    adapter->eth_statsp->params.response.p0crcerrors +
	    adapter->eth_statsp->params.response.p1crcerrors;
	adapter->benet_stats.rx_errors +=
	    adapter->eth_statsp->params.response.p0alignmentsymerrs +
	    adapter->eth_statsp->params.response.p1alignmentsymerrs;
	adapter->benet_stats.rx_errors +=
	    adapter->eth_statsp->params.response.p0inrangelenerrors +
	    adapter->eth_statsp->params.response.p1inrangelenerrors;
	adapter->benet_stats.rx_bytes =
	    adapter->eth_statsp->params.response.p0recvdtotalbytesLSD +
	    adapter->eth_statsp->params.response.p1recvdtotalbytesLSD;
	adapter->benet_stats.rx_crc_errors =
	    adapter->eth_statsp->params.response.p0crcerrors +
	    adapter->eth_statsp->params.response.p1crcerrors;

	adapter->benet_stats.tx_packets +=
	    adapter->eth_statsp->params.response.p0xmitmulticastframes +
	    adapter->eth_statsp->params.response.p1xmitmulticastframes;
	adapter->benet_stats.tx_packets +=
	    adapter->eth_statsp->params.response.p0xmitbroadcastframes +
	    adapter->eth_statsp->params.response.p1xmitbroadcastframes;
	adapter->benet_stats.tx_errors = 0;

	adapter->benet_stats.multicast =
	    adapter->eth_statsp->params.response.p0xmitmulticastframes +
	    adapter->eth_statsp->params.response.p1xmitmulticastframes;

	adapter->benet_stats.rx_fifo_errors =
	    adapter->eth_statsp->params.response.p0rxfifooverflowdropped +
	    adapter->eth_statsp->params.response.p1rxfifooverflowdropped;
	adapter->benet_stats.rx_frame_errors =
	    adapter->eth_statsp->params.response.p0alignmentsymerrs +
	    adapter->eth_statsp->params.response.p1alignmentsymerrs;
	adapter->benet_stats.rx_length_errors =
	    adapter->eth_statsp->params.response.p0inrangelenerrors +
	    adapter->eth_statsp->params.response.p1inrangelenerrors;
	adapter->benet_stats.rx_length_errors +=
	    adapter->eth_statsp->params.response.p0outrangeerrors +
	    adapter->eth_statsp->params.response.p1outrangeerrors;
	adapter->benet_stats.rx_length_errors +=
	    adapter->eth_statsp->params.response.p0frametoolongerrors +
	    adapter->eth_statsp->params.response.p1frametoolongerrors;

	/* NOTE(review): unmap is given the kernel virtual address of
	 * eth_statsp rather than the bus address `pa` mapped above --
	 * pci_unmap_single expects the DMA address.  Confirm. */
	pci_unmap_single(adapter->pdev, (ulong) adapter->eth_statsp,
			 sizeof(struct FWCMD_ETH_GET_STATISTICS),
			 PCI_DMA_FROMDEVICE);
	return &(adapter->benet_stats);
}
/*
 * Ring the TX doorbell to hand @nposted newly-filled WRBs to hardware.
 * The doorbell's numPosted field holds at most CSR_ETH_MAX_SQPOSTS, so
 * larger batches are announced with several writes.
 */
static void be_start_tx(struct be_net_object *pnob, u32 nposted)
{
#define CSR_ETH_MAX_SQPOSTS 255
	struct SQ_DB_AMAP db;

	db.dw[0] = 0;
	AMAP_SET_BITS_PTR(SQ_DB, cid, &db, pnob->tx_q_id);

	while (nposted) {
		u32 batch = (nposted > CSR_ETH_MAX_SQPOSTS) ?
			    CSR_ETH_MAX_SQPOSTS : nposted;

		AMAP_SET_BITS_PTR(SQ_DB, numPosted, &db, batch);
		nposted -= batch;
		PD_WRITE(&pnob->fn_obj, etx_sq_db, db.dw[0]);
	}
}
static void update_tx_rate(struct be_adapter *adapter)
{
/* update the rate once in two seconds */
if ((jiffies - adapter->eth_tx_jiffies) > 2 * (HZ)) {
u32 r;
r = adapter->eth_tx_bytes /
((jiffies - adapter->eth_tx_jiffies) / (HZ));
r = (r / 1000000); /* M bytes/s */
adapter->be_stat.bes_eth_tx_rate = (r * 8); /* M bits/s */
adapter->eth_tx_jiffies = jiffies;
adapter->eth_tx_bytes = 0;
}
}
/*
 * Count the WRBs needed to describe @skb: one for the linear part (if
 * non-empty) plus one per page fragment, walking the whole frag_list
 * chain.  BUGs if the total exceeds BE_MAX_TX_FRAG_COUNT.
 */
static int wrb_cnt_in_skb(struct sk_buff *skb)
{
	int cnt;

	for (cnt = 0; skb; skb = skb_shinfo(skb)->frag_list) {
		if (skb->len > skb->data_len)
			cnt++;	/* linear data present */
		cnt += skb_shinfo(skb)->nr_frags;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
/* Program one WRB with the 64-bit bus address and length of a fragment. */
static void wrb_fill(struct ETH_WRB_AMAP *wrb, u64 addr, int len)
{
	u32 addr_hi = addr >> 32;
	u32 addr_lo = addr & 0xFFFFFFFF;

	AMAP_SET_BITS_PTR(ETH_WRB, frag_pa_hi, wrb, addr_hi);
	AMAP_SET_BITS_PTR(ETH_WRB, frag_pa_lo, wrb, addr_lo);
	AMAP_SET_BITS_PTR(ETH_WRB, frag_len, wrb, len);
}
/*
 * Fill the offload control words (dw[2..3]) of an skb's first WRB:
 * always request CRC insertion; enable LSO with the GSO mss for
 * multi-segment GSO skbs, otherwise TCP/UDP checksum offload for
 * CHECKSUM_PARTIAL skbs; insert the VLAN tag when one is present.
 */
static void wrb_fill_extra(struct ETH_WRB_AMAP *wrb, struct sk_buff *skb,
			   struct be_net_object *pnob)
{
	wrb->dw[2] = 0;
	wrb->dw[3] = 0;
	AMAP_SET_BITS_PTR(ETH_WRB, crc, wrb, 1);
	if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
		AMAP_SET_BITS_PTR(ETH_WRB, lso, wrb, 1);
		AMAP_SET_BITS_PTR(ETH_WRB, lso_mss, wrb,
				  skb_shinfo(skb)->gso_size);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 proto = ((struct iphdr *)ip_hdr(skb))->protocol;
		if (proto == IPPROTO_TCP)
			AMAP_SET_BITS_PTR(ETH_WRB, tcpcs, wrb, 1);
		else if (proto == IPPROTO_UDP)
			AMAP_SET_BITS_PTR(ETH_WRB, udpcs, wrb, 1);
	}
	if (pnob->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS_PTR(ETH_WRB, vlan, wrb, 1);
		AMAP_SET_BITS_PTR(ETH_WRB, vlan_tag, wrb, vlan_tx_tag_get(skb));
	}
}
/* Copy the offload/VLAN control words (dw[2] and dw[3]) between WRBs. */
static inline void wrb_copy_extra(struct ETH_WRB_AMAP *to,
				  struct ETH_WRB_AMAP *from)
{
	to->dw[2] = from->dw[2];
	to->dw[3] = from->dw[3];
}
/* Returns the actual count of wrbs used including a possible dummy */
/*
 * Map every fragment of @skb (linear data, page frags, and the whole
 * frag_list chain) into consecutive TX WRBs.  Offload bits are filled
 * into the first WRB and copied to the rest.  Odd WRB counts are
 * padded with one zero-length dummy WRB; the final WRB gets the
 * complete/last bits so hardware raises exactly one completion.
 * *copied accumulates the number of payload bytes mapped.
 */
static int copy_skb_to_txq(struct be_net_object *pnob, struct sk_buff *skb,
			   u32 wrb_cnt, u32 *copied)
{
	u64 busaddr;
	struct ETH_WRB_AMAP *wrb = NULL, *first = NULL;
	u32 i;
	bool dummy = true;
	struct pci_dev *pdev = pnob->adapter->pdev;

	/* WRBs are consumed in pairs; pad odd counts with a dummy. */
	if (wrb_cnt & 1)
		wrb_cnt++;
	else
		dummy = false;

	atomic_add(wrb_cnt, &pnob->tx_q_used);

	while (skb) {
		/* Linear (header) portion of this skb, if any. */
		if (skb->len > skb->data_len) {
			int len = skb->len - skb->data_len;
			busaddr = pci_map_single(pdev, skb->data, len,
						 PCI_DMA_TODEVICE);
			busaddr = cpu_to_le64(busaddr);
			wrb = &pnob->tx_q[pnob->tx_q_hd];
			if (first == NULL) {
				wrb_fill_extra(wrb, skb, pnob);
				first = wrb;
			} else {
				wrb_copy_extra(wrb, first);
			}
			wrb_fill(wrb, busaddr, len);
			be_adv_txq_hd(pnob);
			*copied += len;
		}

		/* One WRB per page fragment. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			struct skb_frag_struct *frag =
			    &skb_shinfo(skb)->frags[i];
			busaddr = pci_map_page(pdev, frag->page,
					       frag->page_offset, frag->size,
					       PCI_DMA_TODEVICE);
			busaddr = cpu_to_le64(busaddr);
			wrb = &pnob->tx_q[pnob->tx_q_hd];
			if (first == NULL) {
				wrb_fill_extra(wrb, skb, pnob);
				first = wrb;
			} else {
				wrb_copy_extra(wrb, first);
			}
			wrb_fill(wrb, busaddr, frag->size);
			be_adv_txq_hd(pnob);
			*copied += frag->size;
		}
		skb = skb_shinfo(skb)->frag_list;
	}

	if (dummy) {
		/* Zero-length padding WRB to keep the count even. */
		wrb = &pnob->tx_q[pnob->tx_q_hd];
		BUG_ON(first == NULL);
		wrb_copy_extra(wrb, first);
		wrb_fill(wrb, 0, 0);
		be_adv_txq_hd(pnob);
	}
	AMAP_SET_BITS_PTR(ETH_WRB, complete, wrb, 1);
	AMAP_SET_BITS_PTR(ETH_WRB, last, wrb, 1);
	return wrb_cnt;
}
/* For each skb transmitted, tx_ctxt stores the num of wrbs in the
 * start index and skb pointer in the end index
 */
static inline void be_tx_wrb_info_remember(struct be_net_object *pnob,
					   struct sk_buff *skb, int wrb_cnt,
					   u32 start)
{
	/* First slot: WRB count, stored by punning the void * slot to u32.
	 * Read back (and cleared) by process_one_tx_compl(). */
	*(u32 *) (&pnob->tx_ctxt[start]) = wrb_cnt;
	/* Last slot: the skb itself, freed on completion. */
	index_adv(&start, wrb_cnt - 1, pnob->tx_q_len);
	pnob->tx_ctxt[start] = skb;
}
/*
 * net_device hard_start_xmit entry point.  Returns NETDEV_TX_BUSY (and
 * stops the queue) when the TX ring has fewer than wrb_cnt + 2 free
 * slots; otherwise maps the skb into WRBs, records it for completion
 * handling and rings the TX doorbell.
 */
static int benet_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_net_object *pnob = netdev_priv(netdev);
	struct be_adapter *adapter = pnob->adapter;
	u32 wrb_cnt, copied = 0;
	u32 start = pnob->tx_q_hd;

	adapter->be_stat.bes_tx_reqs++;

	wrb_cnt = wrb_cnt_in_skb(skb);
	spin_lock_bh(&adapter->txq_lock);
	/* Keep 2 slots in reserve (possible dummy WRB + head/tail gap). */
	if ((pnob->tx_q_len - 2 - atomic_read(&pnob->tx_q_used)) <= wrb_cnt) {
		netif_stop_queue(pnob->netdev);
		spin_unlock_bh(&adapter->txq_lock);
		adapter->be_stat.bes_tx_fails++;
		return NETDEV_TX_BUSY;
	}
	spin_unlock_bh(&adapter->txq_lock);
	wrb_cnt = copy_skb_to_txq(pnob, skb, wrb_cnt, &copied);
	be_tx_wrb_info_remember(pnob, skb, wrb_cnt, start);

	be_start_tx(pnob, wrb_cnt);

	adapter->eth_tx_bytes += copied;
	adapter->be_stat.bes_tx_wrbs += wrb_cnt;
	update_tx_rate(adapter);
	netdev->trans_start = jiffies;
	return NETDEV_TX_OK;
}
/*
 * net_device entry point for MTU changes.  BladeEngine supports jumbo
 * frames up to 9000 bytes including the link-layer header; allowing for
 * VLAN and SNAP/LLC encapsulation, the largest supported MTU is
 * BE_MAX_MTU (8974) bytes.  Returns 0 on success, -EINVAL when the
 * requested size is out of range.
 */
static int benet_change_mtu(struct net_device *netdev, int new_mtu)
{
	const int min_mtu = ETH_ZLEN + ETH_FCS_LEN;

	if (new_mtu < min_mtu || new_mtu > BE_MAX_MTU) {
		dev_info(&netdev->dev, "Invalid MTU requested. "
			 "Must be between %d and %d bytes\n",
			 min_mtu, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(&netdev->dev, "MTU changed from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}
/*
 * This is the driver entry point to register a vlan with the device.
 * EQ interrupts are disabled around the update so the completion
 * tasklet never observes a half-updated vlan_grp/num_vlans pair.
 */
static void benet_vlan_register(struct net_device *netdev,
				struct vlan_group *grp)
{
	struct be_net_object *pnob = netdev_priv(netdev);

	be_disable_eq_intr(pnob);
	pnob->vlan_grp = grp;
	pnob->num_vlans = 0;
	be_enable_eq_intr(pnob);
}
/*
 * This is the driver entry point to add a vlan vlan_id
 * with the device netdev.  The tag is appended to the software shadow
 * table and the whole set is reprogrammed into hardware.
 */
static void benet_vlan_add_vid(struct net_device *netdev, u16 vlan_id)
{
	struct be_net_object *pnob = netdev_priv(netdev);

	/* NOTE(review): rejecting at BE_NUM_VLAN_SUPPORTED - 1 caps the
	 * table at 31 entries although 32 are supported -- confirm whether
	 * one slot is deliberately reserved. */
	if (pnob->num_vlans == (BE_NUM_VLAN_SUPPORTED - 1)) {
		/* no way to return an error */
		dev_info(&netdev->dev,
			 "BladeEngine: Cannot configure more than %d Vlans\n",
			 BE_NUM_VLAN_SUPPORTED);
		return;
	}
	/* The new vlan tag will be in the slot indicated by num_vlans. */
	pnob->vlan_tag[pnob->num_vlans++] = vlan_id;
	be_rxf_vlan_config(&pnob->fn_obj, false, pnob->num_vlans,
			   pnob->vlan_tag, NULL, NULL, NULL);
}
/*
 * This is the driver entry point to remove vlan @vlan_id from the
 * device @netdev.  The tag is deleted from the software shadow table,
 * the table is compacted, and the remaining set is reprogrammed into
 * hardware.  Unknown ids are silently ignored.
 *
 * In Blade Engine, we support 32 vlan tag filters across both ports.
 * To program a vlan tag, the RXF_RTPR_CSR register is used.
 * Each 32-bit value of RXF_RTDR_CSR can address 2 vlan tag entries.
 * The Vlan table is of depth 16. thus we support 32 tags.
 *
 * Fix: the original computed `value = vlan_id | VLAN_VALID_BIT` and
 * never used it; the dead local has been removed.
 */
static void benet_vlan_rem_vid(struct net_device *netdev, u16 vlan_id)
{
	struct be_net_object *pnob = netdev_priv(netdev);
	u32 i;

	/* Locate the tag in the shadow table. */
	for (i = 0; i < BE_NUM_VLAN_SUPPORTED; i++) {
		if (pnob->vlan_tag[i] == vlan_id)
			break;
	}

	if (i == BE_NUM_VLAN_SUPPORTED)
		return;
	/* Now compact the vlan tag array by removing hole created. */
	while ((i + 1) < BE_NUM_VLAN_SUPPORTED) {
		pnob->vlan_tag[i] = pnob->vlan_tag[i + 1];
		i++;
	}
	if ((i + 1) == BE_NUM_VLAN_SUPPORTED)
		pnob->vlan_tag[i] = (u16) 0x0;

	pnob->num_vlans--;
	be_rxf_vlan_config(&pnob->fn_obj, false, pnob->num_vlans,
			   pnob->vlan_tag, NULL, NULL, NULL);
}
/*
 * This function is called to program multicast
 * address in the multicast filter of the ASIC.
 *
 * Fix: the on-stack table holds at most 32 addresses, but the original
 * copied the entire mc_list unbounded, overflowing mac_addr[] whenever
 * more than 32 groups were joined.  The loop is now clamped, and any
 * overflow falls back to multicast-promiscuous mode, a superset of the
 * requested filtering.
 */
static void be_set_multicast_filter(struct net_device *netdev)
{
	struct be_net_object *pnob = netdev_priv(netdev);
	struct dev_mc_list *mc_ptr;
	u8 mac_addr[32][ETH_ALEN];
	int i;

	if (netdev->flags & IFF_ALLMULTI) {
		/* set BE in Multicast promiscuous */
		be_rxf_multicast_config(&pnob->fn_obj, true, 0, NULL, NULL,
					NULL, NULL);
		return;
	}

	for (mc_ptr = netdev->mc_list, i = 0; mc_ptr && i < 32;
	     mc_ptr = mc_ptr->next, i++) {
		memcpy(&mac_addr[i][0], mc_ptr->dmi_addr, ETH_ALEN);
	}

	if (mc_ptr) {
		/* More groups than filter slots: accept all multicast. */
		be_rxf_multicast_config(&pnob->fn_obj, true, 0, NULL, NULL,
					NULL, NULL);
		return;
	}

	/* reset the promiscuous mode also. */
	be_rxf_multicast_config(&pnob->fn_obj, false, i,
				&mac_addr[0][0], NULL, NULL, NULL);
}
/*
 * This is the driver entry point to set multicast list
 * with the device netdev. This function will be used to
 * set promiscuous mode or multicast promiscuous mode
 * or multicast mode....
 */
static void benet_set_multicast_list(struct net_device *netdev)
{
	struct be_net_object *pnob = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_rxf_promiscuous(&pnob->fn_obj, 1, 1, NULL, NULL, NULL);
		return;
	}

	/* Leave promiscuous mode, then apply the multicast filter. */
	be_rxf_promiscuous(&pnob->fn_obj, 0, 0, NULL, NULL, NULL);
	be_set_multicast_filter(netdev);
}
/*
 * One-time net_device setup: installs the driver's entry points, VLAN
 * callbacks, feature flags and ethtool ops on @netdev.  Always
 * returns 0.
 */
int benet_init(struct net_device *netdev)
{
	struct be_net_object *pnob = netdev_priv(netdev);
	struct be_adapter *adapter = pnob->adapter;

	ether_setup(netdev);

	netdev->open = &benet_open;
	netdev->stop = &benet_close;
	netdev->hard_start_xmit = &benet_xmit;

	netdev->get_stats = &benet_get_stats;

	netdev->set_multicast_list = &benet_set_multicast_list;

	netdev->change_mtu = &benet_change_mtu;
	netdev->set_mac_address = &benet_set_mac_addr;

	netdev->vlan_rx_register = benet_vlan_register;
	netdev->vlan_rx_add_vid = benet_vlan_add_vid;
	netdev->vlan_rx_kill_vid = benet_vlan_rem_vid;

	netdev->features =
	    NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
	    NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM;

	netdev->flags |= IFF_MULTICAST;

	/* If device is DAC Capable, set the HIGHDMA flag for netdevice. */
	if (adapter->dma_64bit_cap)
		netdev->features |= NETIF_F_HIGHDMA;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
	return 0;
}

View File

@ -1,429 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
#ifndef _BENET_H_
#define _BENET_H_
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/inet_lro.h>
#include "hwlib.h"
#define _SA_MODULE_NAME "net-driver"
#define VLAN_VALID_BIT 0x8000
#define BE_NUM_VLAN_SUPPORTED 32
#define BE_PORT_LINK_DOWN 0000
#define BE_PORT_LINK_UP 0001
#define BE_MAX_TX_FRAG_COUNT (30)
/* Flag bits for send operation */
#define IPCS (1 << 0) /* Enable IP checksum offload */
#define UDPCS (1 << 1) /* Enable UDP checksum offload */
#define TCPCS (1 << 2) /* Enable TCP checksum offload */
#define LSO (1 << 3) /* Enable Large Segment offload */
#define ETHVLAN (1 << 4) /* Enable VLAN insert */
#define ETHEVENT (1 << 5) /* Generate event on completion */
#define ETHCOMPLETE (1 << 6) /* Generate completion when done */
#define IPSEC (1 << 7) /* Enable IPSEC */
#define FORWARD (1 << 8) /* Send the packet in forwarding path */
#define FIN (1 << 9) /* Issue FIN segment */
#define BE_MAX_MTU 8974
#define BE_MAX_LRO_DESCRIPTORS 8
#define BE_LRO_MAX_PKTS 64
#define BE_MAX_FRAGS_PER_FRAME 6
extern const char be_drvr_ver[];
extern char be_fw_ver[];
extern char be_driver_name[];
extern struct ethtool_ops be_ethtool_ops;
#define BE_DEV_STATE_NONE 0
#define BE_DEV_STATE_INIT 1
#define BE_DEV_STATE_OPEN 2
#define BE_DEV_STATE_SUSPEND 3
/* This structure is used to describe physical fragments to use
* for DMAing data from NIC.
*/
struct be_recv_buffer {
struct list_head rxb_list; /* for maintaining a linked list */
void *rxb_va; /* buffer virtual address */
u32 rxb_pa_lo; /* low part of physical address */
u32 rxb_pa_hi; /* high part of physical address */
u32 rxb_len; /* length of recv buffer */
void *rxb_ctxt; /* context for OSM driver to use */
};
/*
* fragment list to describe scattered data.
*/
struct be_tx_frag_list {
u32 txb_len; /* Size of this fragment */
u32 txb_pa_lo; /* Lower 32 bits of 64 bit physical addr */
u32 txb_pa_hi; /* Higher 32 bits of 64 bit physical addr */
};
/* Tracks one receive page posted to the RX ring and its DMA mapping. */
struct be_rx_page_info {
	struct page *page;	/* page backing the receive buffer */
	dma_addr_t bus;		/* DMA address of the mapped page */
	u16 page_offset;	/* offset of this buffer within the page
				   (a page may back two frags -- see
				   rx_pg_shared in struct be_net_object) */
};
/*
 * This structure is the main tracking structure for a NIC interface.
 */
struct be_net_object {
	/* MCC Ring - used to send fwcmds to embedded ARM processor */
	struct MCC_WRB_AMAP *mcc_q;	/* VA of the start of the ring */
	u32 mcc_q_len;			/* # of WRB entries in this ring */
	u32 mcc_q_size;
	u32 mcc_q_hd;			/* MCC ring head */
	u8 mcc_q_created;		/* flag to help cleanup */
	struct be_mcc_object mcc_q_obj;	/* BECLIB's MCC ring Object */
	dma_addr_t mcc_q_bus;		/* DMA'ble bus address */
	/* MCC Completion Ring - FW responses to fwcmds sent from MCC ring */
	struct MCC_CQ_ENTRY_AMAP *mcc_cq; /* VA of the start of the ring */
	u32 mcc_cq_len;			/* # of compl. entries in this ring */
	u32 mcc_cq_size;
	u32 mcc_cq_tl;			/* compl. ring tail */
	u8 mcc_cq_created;		/* flag to help cleanup */
	struct be_cq_object mcc_cq_obj;	/* BECLIB's MCC compl. ring object */
	u32 mcc_cq_id;			/* MCC ring ID */
	dma_addr_t mcc_cq_bus;		/* DMA'ble bus address */
	struct ring_desc mb_rd;		/* RD for MCC_MAIL_BOX */
	void *mb_ptr;			/* mailbox ptr to be freed */
	dma_addr_t mb_bus;		/* DMA'ble bus address */
	u32 mb_size;
	/* BEClib uses an array of context objects to track outstanding
	 * requests to the MCC.  We need allocate the same number of
	 * context entries as the number of entries in the MCC WRB ring
	 */
	u32 mcc_wrb_ctxt_size;
	void *mcc_wrb_ctxt;		/* pointer to the context area */
	u32 mcc_wrb_ctxtLen;		/* Number of entries in the context */
	/*
	 * NIC send request ring - used for xmitting raw ether frames.
	 */
	struct ETH_WRB_AMAP *tx_q;	/* VA of the start of the ring */
	u32 tx_q_len;			/* # of entries in the send ring */
	u32 tx_q_size;
	u32 tx_q_hd;			/* Head index. Next req. goes here */
	u32 tx_q_tl;			/* Tail indx. oldest outstanding req. */
	u8 tx_q_created;		/* flag to help cleanup */
	struct be_ethsq_object tx_q_obj; /* BECLIB's send Q handle */
	dma_addr_t tx_q_bus;		/* DMA'ble bus address */
	u32 tx_q_id;			/* send queue ring ID */
	u32 tx_q_port;			/* 0 no binding, 1 port A, 2 port B */
	atomic_t tx_q_used;		/* # of WRBs used */
	/* ptr to an array in which we store context info for each send req. */
	void **tx_ctxt;
	/*
	 * NIC Send compl. ring - completion status for all NIC frames xmitted.
	 */
	struct ETH_TX_COMPL_AMAP *tx_cq; /* VA of start of the ring */
	u32 txcq_len;			/* # of entries in the ring */
	u32 tx_cq_size;
	/*
	 * index into compl ring where the host expects next completion entry
	 */
	u32 tx_cq_tl;
	u32 tx_cq_id;			/* completion queue id */
	u8 tx_cq_created;		/* flag to help cleanup */
	struct be_cq_object tx_cq_obj;
	dma_addr_t tx_cq_bus;		/* DMA'ble bus address */
	/*
	 * Event Queue - all completion entries post events here.
	 */
	struct EQ_ENTRY_AMAP *event_q;	/* VA of start of event queue */
	u32 event_q_len;		/* # of entries */
	u32 event_q_size;
	u32 event_q_tl;			/* Tail of the event queue */
	u32 event_q_id;			/* Event queue ID */
	u8 event_q_created;		/* flag to help cleanup */
	struct be_eq_object event_q_obj; /* Queue handle */
	dma_addr_t event_q_bus;		/* DMA'ble bus address */
	/*
	 * NIC receive queue - Data buffers to be used for receiving unicast,
	 * broadcast and multi-cast frames are posted here.
	 */
	struct ETH_RX_D_AMAP *rx_q;	/* VA of start of the queue */
	u32 rx_q_len;			/* # of entries */
	u32 rx_q_size;
	u32 rx_q_hd;			/* Head of the queue */
	atomic_t rx_q_posted;		/* number of posted buffers */
	u32 rx_q_id;			/* queue ID */
	u8 rx_q_created;		/* flag to help cleanup */
	struct be_ethrq_object rx_q_obj; /* NIC RX queue handle */
	dma_addr_t rx_q_bus;		/* DMA'ble bus address */
	/*
	 * Pointer to an array of opaque context object for use by OSM driver
	 */
	void **rx_ctxt;
	/*
	 * NIC unicast RX completion queue - all unicast ether frame completion
	 * statuses from BE come here.
	 */
	struct ETH_RX_COMPL_AMAP *rx_cq; /* VA of start of the queue */
	u32 rx_cq_len;			/* # of entries */
	u32 rx_cq_size;
	u32 rx_cq_tl;			/* Tail of the queue */
	u32 rx_cq_id;			/* queue ID */
	u8 rx_cq_created;		/* flag to help cleanup */
	struct be_cq_object rx_cq_obj;	/* queue handle */
	dma_addr_t rx_cq_bus;		/* DMA'ble bus address */
	struct be_function_object fn_obj; /* function object */
	bool fn_obj_created;
	u32 rx_buf_size;		/* Size of the RX buffers */
	struct net_device *netdev;
	struct be_recv_buffer eth_rx_bufs[256];	/* to pass Rx buffer
						   addresses */
	struct be_adapter *adapter;	/* Pointer to OSM adapter */
	u32 devno;			/* OSM, network dev no. */
	u32 use_port;			/* Current active port */
	struct be_rx_page_info *rx_page_info;	/* Array of Rx buf pages */
	u32 rx_pg_info_hd;		/* Head of queue */
	int rxbuf_post_fail;		/* RxBuff posting fail count */
	bool rx_pg_shared;		/* Is an allocated page shared as two frags ? */
	struct vlan_group *vlan_grp;
	u32 num_vlans;			/* Number of vlans in BE's filter */
	u16 vlan_tag[BE_NUM_VLAN_SUPPORTED]; /* vlans currently configured */
	struct napi_struct napi;
	struct net_lro_mgr lro_mgr;
	struct net_lro_desc lro_desc[BE_MAX_LRO_DESCRIPTORS];
};
#define NET_FH(np) (&(np)->fn_obj)
/*
 * BE driver statistics.
 */
struct be_drvr_stat {
	u32 bes_tx_reqs;	/* number of TX requests initiated */
	u32 bes_tx_fails;	/* number of TX requests that failed */
	u32 bes_fwd_reqs;	/* number of send reqs through forwarding i/f */
	u32 bes_tx_wrbs;	/* number of tx WRBs used */
	u32 bes_ints;		/* number of interrupts */
	u32 bes_polls;		/* number of times NAPI called poll function */
	u32 bes_events;		/* total event entries processed */
	u32 bes_tx_events;	/* number of tx completion events */
	u32 bes_rx_events;	/* number of ucast rx completion events */
	u32 bes_tx_compl;	/* number of tx completion entries processed */
	u32 bes_rx_compl;	/* number of rx completion entries
				   processed */
	u32 bes_ethrx_post_fail;	/* number of ethrx buffer alloc
					   failures */
	/*
	 * number of non ether type II frames dropped where
	 * frame len > length field of Mac Hdr
	 */
	u32 bes_802_3_dropped_frames;
	/*
	 * number of non ether type II frames malformed where
	 * in frame len < length field of Mac Hdr
	 */
	u32 bes_802_3_malformed_frames;
	u32 bes_ips;			/* interrupts / sec */
	u32 bes_prev_ints;		/* bes_ints at last IPS calculation */
	u16 bes_eth_tx_rate;		/* ETH TX rate - Mb/sec */
	u16 bes_eth_rx_rate;		/* ETH RX rate - Mb/sec */
	u32 bes_rx_coal;		/* Num pkts coalesced */
	u32 bes_rx_flush;		/* Num times coalesced */
	u32 bes_link_change_physical;	/* Num of times physical link changed */
	u32 bes_link_change_virtual;	/* Num of times virtual link changed */
	u32 bes_rx_misc_pkts;		/* Misc pkts received */
};
/* Maximum interrupt delay (in microseconds) allowed */
#define MAX_EQD 120
/*
 * timer to prevent system shutdown hanging forever if h/w stops responding
 */
struct be_timer_ctxt {
	atomic_t get_stat_flag;		/* presumably set while a get-stats
					   fwcmd is outstanding -- NOTE(review):
					   confirm against be_get_stat_cb */
	struct timer_list get_stats_timer;	/* watchdog for the request */
	unsigned long get_stat_sem_addr;	/* address of the semaphore to
						   release when the timer fires
						   -- TODO confirm */
} ;
/* This structure is the main BladeEngine driver context. */
struct be_adapter {
	struct net_device *netdevp;
	struct be_drvr_stat be_stat;
	struct net_device_stats benet_stats;
	/* PCI BAR mapped addresses */
	u8 __iomem *csr_va;	/* CSR */
	u8 __iomem *db_va;	/* Door Bell */
	u8 __iomem *pci_va;	/* PCI Config */
	struct tasklet_struct sts_handler;
	struct timer_list cq_timer;
	spinlock_t int_lock;	/* to protect the isr field in adapter */
	struct FWCMD_ETH_GET_STATISTICS *eth_statsp;
	/*
	 * This will enable the use of ethtool to enable or disable
	 * Checksum on Rx pkts to be obeyed or disobeyed.
	 * If this is true = 1, then whatever is the checksum on the
	 * Received pkt as per BE, it will be given to the stack.
	 * Else the stack will re calculate it.
	 */
	bool rx_csum;
	/*
	 * This will enable the use of ethtool to enable or disable
	 * Coalescing on Rx pkts to be obeyed or disobeyed.
	 * If this is greater than 0 and less than 16 then coalescing
	 * is enabled else it is disabled
	 */
	u32 max_rx_coal;
	struct pci_dev *pdev;	/* Pointer to OS's PCI device */
	spinlock_t txq_lock;	/* to stop/wake queue based on tx_q_used */
	u32 isr;		/* copy of Intr status reg. */
	u32 port0_link_sts;	/* Port 0 link status */
	u32 port1_link_sts;	/* Port 1 link status */
	struct BE_LINK_STATUS *be_link_sts;
	/* pointer to the first netobject of this adapter */
	struct be_net_object *net_obj;
	/* Flags to indicate what to clean up */
	bool tasklet_started;
	bool isr_registered;
	/*
	 * adaptive interrupt coalescing (AIC) related
	 */
	bool enable_aic;	/* 1 if AIC is enabled */
	u16 min_eqd;		/* minimum EQ delay in usec */
	u16 max_eqd;		/* maximum EQ delay in usec */
	u16 cur_eqd;		/* current EQ delay in usec */
	/*
	 * book keeping for interrupt / sec and TX/RX rate calculation
	 */
	ulong ips_jiffies;	/* jiffies at last IPS calc */
	u32 eth_tx_bytes;
	ulong eth_tx_jiffies;
	u32 eth_rx_bytes;
	ulong eth_rx_jiffies;
	struct semaphore get_eth_stat_sem;
	/* timer ctxt to prevent shutdown hanging due to un-responsive BE */
	struct be_timer_ctxt timer_ctxt;
#define BE_MAX_MSIX_VECTORS		32
#define BE_MAX_REQ_MSIX_VECTORS		1 /* only one EQ in Linux driver */
	struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
	bool msix_enabled;
	bool dma_64bit_cap;	/* the Device DAC capable or not */
	u8 dev_state;		/* The current state of the device */
	u8 dev_pm_state;	/* The State of device before going to suspend */
};
/*
* Every second we look at the ints/sec and adjust eq_delay
* between adapter->min_eqd and adapter->max_eqd to keep the ints/sec between
* IPS_HI_WM and IPS_LO_WM.
*/
#define IPS_HI_WM 18000
#define IPS_LO_WM 8000
/*
 * Advance *index by val around a ring of the given limit.
 * The limit must be a power of two so the wrap can be a mask.
 */
static inline void index_adv(u32 *index, u32 val, u32 limit)
{
	u32 mask = limit - 1;

	/* wrap-around only works when limit is a power of two */
	BUG_ON(limit & mask);
	*index = (*index + val) & mask;
}
/*
 * Advance *index by one around a ring of the given (power-of-two) limit.
 * Identical to index_adv() with a step of 1, including the BUG_ON check.
 */
static inline void index_inc(u32 *index, u32 limit)
{
	index_adv(index, 1, limit);
}
/* Advance the event queue tail by one (event_q_len must be a power of two). */
static inline void be_adv_eq_tl(struct be_net_object *pnob)
{
	index_inc(&pnob->event_q_tl, pnob->event_q_len);
}
/* Advance the TX ring head by one (tx_q_len must be a power of two). */
static inline void be_adv_txq_hd(struct be_net_object *pnob)
{
	index_inc(&pnob->tx_q_hd, pnob->tx_q_len);
}
/* Advance the TX ring tail by one (tx_q_len must be a power of two). */
static inline void be_adv_txq_tl(struct be_net_object *pnob)
{
	index_inc(&pnob->tx_q_tl, pnob->tx_q_len);
}
/* Advance the TX completion queue tail by one (txcq_len must be a power of two). */
static inline void be_adv_txcq_tl(struct be_net_object *pnob)
{
	index_inc(&pnob->tx_cq_tl, pnob->txcq_len);
}
/* Advance the RX ring head by one (rx_q_len must be a power of two). */
static inline void be_adv_rxq_hd(struct be_net_object *pnob)
{
	index_inc(&pnob->rx_q_hd, pnob->rx_q_len);
}
/* Advance the RX completion queue tail by one (rx_cq_len must be a power of two). */
static inline void be_adv_rxcq_tl(struct be_net_object *pnob)
{
	index_inc(&pnob->rx_cq_tl, pnob->rx_cq_len);
}
/*
 * Return the ring index of the last WRB of the request whose first WRB
 * sits at tx_q_tl.  The per-request tx_ctxt slot appears to hold the WRB
 * count of that request, read here as a u32 -- NOTE(review): confirm the
 * tx_ctxt encoding against the transmit path.  tx_q_len must be a power
 * of two for the mask to be a valid wrap.
 */
static inline u32 tx_compl_lastwrb_idx_get(struct be_net_object *pnob)
{
	return (pnob->tx_q_tl + *(u32 *)&pnob->tx_ctxt[pnob->tx_q_tl] - 1)
		& (pnob->tx_q_len - 1);
}
int benet_init(struct net_device *);
int be_ethtool_ioctl(struct net_device *, struct ifreq *);
struct net_device_stats *benet_get_stats(struct net_device *);
void be_process_intr(unsigned long context);
irqreturn_t be_int(int irq, void *dev);
void be_post_eth_rx_buffs(struct be_net_object *);
void be_get_stat_cb(void *, int, struct MCC_WRB_AMAP *);
void be_get_stats_timer_handler(unsigned long);
void be_wait_nic_tx_cmplx_cmpl(struct be_net_object *);
void be_print_link_info(struct BE_LINK_STATUS *);
void be_update_link_status(struct be_adapter *);
void be_init_procfs(struct be_adapter *);
void be_cleanup_procfs(struct be_adapter *);
int be_poll(struct napi_struct *, int);
struct ETH_RX_COMPL_AMAP *be_get_rx_cmpl(struct be_net_object *);
void be_notify_cmpl(struct be_net_object *, int, int, int);
void be_enable_intr(struct be_net_object *);
void be_enable_eq_intr(struct be_net_object *);
void be_disable_intr(struct be_net_object *);
void be_disable_eq_intr(struct be_net_object *);
int be_set_uc_mac_adr(struct be_net_object *, u8, u8, u8,
u8 *, mcc_wrb_cqe_callback, void *);
int be_get_flow_ctl(struct be_function_object *pFnObj, bool *, bool *);
void process_one_tx_compl(struct be_net_object *pnob, u32 end_idx);
#endif /* _BENET_H_ */

View File

@ -1,103 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
#ifndef _BESTATUS_H_
#define _BESTATUS_H_
#define BE_SUCCESS (0x00000000L)
/*
* MessageId: BE_PENDING
* The BladeEngine Driver call succeeded, and pended operation.
*/
#define BE_PENDING (0x20070001L)
#define BE_STATUS_PENDING (BE_PENDING)
/*
* MessageId: BE_NOT_OK
* An error occurred.
*/
#define BE_NOT_OK (0xE0070002L)
/*
* MessageId: BE_STATUS_SYSTEM_RESOURCES
* Insufficient host system resources exist to complete the API.
*/
#define BE_STATUS_SYSTEM_RESOURCES (0xE0070003L)
/*
* MessageId: BE_STATUS_CHIP_RESOURCES
* Insufficient chip resources exist to complete the API.
*/
#define BE_STATUS_CHIP_RESOURCES (0xE0070004L)
/*
* MessageId: BE_STATUS_NO_RESOURCE
* Insufficient resources to complete request.
*/
#define BE_STATUS_NO_RESOURCE (0xE0070005L)
/*
* MessageId: BE_STATUS_BUSY
* Resource is currently busy.
*/
#define BE_STATUS_BUSY (0xE0070006L)
/*
* MessageId: BE_STATUS_INVALID_PARAMETER
* Invalid Parameter in request.
*/
#define BE_STATUS_INVALID_PARAMETER (0xE0000007L)
/*
* MessageId: BE_STATUS_NOT_SUPPORTED
* Requested operation is not supported.
*/
#define BE_STATUS_NOT_SUPPORTED (0xE000000DL)
/*
* ***************************************************************************
* E T H E R N E T S T A T U S
* ***************************************************************************
*/
/*
* MessageId: BE_ETH_TX_ERROR
* The Ethernet device driver failed to transmit a packet.
*/
#define BE_ETH_TX_ERROR (0xE0070101L)
/*
* ***************************************************************************
* S H A R E D S T A T U S
* ***************************************************************************
*/
/*
* MessageId: BE_STATUS_VBD_INVALID_VERSION
* The device driver is not compatible with this version of the VBD.
*/
#define BE_STATUS_INVALID_VERSION (0xE0070402L)
/*
* MessageId: BE_STATUS_DOMAIN_DENIED
* The operation failed to complete due to insufficient access
* rights for the requesting domain.
*/
#define BE_STATUS_DOMAIN_DENIED (0xE0070403L)
/*
* MessageId: BE_STATUS_TCP_NOT_STARTED
* The embedded TCP/IP stack has not been started.
*/
#define BE_STATUS_TCP_NOT_STARTED (0xE0070409L)
/*
* MessageId: BE_STATUS_NO_MCC_WRB
* No free MCC WRB are available for posting the request.
*/
#define BE_STATUS_NO_MCC_WRB (0xE0070414L)
#endif /* _BESTATUS_ */

View File

@ -1,243 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __cev_amap_h__
#define __cev_amap_h__
#include "ep.h"
/*
 * Host Interrupt Status Register 0. The first of four application
 * interrupt status registers. This register contains the interrupts
 * for Event Queues EQ0 through EQ31.
 */
/* 32 one-bit fields (one per EQ) packed into the single dword below. */
struct BE_CEV_ISR0_CSR_AMAP {
	u8 interrupt0;	/* DWORD 0 */
	u8 interrupt1;	/* DWORD 0 */
	u8 interrupt2;	/* DWORD 0 */
	u8 interrupt3;	/* DWORD 0 */
	u8 interrupt4;	/* DWORD 0 */
	u8 interrupt5;	/* DWORD 0 */
	u8 interrupt6;	/* DWORD 0 */
	u8 interrupt7;	/* DWORD 0 */
	u8 interrupt8;	/* DWORD 0 */
	u8 interrupt9;	/* DWORD 0 */
	u8 interrupt10;	/* DWORD 0 */
	u8 interrupt11;	/* DWORD 0 */
	u8 interrupt12;	/* DWORD 0 */
	u8 interrupt13;	/* DWORD 0 */
	u8 interrupt14;	/* DWORD 0 */
	u8 interrupt15;	/* DWORD 0 */
	u8 interrupt16;	/* DWORD 0 */
	u8 interrupt17;	/* DWORD 0 */
	u8 interrupt18;	/* DWORD 0 */
	u8 interrupt19;	/* DWORD 0 */
	u8 interrupt20;	/* DWORD 0 */
	u8 interrupt21;	/* DWORD 0 */
	u8 interrupt22;	/* DWORD 0 */
	u8 interrupt23;	/* DWORD 0 */
	u8 interrupt24;	/* DWORD 0 */
	u8 interrupt25;	/* DWORD 0 */
	u8 interrupt26;	/* DWORD 0 */
	u8 interrupt27;	/* DWORD 0 */
	u8 interrupt28;	/* DWORD 0 */
	u8 interrupt29;	/* DWORD 0 */
	u8 interrupt30;	/* DWORD 0 */
	u8 interrupt31;	/* DWORD 0 */
} __packed;
/* Dword (register-level) view of the same CSR. */
struct CEV_ISR0_CSR_AMAP {
	u32 dw[1];
};
/*
 * Host Interrupt Status Register 1. The second of four application
 * interrupt status registers. This register contains the interrupts
 * for Event Queues EQ32 through EQ63.
 */
/* 32 one-bit fields (one per EQ) packed into the single dword below. */
struct BE_CEV_ISR1_CSR_AMAP {
	u8 interrupt32;	/* DWORD 0 */
	u8 interrupt33;	/* DWORD 0 */
	u8 interrupt34;	/* DWORD 0 */
	u8 interrupt35;	/* DWORD 0 */
	u8 interrupt36;	/* DWORD 0 */
	u8 interrupt37;	/* DWORD 0 */
	u8 interrupt38;	/* DWORD 0 */
	u8 interrupt39;	/* DWORD 0 */
	u8 interrupt40;	/* DWORD 0 */
	u8 interrupt41;	/* DWORD 0 */
	u8 interrupt42;	/* DWORD 0 */
	u8 interrupt43;	/* DWORD 0 */
	u8 interrupt44;	/* DWORD 0 */
	u8 interrupt45;	/* DWORD 0 */
	u8 interrupt46;	/* DWORD 0 */
	u8 interrupt47;	/* DWORD 0 */
	u8 interrupt48;	/* DWORD 0 */
	u8 interrupt49;	/* DWORD 0 */
	u8 interrupt50;	/* DWORD 0 */
	u8 interrupt51;	/* DWORD 0 */
	u8 interrupt52;	/* DWORD 0 */
	u8 interrupt53;	/* DWORD 0 */
	u8 interrupt54;	/* DWORD 0 */
	u8 interrupt55;	/* DWORD 0 */
	u8 interrupt56;	/* DWORD 0 */
	u8 interrupt57;	/* DWORD 0 */
	u8 interrupt58;	/* DWORD 0 */
	u8 interrupt59;	/* DWORD 0 */
	u8 interrupt60;	/* DWORD 0 */
	u8 interrupt61;	/* DWORD 0 */
	u8 interrupt62;	/* DWORD 0 */
	u8 interrupt63;	/* DWORD 0 */
} __packed;
/* Dword (register-level) view of the same CSR. */
struct CEV_ISR1_CSR_AMAP {
	u32 dw[1];
};
/*
 * Host Interrupt Status Register 2. The third of four application
 * interrupt status registers. This register contains the interrupts
 * for Event Queues EQ64 through EQ95.
 */
/* 32 one-bit fields (one per EQ) packed into the single dword below. */
struct BE_CEV_ISR2_CSR_AMAP {
	u8 interrupt64;	/* DWORD 0 */
	u8 interrupt65;	/* DWORD 0 */
	u8 interrupt66;	/* DWORD 0 */
	u8 interrupt67;	/* DWORD 0 */
	u8 interrupt68;	/* DWORD 0 */
	u8 interrupt69;	/* DWORD 0 */
	u8 interrupt70;	/* DWORD 0 */
	u8 interrupt71;	/* DWORD 0 */
	u8 interrupt72;	/* DWORD 0 */
	u8 interrupt73;	/* DWORD 0 */
	u8 interrupt74;	/* DWORD 0 */
	u8 interrupt75;	/* DWORD 0 */
	u8 interrupt76;	/* DWORD 0 */
	u8 interrupt77;	/* DWORD 0 */
	u8 interrupt78;	/* DWORD 0 */
	u8 interrupt79;	/* DWORD 0 */
	u8 interrupt80;	/* DWORD 0 */
	u8 interrupt81;	/* DWORD 0 */
	u8 interrupt82;	/* DWORD 0 */
	u8 interrupt83;	/* DWORD 0 */
	u8 interrupt84;	/* DWORD 0 */
	u8 interrupt85;	/* DWORD 0 */
	u8 interrupt86;	/* DWORD 0 */
	u8 interrupt87;	/* DWORD 0 */
	u8 interrupt88;	/* DWORD 0 */
	u8 interrupt89;	/* DWORD 0 */
	u8 interrupt90;	/* DWORD 0 */
	u8 interrupt91;	/* DWORD 0 */
	u8 interrupt92;	/* DWORD 0 */
	u8 interrupt93;	/* DWORD 0 */
	u8 interrupt94;	/* DWORD 0 */
	u8 interrupt95;	/* DWORD 0 */
} __packed;
/* Dword (register-level) view of the same CSR. */
struct CEV_ISR2_CSR_AMAP {
	u32 dw[1];
};
/*
 * Host Interrupt Status Register 3. The fourth of four application
 * interrupt status registers. This register contains the interrupts
 * for Event Queues EQ96 through EQ127.
 */
/* 32 one-bit fields (one per EQ) packed into the single dword below. */
struct BE_CEV_ISR3_CSR_AMAP {
	u8 interrupt96;	/* DWORD 0 */
	u8 interrupt97;	/* DWORD 0 */
	u8 interrupt98;	/* DWORD 0 */
	u8 interrupt99;	/* DWORD 0 */
	u8 interrupt100;	/* DWORD 0 */
	u8 interrupt101;	/* DWORD 0 */
	u8 interrupt102;	/* DWORD 0 */
	u8 interrupt103;	/* DWORD 0 */
	u8 interrupt104;	/* DWORD 0 */
	u8 interrupt105;	/* DWORD 0 */
	u8 interrupt106;	/* DWORD 0 */
	u8 interrupt107;	/* DWORD 0 */
	u8 interrupt108;	/* DWORD 0 */
	u8 interrupt109;	/* DWORD 0 */
	u8 interrupt110;	/* DWORD 0 */
	u8 interrupt111;	/* DWORD 0 */
	u8 interrupt112;	/* DWORD 0 */
	u8 interrupt113;	/* DWORD 0 */
	u8 interrupt114;	/* DWORD 0 */
	u8 interrupt115;	/* DWORD 0 */
	u8 interrupt116;	/* DWORD 0 */
	u8 interrupt117;	/* DWORD 0 */
	u8 interrupt118;	/* DWORD 0 */
	u8 interrupt119;	/* DWORD 0 */
	u8 interrupt120;	/* DWORD 0 */
	u8 interrupt121;	/* DWORD 0 */
	u8 interrupt122;	/* DWORD 0 */
	u8 interrupt123;	/* DWORD 0 */
	u8 interrupt124;	/* DWORD 0 */
	u8 interrupt125;	/* DWORD 0 */
	u8 interrupt126;	/* DWORD 0 */
	u8 interrupt127;	/* DWORD 0 */
} __packed;
/* Dword (register-level) view of the same CSR. */
struct CEV_ISR3_CSR_AMAP {
	u32 dw[1];
};
/* Completions and Events block Registers. */
/*
 * Bit-level AMAP layout of the CEV CSR block; the four ISR registers sit
 * at dwords 4-7 (after the four reserved dwords below).  The trailing
 * "DWORD n" comments give each field's starting dword in the 256-dword
 * register view declared at the end.
 */
struct BE_CEV_CSRMAP_AMAP {
	u8 rsvd0[32];	/* DWORD 0 */
	u8 rsvd1[32];	/* DWORD 1 */
	u8 rsvd2[32];	/* DWORD 2 */
	u8 rsvd3[32];	/* DWORD 3 */
	struct BE_CEV_ISR0_CSR_AMAP isr0;
	struct BE_CEV_ISR1_CSR_AMAP isr1;
	struct BE_CEV_ISR2_CSR_AMAP isr2;
	struct BE_CEV_ISR3_CSR_AMAP isr3;
	u8 rsvd4[32];	/* DWORD 8 */
	u8 rsvd5[32];	/* DWORD 9 */
	u8 rsvd6[32];	/* DWORD 10 */
	u8 rsvd7[32];	/* DWORD 11 */
	u8 rsvd8[32];	/* DWORD 12 */
	u8 rsvd9[32];	/* DWORD 13 */
	u8 rsvd10[32];	/* DWORD 14 */
	u8 rsvd11[32];	/* DWORD 15 */
	u8 rsvd12[32];	/* DWORD 16 */
	u8 rsvd13[32];	/* DWORD 17 */
	u8 rsvd14[32];	/* DWORD 18 */
	u8 rsvd15[32];	/* DWORD 19 */
	u8 rsvd16[32];	/* DWORD 20 */
	u8 rsvd17[32];	/* DWORD 21 */
	u8 rsvd18[32];	/* DWORD 22 */
	u8 rsvd19[32];	/* DWORD 23 */
	u8 rsvd20[32];	/* DWORD 24 */
	u8 rsvd21[32];	/* DWORD 25 */
	u8 rsvd22[32];	/* DWORD 26 */
	u8 rsvd23[32];	/* DWORD 27 */
	u8 rsvd24[32];	/* DWORD 28 */
	u8 rsvd25[32];	/* DWORD 29 */
	u8 rsvd26[32];	/* DWORD 30 */
	u8 rsvd27[32];	/* DWORD 31 */
	u8 rsvd28[32];	/* DWORD 32 */
	u8 rsvd29[32];	/* DWORD 33 */
	u8 rsvd30[192];	/* DWORD 34 */
	u8 rsvd31[192];	/* DWORD 40 */
	u8 rsvd32[160];	/* DWORD 46 */
	u8 rsvd33[160];	/* DWORD 51 */
	u8 rsvd34[160];	/* DWORD 56 */
	u8 rsvd35[96];	/* DWORD 61 */
	u8 rsvd36[192][32];	/* DWORD 64 */
} __packed;
/* Dword (register-level) view of the CEV CSR block. */
struct CEV_CSRMAP_AMAP {
	u32 dw[256];
};
#endif /* __cev_amap_h__ */

View File

@ -1,211 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
#include "hwlib.h"
#include "bestatus.h"
/*
* Completion Queue Objects
*/
/*
*============================================================================
* P U B L I C R O U T I N E S
*============================================================================
*/
/*
 * be_cq_create() - create a completion queue based on the client
 * completion queue configuration information.
 *
 * pfob                - handle to a function object
 * rd                  - ring descriptor for the CQ ring (base VA/PA)
 * length              - CQ ring size in bytes; must be a whole number of
 *                       CQ entries, and the entry count must encode to a
 *                       CEV_CQ_CNT_* value (256, 512 or 1024 entries)
 * solicited_eventable - 0 = all CQEs can generate events if the CQ is
 *                       eventable, 1 = only CQEs with the solicited bit
 *                       set are eventable
 * no_delay            - 1 = force interrupt, relevant if CQ eventable.
 *                       Interrupt is asserted immediately after EQE
 *                       write is confirmed, regardless of EQ Timer
 *                       or watermark settings.
 * wm_thresh           - high watermark (CQ fullness at which event or
 *                       interrupt should be asserted), CEV_WATERMARK
 *                       encoded; 0xFFFFFFFF disables watermarking
 * eq_object           - EQ handle to assign to this CQ, or NULL for a
 *                       non-eventable CQ
 * cq_object           - caller-provided CQ object, initialized here
 *
 * Returns BE_SUCCESS if successful, otherwise a useful error code.
 * IRQL < DISPATCH_LEVEL
 */
int be_cq_create(struct be_function_object *pfob,
	struct ring_desc *rd, u32 length, bool solicited_eventable,
	bool no_delay, u32 wm_thresh,
	struct be_eq_object *eq_object, struct be_cq_object *cq_object)
{
	int status = BE_SUCCESS;
	u32 num_entries_encoding;
	u32 num_entries = length / sizeof(struct MCC_CQ_ENTRY_AMAP);
	struct FWCMD_COMMON_CQ_CREATE *fwcmd = NULL;
	struct MCC_WRB_AMAP *wrb = NULL;
	u32 n;
	unsigned long irql;

	ASSERT(rd);
	ASSERT(cq_object);
	ASSERT(length % sizeof(struct MCC_CQ_ENTRY_AMAP) == 0);

	/* Only the ring sizes the CEV hardware understands are accepted. */
	switch (num_entries) {
	case 256:
		num_entries_encoding = CEV_CQ_CNT_256;
		break;
	case 512:
		num_entries_encoding = CEV_CQ_CNT_512;
		break;
	case 1024:
		num_entries_encoding = CEV_CQ_CNT_1024;
		break;
	default:
		ASSERT(0);
		return BE_STATUS_INVALID_PARAMETER;
	}

	/*
	 * All cq entries are the same size.  Use the iSCSI version
	 * as a test for the proper rd length.
	 */
	memset(cq_object, 0, sizeof(*cq_object));

	atomic_set(&cq_object->ref_count, 0);
	cq_object->parent_function = pfob;
	cq_object->eq_object = eq_object;
	cq_object->num_entries = num_entries;
	/* save for MCC cq processing */
	cq_object->va = rd->va;

	/* map into UT. */
	length = num_entries * sizeof(struct MCC_CQ_ENTRY_AMAP);

	spin_lock_irqsave(&pfob->post_lock, irql);

	wrb = be_function_peek_mcc_wrb(pfob);
	if (!wrb) {
		ASSERT(wrb);
		TRACE(DL_ERR, "No free MCC WRBs in create CQ.");
		status = BE_STATUS_NO_MCC_WRB;
		goto Error;
	}
	/* Prepares an embedded fwcmd, including request/response sizes. */
	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_CQ_CREATE);

	fwcmd->params.request.num_pages = PAGES_SPANNED(OFFSET_IN_PAGE(rd->va),
									length);

	AMAP_SET_BITS_PTR(CQ_CONTEXT, valid, &fwcmd->params.request.context, 1);
	n = pfob->pci_function_number;
	AMAP_SET_BITS_PTR(CQ_CONTEXT, Func, &fwcmd->params.request.context, n);

	/* Eventable iff the caller attached an EQ. */
	n = (eq_object != NULL);
	AMAP_SET_BITS_PTR(CQ_CONTEXT, Eventable,
				&fwcmd->params.request.context, n);
	AMAP_SET_BITS_PTR(CQ_CONTEXT, Armed, &fwcmd->params.request.context, 1);

	n = eq_object ? eq_object->eq_id : 0;
	AMAP_SET_BITS_PTR(CQ_CONTEXT, EQID, &fwcmd->params.request.context, n);
	AMAP_SET_BITS_PTR(CQ_CONTEXT, Count,
			&fwcmd->params.request.context, num_entries_encoding);

	n = 0;	/* Protection Domain is always 0 in Linux driver */
	AMAP_SET_BITS_PTR(CQ_CONTEXT, PD, &fwcmd->params.request.context, n);
	AMAP_SET_BITS_PTR(CQ_CONTEXT, NoDelay,
				&fwcmd->params.request.context, no_delay);
	AMAP_SET_BITS_PTR(CQ_CONTEXT, SolEvent,
			&fwcmd->params.request.context, solicited_eventable);

	/* wm_thresh == 0xFFFFFFFF means "watermarking disabled". */
	n = (wm_thresh != 0xFFFFFFFF);
	AMAP_SET_BITS_PTR(CQ_CONTEXT, WME, &fwcmd->params.request.context, n);

	n = (n ? wm_thresh : 0);
	AMAP_SET_BITS_PTR(CQ_CONTEXT, Watermark,
				&fwcmd->params.request.context, n);
	/* Create a page list for the FWCMD. */
	be_rd_to_pa_list(rd, fwcmd->params.request.pages,
			  ARRAY_SIZE(fwcmd->params.request.pages));

	/* Post the f/w command */
	status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
					NULL, NULL, fwcmd, NULL);
	if (status != BE_SUCCESS) {
		TRACE(DL_ERR, "MCC to create CQ failed.");
		goto Error;
	}
	/* Remember the CQ id. */
	cq_object->cq_id = fwcmd->params.response.cq_id;

	/* insert this cq into eq_object reference */
	if (eq_object) {
		atomic_inc(&eq_object->ref_count);
		list_add_tail(&cq_object->cqlist_for_eq,
				&eq_object->cq_list_head);
	}

Error:
	spin_unlock_irqrestore(&pfob->post_lock, irql);

	if (pfob->pend_queue_driving && pfob->mcc) {
		pfob->pend_queue_driving = 0;
		be_drive_mcc_wrb_queue(pfob->mcc);
	}
	return status;
}
/*
 * Dereferences the given object. Once the object's reference count drops
 * to zero, the object is destroyed and all resources that are held by this
 * object are released.  The on-chip context is also destroyed along with
 * the queue ID, and any mappings made into the UT.
 *
 * cq_object - CQ handle returned from cq_object_create.
 *
 * Returns BE_SUCCESS.  NOTE(review): the status of the ring-destroy fwcmd
 * is only checked via ASSERT(), so a firmware failure is silently ignored
 * in builds where ASSERT is a no-op.
 *
 * IRQL: IRQL < DISPATCH_LEVEL
 */
int be_cq_destroy(struct be_cq_object *cq_object)
{
	int status = 0;

	/* Nothing should reference this CQ at this point. */
	ASSERT(atomic_read(&cq_object->ref_count) == 0);

	/* Send fwcmd to destroy the CQ. */
	status = be_function_ring_destroy(cq_object->parent_function,
		     cq_object->cq_id, FWCMD_RING_TYPE_CQ,
					NULL, NULL, NULL, NULL);
	ASSERT(status == 0);

	/* Remove reference if this is an eventable CQ. */
	if (cq_object->eq_object) {
		atomic_dec(&cq_object->eq_object->ref_count);
		list_del(&cq_object->cqlist_for_eq);
	}
	return BE_SUCCESS;
}

View File

@ -1,71 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __descriptors_amap_h__
#define __descriptors_amap_h__
/*
* --- IPC_NODE_ID_ENUM ---
* IPC processor id values
*/
#define TPOST_NODE_ID (0) /* TPOST ID */
#define TPRE_NODE_ID (1) /* TPRE ID */
#define TXULP0_NODE_ID (2) /* TXULP0 ID */
#define TXULP1_NODE_ID (3) /* TXULP1 ID */
#define TXULP2_NODE_ID (4) /* TXULP2 ID */
#define RXULP0_NODE_ID (5) /* RXULP0 ID */
#define RXULP1_NODE_ID (6) /* RXULP1 ID */
#define RXULP2_NODE_ID (7) /* RXULP2 ID */
#define MPU_NODE_ID (15) /* MPU ID */
/*
* --- MAC_ID_ENUM ---
* Meaning of the mac_id field in rxpp_eth_d
*/
#define PORT0_HOST_MAC0 (0) /* PD 0, Port 0, host networking, MAC 0. */
#define PORT0_HOST_MAC1 (1) /* PD 0, Port 0, host networking, MAC 1. */
#define PORT0_STORAGE_MAC0 (2) /* PD 0, Port 0, host storage, MAC 0. */
#define PORT0_STORAGE_MAC1 (3) /* PD 0, Port 0, host storage, MAC 1. */
#define PORT1_HOST_MAC0 (4) /* PD 0, Port 1 host networking, MAC 0. */
#define PORT1_HOST_MAC1 (5) /* PD 0, Port 1 host networking, MAC 1. */
#define PORT1_STORAGE_MAC0 (6) /* PD 0, Port 1 host storage, MAC 0. */
#define PORT1_STORAGE_MAC1 (7) /* PD 0, Port 1 host storage, MAC 1. */
#define FIRST_VM_MAC (8) /* PD 1 MAC. Protection domains have IDs */
/* from 0x8-0x26, one per PD. */
#define LAST_VM_MAC (38) /* PD 31 MAC. */
#define MGMT_MAC (39) /* Management port MAC. */
#define MARBLE_MAC0 (59) /* Used for flushing function 0 receive */
/*
* queues before re-using a torn-down
* receive ring. the DA =
* 00-00-00-00-00-00, and the MSB of the
* SA = 00
*/
#define MARBLE_MAC1 (60) /* Used for flushing function 1 receive */
/*
* queues before re-using a torn-down
* receive ring. the DA =
* 00-00-00-00-00-00, and the MSB of the
* SA != 00
*/
#define NULL_MAC (61) /* Promiscuous mode, indicates no match */
#define MCAST_MAC (62) /* Multicast match. */
#define BCAST_MATCH (63) /* Broadcast match. */
#endif /* __descriptors_amap_h__ */

View File

@ -1,179 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __doorbells_amap_h__
#define __doorbells_amap_h__
/* The TX/RDMA send queue doorbell. */
/*
 * NOTE: in these AMAP structs the u8 array length is the field's width
 * in bits (11 + 5 + 14 + 2 = 32 bits = the one dword below).
 */
struct BE_SQ_DB_AMAP {
	u8 cid[11];		/* DWORD 0 */
	u8 rsvd0[5];		/* DWORD 0 */
	u8 numPosted[14];	/* DWORD 0 */
	u8 rsvd1[2];		/* DWORD 0 */
} __packed;
/* Dword view of the doorbell register. */
struct SQ_DB_AMAP {
	u32 dw[1];
};
/* The receive queue doorbell. */
/* Bit widths: 10 + 13 + 1 + 8 = 32 bits (one dword). */
struct BE_RQ_DB_AMAP {
	u8 rq[10];		/* DWORD 0 */
	u8 rsvd0[13];		/* DWORD 0 */
	u8 Invalidate;		/* DWORD 0 */
	u8 numPosted[8];	/* DWORD 0 */
} __packed;
/* Dword view of the doorbell register. */
struct RQ_DB_AMAP {
	u32 dw[1];
};
/*
 * The CQ/EQ doorbell. Software MUST set reserved fields in this
 * descriptor to zero, otherwise (CEV) hardware will not execute the
 * doorbell (flagging a bad_db_qid error instead).
 */
struct BE_CQ_DB_AMAP {
	u8 qid[10];		/* DWORD 0 */
	u8 rsvd0[4];		/* DWORD 0 */
	u8 rearm;		/* DWORD 0 */
	u8 event;		/* DWORD 0 */
	u8 num_popped[13];	/* DWORD 0 */
	u8 rsvd1[3];		/* DWORD 0 */
} __packed;
/* Dword view of the doorbell register. */
struct CQ_DB_AMAP {
	u32 dw[1];
};
/* TPM receive queue doorbell. */
struct BE_TPM_RQ_DB_AMAP {
	u8 qid[10];		/* DWORD 0 */
	u8 rsvd0[6];		/* DWORD 0 */
	u8 numPosted[11];	/* DWORD 0 */
	u8 mss_cnt[5];		/* DWORD 0 */
} __packed;
/* Dword view of the doorbell register. */
struct TPM_RQ_DB_AMAP {
	u32 dw[1];
};
/*
 * Post WRB Queue Doorbell Register used by the host Storage stack
 * to notify the controller of a posted Work Request Block
 */
struct BE_WRB_POST_DB_AMAP {
	u8 wrb_cid[10];		/* DWORD 0 */
	u8 rsvd0[6];		/* DWORD 0 */
	u8 wrb_index[8];	/* DWORD 0 */
	u8 numberPosted[8];	/* DWORD 0 */
} __packed;
/* Dword view of the doorbell register. */
struct WRB_POST_DB_AMAP {
	u32 dw[1];
};
/*
* Update Default PDU Queue Doorbell Register used to communicate
* to the controller that the driver has stopped processing the queue
* and where in the queue it stopped, this is
* a CQ Entry Type. Used by storage driver.
*/
/*
 * Bit layout of the 32-bit default PDU queue doorbell word (one bit
 * per u8 element; fields total 32 bits).  Used by the storage driver
 * to report how far it has processed the queue.
 */
struct BE_DEFAULT_PDU_DB_AMAP {
	u8 qid[10];		/* DWORD 0 - queue id, 10 bits */
	u8 rsvd0[4];		/* DWORD 0 */
	u8 rearm;		/* DWORD 0 - 1 bit */
	u8 event;		/* DWORD 0 - 1 bit */
	u8 cqproc[14];		/* DWORD 0 - CQ entries processed, 14 bits */
	u8 rsvd1[2];		/* DWORD 0 */
} __packed;
/* Word-granular view of the same doorbell register. */
struct DEFAULT_PDU_DB_AMAP {
	u32 dw[1];
};
/* Management Command and Controller default fragment ring */
/*
 * Bit layout of the 32-bit MCC ring doorbell word (one bit per u8
 * element; fields total 32 bits).
 */
struct BE_MCC_DB_AMAP {
	u8 rid[11];		/* DWORD 0 - ring id, 11 bits */
	u8 rsvd0[5];		/* DWORD 0 */
	u8 numPosted[14];	/* DWORD 0 - WRBs posted, 14 bits */
	u8 rsvd1[2];		/* DWORD 0 */
} __packed;
/* Word-granular view of the same doorbell register. */
struct MCC_DB_AMAP {
	u32 dw[1];
};
/*
* Used for bootstrapping the Host interface. This register is
* used for driver communication with the MPU when no MCC Rings exist.
* The software must write this register twice to post any MCC
* command. First, it writes the register with hi=1 and the upper bits of
* the physical address for the MCC_MAILBOX structure. Software must poll
 * the ready bit until this is acknowledged. Then, software writes the
* register with hi=0 with the lower bits in the address. It must
* poll the ready bit until the MCC command is complete. Upon completion,
* the MCC_MAILBOX will contain a valid completion queue entry.
*/
/*
 * Bit layout of the 32-bit MPU bootstrap mailbox doorbell (one bit
 * per u8 element; fields total 32 bits).  Per the protocol described
 * above, software writes the MCC_MAILBOX physical address in two
 * halves (hi=1 then hi=0), polling 'ready' after each write.
 */
struct BE_MPU_MAILBOX_DB_AMAP {
	u8 ready;		/* DWORD 0 - ack/poll bit */
	u8 hi;			/* DWORD 0 - selects upper vs lower address bits */
	u8 address[30];		/* DWORD 0 - address bits, 30 bits */
} __packed;
/* Word-granular view of the same doorbell register. */
struct MPU_MAILBOX_DB_AMAP {
	u32 dw[1];
};
/*
* This is the protection domain doorbell register map. Note that
* while this map shows doorbells for all Blade Engine supported
* protocols, not all of these may be valid in a given function or
* protection domain. It is the responsibility of the application
* accessing the doorbells to know which are valid. Each doorbell
* occupies 32 bytes of space, but unless otherwise specified,
* only the first 4 bytes should be written. There are 32 instances
* of these doorbells for the host and 31 virtual machines respectively.
* The host and VMs will only map the doorbell pages belonging to its
* protection domain. It will not be able to touch the doorbells for
* another VM. The doorbells are the only registers directly accessible
* by a virtual machine. Similarly, there are 511 additional
* doorbells for RDMA protection domains. PD 0 for RDMA shares
* the same physical protection domain doorbell page as ETH/iSCSI.
*
*/
/*
 * Bit layout of one protection-domain doorbell page (1024 DWORDs =
 * 4KB total, matching PROTECTION_DOMAIN_DBMAP_AMAP below).  Each
 * doorbell occupies a 32-byte slot (4-byte doorbell + 28 bytes
 * reserved, expressed here as rsvdN[7][32] bit arrays); per the note
 * above, only the first 4 bytes of each slot are written.
 */
struct BE_PROTECTION_DOMAIN_DBMAP_AMAP {
	u8 rsvd0[512];	/* DWORD 0 - 64 bytes reserved before first doorbell */
	struct BE_SQ_DB_AMAP rdma_sq_db;
	u8 rsvd1[7][32];	/* DWORD 17 */
	struct BE_WRB_POST_DB_AMAP iscsi_wrb_post_db;
	u8 rsvd2[7][32];	/* DWORD 25 */
	struct BE_SQ_DB_AMAP etx_sq_db;
	u8 rsvd3[7][32];	/* DWORD 33 */
	struct BE_RQ_DB_AMAP rdma_rq_db;
	u8 rsvd4[7][32];	/* DWORD 41 */
	struct BE_DEFAULT_PDU_DB_AMAP iscsi_default_pdu_db;
	u8 rsvd5[7][32];	/* DWORD 49 */
	struct BE_TPM_RQ_DB_AMAP tpm_rq_db;
	u8 rsvd6[7][32];	/* DWORD 57 */
	struct BE_RQ_DB_AMAP erx_rq_db;
	u8 rsvd7[7][32];	/* DWORD 65 */
	struct BE_CQ_DB_AMAP cq_db;
	u8 rsvd8[7][32];	/* DWORD 73 */
	struct BE_MCC_DB_AMAP mpu_mcc_db;
	u8 rsvd9[7][32];	/* DWORD 81 */
	struct BE_MPU_MAILBOX_DB_AMAP mcc_bootstrap_db;
	u8 rsvd10[935][32];	/* DWORD 89 - pad to end of the 4KB page */
} __packed;
/* Word-granular view of the doorbell page. */
struct PROTECTION_DOMAIN_DBMAP_AMAP {
	u32 dw[1024];
};
#endif /* __doorbells_amap_h__ */

View File

@ -1,66 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __ep_amap_h__
#define __ep_amap_h__
/* General Control and Status Register. */
/*
 * Bit layout of the embedded-processor general control/status register
 * (one bit per u8 element; fields total 32 bits).
 */
struct BE_EP_CONTROL_CSR_AMAP {
	u8 m0_RxPbuf;		/* DWORD 0 - 1 bit */
	u8 m1_RxPbuf;		/* DWORD 0 - 1 bit */
	u8 m2_RxPbuf;		/* DWORD 0 - 1 bit */
	u8 ff_en;		/* DWORD 0 - 1 bit */
	u8 rsvd0[27];		/* DWORD 0 */
	u8 CPU_reset;		/* DWORD 0 - 1 bit */
} __packed;
/* Word-granular view of the same register. */
struct EP_CONTROL_CSR_AMAP {
	u32 dw[1];
};
/* Semaphore Register. */
/* Embedded-processor semaphore register: a single 32-bit value. */
struct BE_EP_SEMAPHORE_CSR_AMAP {
	u8 value[32];	/* DWORD 0 - full 32-bit value, one bit per element */
} __packed;
/* Word-granular view of the same register. */
struct EP_SEMAPHORE_CSR_AMAP {
	u32 dw[1];
};
/* Embedded Processor Specific Registers. */
/*
 * Embedded-processor CSR block: 64 DWORDs total (matches EP_CSRMAP_AMAP
 * below).  Only the control register (DWORD 0) and the semaphore
 * register (DWORD 43) are named; everything else is reserved.
 */
struct BE_EP_CSRMAP_AMAP {
	struct BE_EP_CONTROL_CSR_AMAP ep_control;
	u8 rsvd0[32];	/* DWORD 1 */
	u8 rsvd1[32];	/* DWORD 2 */
	u8 rsvd2[32];	/* DWORD 3 */
	u8 rsvd3[32];	/* DWORD 4 */
	u8 rsvd4[32];	/* DWORD 5 */
	u8 rsvd5[8][128];	/* DWORD 6 - 32 reserved DWORDs */
	u8 rsvd6[32];	/* DWORD 38 */
	u8 rsvd7[32];	/* DWORD 39 */
	u8 rsvd8[32];	/* DWORD 40 */
	u8 rsvd9[32];	/* DWORD 41 */
	u8 rsvd10[32];	/* DWORD 42 */
	struct BE_EP_SEMAPHORE_CSR_AMAP ep_semaphore;
	u8 rsvd11[32];	/* DWORD 44 */
	u8 rsvd12[19][32];	/* DWORD 45 - pad to 64 DWORDs */
} __packed;
/* Word-granular view of the CSR block. */
struct EP_CSRMAP_AMAP {
	u32 dw[64];
};
#endif /* __ep_amap_h__ */

View File

@ -1,299 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
#include "hwlib.h"
#include "bestatus.h"
/*
This routine creates an event queue based on the client completion
queue configuration information.
FunctionObject - Handle to a function object
EqBaseVa - Base VA for a the EQ ring
SizeEncoding - The encoded size for the EQ entries. This value is
either CEV_EQ_SIZE_4 or CEV_EQ_SIZE_16
NumEntries - CEV_CQ_CNT_* values.
Watermark - Enables watermark based coalescing. This parameter
must be of the type CEV_WMARK_* if watermarks
			are enabled. If watermarks are to be disabled
			this value should be -1.
TimerDelay - If a timer delay is enabled this value should be the
time of the delay in 8 microsecond units. If
delays are not used this parameter should be
set to -1.
ppEqObject - Internal EQ Handle returned.
    Returns BE_SUCCESS if successful, otherwise a useful error code
is returned.
IRQL < DISPATCH_LEVEL
*/
/*
 * Create an event queue (EQ) on the controller from the given host ring
 * memory, by posting an embedded COMMON_EQ_CREATE FWCMD on an MCC WRB.
 *
 * pfob        - function object whose MCC ring / mailbox is used to post.
 * rd          - ring descriptor for the host memory backing the EQ.
 * eqe_size    - EQ entry size in bytes; must be 4 or 16.
 * num_entries - ring depth; must be 256/512/1024/2048/4096.  Hardware
 *               additionally requires >= 1024 entries for 4-byte EQEs
 *               and rejects 4096 entries with 16-byte EQEs.
 * watermark   - CEV_WMARK_* to enable watermark coalescing, or -1.
 * timer_delay - coalescing delay in 8us units (clamped to 250), or -1.
 * eq_object   - out: zeroed and initialized; eq_id is assigned by the MPU.
 *
 * Returns BE_SUCCESS, or a BE_STATUS_* error code on failure.
 */
int
be_eq_create(struct be_function_object *pfob,
	struct ring_desc *rd, u32 eqe_size, u32 num_entries,
	u32 watermark,	/* CEV_WMARK_* or -1 */
	u32 timer_delay,	/* in 8us units, or -1 */
	struct be_eq_object *eq_object)
{
	int status = BE_SUCCESS;
	u32 num_entries_encoding, eqe_size_encoding, length;
	struct FWCMD_COMMON_EQ_CREATE *fwcmd = NULL;
	struct MCC_WRB_AMAP *wrb = NULL;
	u32 n;
	unsigned long irql;

	ASSERT(rd);
	ASSERT(eq_object);

	/* Translate the requested depth into the CEV_EQ_CNT_* encoding. */
	switch (num_entries) {
	case 256:
		num_entries_encoding = CEV_EQ_CNT_256;
		break;
	case 512:
		num_entries_encoding = CEV_EQ_CNT_512;
		break;
	case 1024:
		num_entries_encoding = CEV_EQ_CNT_1024;
		break;
	case 2048:
		num_entries_encoding = CEV_EQ_CNT_2048;
		break;
	case 4096:
		num_entries_encoding = CEV_EQ_CNT_4096;
		break;
	default:
		ASSERT(0);
		return BE_STATUS_INVALID_PARAMETER;
	}

	/* Translate the entry size into the CEV_EQ_SIZE_* encoding. */
	switch (eqe_size) {
	case 4:
		eqe_size_encoding = CEV_EQ_SIZE_4;
		break;
	case 16:
		eqe_size_encoding = CEV_EQ_SIZE_16;
		break;
	default:
		ASSERT(0);
		return BE_STATUS_INVALID_PARAMETER;
	}

	/* Reject size/depth combinations the hardware does not support. */
	if ((eqe_size == 4 && num_entries < 1024) ||
	    (eqe_size == 16 && num_entries == 4096)) {
		TRACE(DL_ERR, "Bad EQ size. eqe_size:%d num_entries:%d",
		      eqe_size, num_entries);
		ASSERT(0);
		return BE_STATUS_INVALID_PARAMETER;
	}

	/* Initialize the host-side EQ object before talking to hardware. */
	memset(eq_object, 0, sizeof(*eq_object));
	atomic_set(&eq_object->ref_count, 0);
	eq_object->parent_function = pfob;
	eq_object->eq_id = 0xFFFFFFFF;	/* invalid until the MPU assigns one */
	INIT_LIST_HEAD(&eq_object->cq_list_head);

	length = num_entries * eqe_size;

	spin_lock_irqsave(&pfob->post_lock, irql);
	wrb = be_function_peek_mcc_wrb(pfob);
	if (!wrb) {
		ASSERT(wrb);
		TRACE(DL_ERR, "No free MCC WRBs in create EQ.");
		status = BE_STATUS_NO_MCC_WRB;
		goto Error;
	}
	/* Prepares an embedded fwcmd, including request/response sizes. */
	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_EQ_CREATE);

	fwcmd->params.request.num_pages = PAGES_SPANNED(OFFSET_IN_PAGE(rd->va),
							length);
	/* Fill in the EQ context: owning function, size, depth, vector. */
	n = pfob->pci_function_number;
	AMAP_SET_BITS_PTR(EQ_CONTEXT, Func, &fwcmd->params.request.context, n);
	AMAP_SET_BITS_PTR(EQ_CONTEXT, valid, &fwcmd->params.request.context, 1);
	AMAP_SET_BITS_PTR(EQ_CONTEXT, Size,
			  &fwcmd->params.request.context, eqe_size_encoding);
	n = 0; /* Protection Domain is always 0 in Linux driver */
	AMAP_SET_BITS_PTR(EQ_CONTEXT, PD, &fwcmd->params.request.context, n);
	/* Let the caller ARM the EQ with the doorbell. */
	AMAP_SET_BITS_PTR(EQ_CONTEXT, Armed, &fwcmd->params.request.context, 0);
	AMAP_SET_BITS_PTR(EQ_CONTEXT, Count, &fwcmd->params.request.context,
			  num_entries_encoding);
	n = pfob->pci_function_number * 32;
	AMAP_SET_BITS_PTR(EQ_CONTEXT, EventVect,
			  &fwcmd->params.request.context, n);

	/* Optional watermark-based interrupt coalescing. */
	if (watermark != -1) {
		AMAP_SET_BITS_PTR(EQ_CONTEXT, WME,
				  &fwcmd->params.request.context, 1);
		AMAP_SET_BITS_PTR(EQ_CONTEXT, Watermark,
				  &fwcmd->params.request.context, watermark);
		ASSERT(watermark <= CEV_WMARK_240);
	} else
		AMAP_SET_BITS_PTR(EQ_CONTEXT, WME,
				  &fwcmd->params.request.context, 0);

	/* Optional timer-based interrupt coalescing. */
	if (timer_delay != -1) {
		AMAP_SET_BITS_PTR(EQ_CONTEXT, TMR,
				  &fwcmd->params.request.context, 1);
		ASSERT(timer_delay <= 250);	/* max value according to EAS */
		timer_delay = min(timer_delay, (u32)250);
		AMAP_SET_BITS_PTR(EQ_CONTEXT, Delay,
				  &fwcmd->params.request.context, timer_delay);
	} else {
		AMAP_SET_BITS_PTR(EQ_CONTEXT, TMR,
				  &fwcmd->params.request.context, 0);
	}
	/* Create a page list for the FWCMD. */
	be_rd_to_pa_list(rd, fwcmd->params.request.pages,
			 ARRAY_SIZE(fwcmd->params.request.pages));

	/* Post synchronously: no callbacks, no queuing context. */
	status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
					  NULL, NULL, fwcmd, NULL);
	if (status != BE_SUCCESS) {
		TRACE(DL_ERR, "MCC to create EQ failed.");
		goto Error;
	}
	/* Get the EQ id. The MPU allocates the IDs. */
	eq_object->eq_id = fwcmd->params.response.eq_id;

Error:
	spin_unlock_irqrestore(&pfob->post_lock, irql);
	/* Flush any FWCMDs that were queued while we held the lock. */
	if (pfob->pend_queue_driving && pfob->mcc) {
		pfob->pend_queue_driving = 0;
		be_drive_mcc_wrb_queue(pfob->mcc);
	}
	return status;
}
/*
Deferences the given object. Once the object's reference count drops to
zero, the object is destroyed and all resources that are held by this
object are released. The on-chip context is also destroyed along with
the queue ID, and any mappings made into the UT.
eq_object - EQ handle returned from eq_object_create.
    Returns BE_SUCCESS if successful, otherwise a useful error code
is returned.
IRQL: IRQL < DISPATCH_LEVEL
*/
/*
 * Destroy an EQ previously created with be_eq_create().
 *
 * The caller must have released all references and detached every CQ
 * first: both conditions are ASSERTed, not handled.  The on-chip ring
 * is torn down synchronously via a RING_DESTROY FWCMD.
 *
 * Always returns BE_SUCCESS; the FWCMD status is only checked by
 * ASSERT in debug builds.
 */
int be_eq_destroy(struct be_eq_object *eq_object)
{
	int status = 0;

	ASSERT(atomic_read(&eq_object->ref_count) == 0);
	/* no CQs should reference this EQ now */
	ASSERT(list_empty(&eq_object->cq_list_head));

	/* Send fwcmd to destroy the EQ. */
	status = be_function_ring_destroy(eq_object->parent_function,
					  eq_object->eq_id, FWCMD_RING_TYPE_EQ,
					  NULL, NULL, NULL, NULL);
	ASSERT(status == 0);

	return BE_SUCCESS;
}
/*
*---------------------------------------------------------------------------
* Function: be_eq_modify_delay
* Changes the EQ delay for a group of EQs.
* num_eq - The number of EQs in the eq_array to adjust.
* This also is the number of delay values in
* the eq_delay_array.
* eq_array - Array of struct be_eq_object pointers to adjust.
* eq_delay_array - Array of "num_eq" timer delays in units
* of microseconds. The be_eq_query_delay_range
* fwcmd returns the resolution and range of
* legal EQ delays.
* cb -
* cb_context -
* q_ctxt - Optional. Pointer to a previously allocated
* struct. If the MCC WRB ring is full, this
* structure is used to queue the operation. It
* will be posted to the MCC ring when space
* becomes available. All queued commands will
* be posted to the ring in the order they are
* received. It is always valid to pass a pointer to
* a generic be_generic_q_cntxt. However,
* the specific context structs
* are generally smaller than the generic struct.
* return pend_status - BE_SUCCESS (0) on success.
 *			BE_PENDING (positive value) if the FWCMD
* completion is pending. Negative error code on failure.
*-------------------------------------------------------------------------
*/
/*
 * Change the interrupt coalescing delay for a group of EQs via a
 * MODIFY_EQ_DELAY FWCMD.  See the block comment above for the full
 * parameter contract; in short, eq_array/eq_delay_array each hold
 * num_eq entries and the delays are in microseconds.  If the MCC WRB
 * ring is full and both q_ctxt and cb are supplied, the request is
 * queued and completes asynchronously (BE_PENDING).
 */
int
be_eq_modify_delay(struct be_function_object *pfob,
		   u32 num_eq, struct be_eq_object **eq_array,
		   u32 *eq_delay_array, mcc_wrb_cqe_callback cb,
		   void *cb_context, struct be_eq_modify_delay_q_ctxt *q_ctxt)
{
	struct FWCMD_COMMON_MODIFY_EQ_DELAY *fwcmd = NULL;
	struct MCC_WRB_AMAP *wrb = NULL;
	int status = 0;
	struct be_generic_q_ctxt *gen_ctxt = NULL;
	u32 i;
	unsigned long irql;

	spin_lock_irqsave(&pfob->post_lock, irql);

	wrb = be_function_peek_mcc_wrb(pfob);
	if (!wrb) {
		/*
		 * No free WRB: fall back to the caller-provided queuing
		 * context, building the FWCMD in its embedded WRB header.
		 */
		if (q_ctxt && cb) {
			wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
			gen_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
			gen_ctxt->context.bytes = sizeof(*q_ctxt);
		} else {
			status = BE_STATUS_NO_MCC_WRB;
			goto Error;
		}
	}
	/* Prepares an embedded fwcmd, including request/response sizes. */
	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_MODIFY_EQ_DELAY);

	ASSERT(num_eq > 0);
	ASSERT(num_eq <= ARRAY_SIZE(fwcmd->params.request.delay));
	fwcmd->params.request.num_eq = num_eq;
	for (i = 0; i < num_eq; i++) {
		fwcmd->params.request.delay[i].eq_id = eq_array[i]->eq_id;
		fwcmd->params.request.delay[i].delay_in_microseconds =
		    eq_delay_array[i];
	}

	/* Post the f/w command */
	status = be_function_post_mcc_wrb(pfob, wrb, gen_ctxt,
			cb, cb_context, NULL, NULL, fwcmd, NULL);

Error:
	spin_unlock_irqrestore(&pfob->post_lock, irql);
	/* Flush any FWCMDs that were queued while we held the lock. */
	if (pfob->pend_queue_driving && pfob->mcc) {
		pfob->pend_queue_driving = 0;
		be_drive_mcc_wrb_queue(pfob->mcc);
	}
	return status;
}

File diff suppressed because it is too large Load Diff

View File

@ -1,55 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __etx_context_amap_h__
#define __etx_context_amap_h__
/* ETX ring context structure. */
struct BE_ETX_CONTEXT_AMAP {
u8 tx_cidx[11]; /* DWORD 0 */
u8 rsvd0[5]; /* DWORD 0 */
u8 rsvd1[16]; /* DWORD 0 */
u8 tx_pidx[11]; /* DWORD 1 */
u8 rsvd2; /* DWORD 1 */
u8 tx_ring_size[4]; /* DWORD 1 */
u8 pd_id[5]; /* DWORD 1 */
u8 pd_id_not_valid; /* DWORD 1 */
u8 cq_id_send[10]; /* DWORD 1 */
u8 rsvd3[32]; /* DWORD 2 */
u8 rsvd4[32]; /* DWORD 3 */
u8 cur_bytes[32]; /* DWORD 4 */
u8 max_bytes[32]; /* DWORD 5 */
u8 time_stamp[32]; /* DWORD 6 */
u8 rsvd5[11]; /* DWORD 7 */
u8 func; /* DWORD 7 */
u8 rsvd6[20]; /* DWORD 7 */
u8 cur_txd_count[32]; /* DWORD 8 */
u8 max_txd_count[32]; /* DWORD 9 */
u8 rsvd7[32]; /* DWORD 10 */
u8 rsvd8[32]; /* DWORD 11 */
u8 rsvd9[32]; /* DWORD 12 */
u8 rsvd10[32]; /* DWORD 13 */
u8 rsvd11[32]; /* DWORD 14 */
u8 rsvd12[32]; /* DWORD 15 */
} __packed;
struct ETX_CONTEXT_AMAP {
u32 dw[16];
};
#endif /* __etx_context_amap_h__ */

View File

@ -1,565 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
#include "hwlib.h"
#include "bestatus.h"
/*
 * Query the firmware configuration via a synchronous
 * COMMON_FIRMWARE_CONFIG FWCMD, copying the response payload into
 * *config.  Used internally at function-object creation time.
 *
 * Returns BE_SUCCESS, or a BE_STATUS_* error code on failure.
 */
int
be_function_internal_query_firmware_config(struct be_function_object *pfob,
				   struct BE_FIRMWARE_CONFIG *config)
{
	struct FWCMD_COMMON_FIRMWARE_CONFIG *fwcmd = NULL;
	struct MCC_WRB_AMAP *wrb = NULL;
	int status = 0;
	unsigned long irql;
	struct be_mcc_wrb_response_copy rc;

	spin_lock_irqsave(&pfob->post_lock, irql);

	wrb = be_function_peek_mcc_wrb(pfob);
	if (!wrb) {
		TRACE(DL_ERR, "MCC wrb peek failed.");
		status = BE_STATUS_NO_MCC_WRB;
		goto error;
	}
	/* Prepares an embedded fwcmd, including request/response sizes. */
	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_FIRMWARE_CONFIG);

	/* Ask the MCC layer to copy the response payload into *config. */
	rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_FIRMWARE_CONFIG,
					params.response);
	rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_FIRMWARE_CONFIG,
					params.response);
	rc.va = config;

	/* Post the f/w command */
	status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL,
					NULL, NULL, NULL, fwcmd, &rc);
error:
	spin_unlock_irqrestore(&pfob->post_lock, irql);
	/* Flush any FWCMDs that were queued while we held the lock. */
	if (pfob->pend_queue_driving && pfob->mcc) {
		pfob->pend_queue_driving = 0;
		be_drive_mcc_wrb_queue(pfob->mcc);
	}
	return status;
}
/*
This allocates and initializes a function object based on the information
provided by upper layer drivers.
Returns BE_SUCCESS on success and an appropriate int on failure.
A function object represents a single BladeEngine (logical) PCI function.
That is a function object either represents
the networking side of BladeEngine or the iSCSI side of BladeEngine.
This routine will also detect and create an appropriate PD object for the
PCI function as needed.
*/
/*
 * Initialize a function object representing one BladeEngine logical
 * PCI function (iSCSI, network, or ARM).
 *
 * csr_va/db_va/pci_va - mapped BAR virtual addresses (CSR, doorbell,
 *                       PCI config) stored for later register access.
 * function_type       - BE_FUNCTION_TYPE_* (ASSERTed <= 2).
 * mailbox             - ring descriptor for the bootstrap MCC mailbox.
 * pfob                - out: the object to initialize (zeroed first).
 *
 * Drives POST, initializes the mailbox, and caches the firmware config
 * in pfob->fw_config.  On any failure the object is zeroed again so it
 * cannot be used half-initialized.  Returns BE_SUCCESS or an error code.
 */
int
be_function_object_create(u8 __iomem *csr_va, u8 __iomem *db_va,
		u8 __iomem *pci_va, u32 function_type,
		struct ring_desc *mailbox, struct be_function_object *pfob)
{
	int status;

	ASSERT(pfob);	/* not a magic assert */
	ASSERT(function_type <= 2);

	TRACE(DL_INFO, "Create function object. type:%s object:0x%p",
	      (function_type == BE_FUNCTION_TYPE_ISCSI ? "iSCSI" :
	       (function_type == BE_FUNCTION_TYPE_NETWORK ? "Network" :
		"Arm")), pfob);

	memset(pfob, 0, sizeof(*pfob));

	pfob->type = function_type;
	pfob->csr_va = csr_va;
	pfob->db_va = db_va;
	pfob->pci_va = pci_va;

	spin_lock_init(&pfob->cq_lock);
	spin_lock_init(&pfob->post_lock);
	spin_lock_init(&pfob->mcc_context_lock);

	pfob->pci_function_number = 1;

	pfob->emulate = false;
	TRACE(DL_NOTE, "Non-emulation mode");
	status = be_drive_POST(pfob);
	if (status != BE_SUCCESS) {
		TRACE(DL_ERR, "BladeEngine POST failed.");
		goto error;
	}

	/* Initialize the mailbox */
	status = be_mpu_init_mailbox(pfob, mailbox);
	if (status != BE_SUCCESS) {
		TRACE(DL_ERR, "Failed to initialize mailbox.");
		goto error;
	}
	/*
	 * Cache the firmware config for ASSERTs in hwclib and later
	 * driver queries.
	 */
	status = be_function_internal_query_firmware_config(pfob,
					       &pfob->fw_config);
	if (status != BE_SUCCESS) {
		TRACE(DL_ERR, "Failed to query firmware config.");
		goto error;
	}

error:
	/* Success also falls through here; cleanup only runs on failure. */
	if (status != BE_SUCCESS) {
		/* No cleanup necessary */
		TRACE(DL_ERR, "Failed to create function.");
		memset(pfob, 0, sizeof(*pfob));
	}
	return status;
}
/*
This routine drops the reference count on a given function object. Once
the reference count falls to zero, the function object is destroyed and all
resources held are freed.
FunctionObject - The function object to drop the reference to.
*/
/*
 * Release a function object.  The MCC ring must already be torn down
 * (pfob->mcc must be NULL); no other resources are held by the object
 * itself, so this always succeeds.
 */
int be_function_object_destroy(struct be_function_object *pfob)
{
	TRACE(DL_INFO, "Destroy pfob. Object:0x%p", pfob);

	/* The MCC ring must be destroyed before the function object. */
	ASSERT(pfob->mcc == NULL);

	return BE_SUCCESS;
}
/*
 * Quiesce a function before teardown: disable multicast (network
 * functions) and VLAN filtering, destroy the MCC ring (falling back to
 * mailbox mode), clear any pending CEV interrupt assertions, and
 * finally destroy the function object itself.
 *
 * Must be called with host interrupts already disabled; if they are
 * still enabled an error is traced and the ISR is not touched.
 * Returns the status of the final be_function_object_destroy() call.
 */
int be_function_cleanup(struct be_function_object *pfob)
{
	int status = 0;
	u32 isr;
	u32 host_intr;
	struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP ctrl;

	/* Multicast filtering is only configured on network functions. */
	if (pfob->type == BE_FUNCTION_TYPE_NETWORK) {
		status = be_rxf_multicast_config(pfob, false, 0,
						NULL, NULL, NULL, NULL);
		ASSERT(status == BE_SUCCESS);
	}
	/* VLAN */
	status = be_rxf_vlan_config(pfob, false, 0, NULL, NULL, NULL, NULL);
	ASSERT(status == BE_SUCCESS);
	/*
	 * MCC Queue -- Switches to mailbox mode.  May want to destroy
	 * all but the MCC CQ before this call if polling CQ is much better
	 * performance than polling mailbox register.
	 */
	if (pfob->mcc)
		status = be_mcc_ring_destroy(pfob->mcc);
	/*
	 * If interrupts are disabled, clear any CEV interrupt assertions that
	 * fired after we stopped processing EQs.
	 */
	ctrl.dw[0] = PCICFG1_READ(pfob, host_timer_int_ctrl);
	host_intr = AMAP_GET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR,
							hostintr, ctrl.dw);
	if (!host_intr) {
		/*
		 * The read itself is what matters: the returned value is
		 * deliberately discarded (presumably the ISR is
		 * read-to-clear -- TODO confirm against the hardware EAS).
		 */
		if (pfob->type == BE_FUNCTION_TYPE_NETWORK)
			isr = CSR_READ(pfob, cev.isr1);
		else
			isr = CSR_READ(pfob, cev.isr0);
	} else {
		/* This should never happen... */
		TRACE(DL_ERR, "function_cleanup called with interrupt enabled");
	}
	/* Function object destroy */
	status = be_function_object_destroy(pfob);
	ASSERT(status == BE_SUCCESS);

	return status;
}
/*
 * Initialize a WRB for an embedded FWCMD: the command payload lives
 * inside the WRB itself rather than in an external SGL.
 *
 * Marks the WRB embedded, sets its payload length, and fills in the
 * common FWCMD request header (timeout/domain zeroed, request_length
 * sized to hold the larger of request and response).
 *
 * Returns a pointer to the FWCMD request header within the WRB.
 */
void *
be_function_prepare_embedded_fwcmd(struct be_function_object *pfob,
	struct MCC_WRB_AMAP *wrb, u32 payld_len, u32 request_length,
	u32 response_length, u32 opcode, u32 subsystem)
{
	struct FWCMD_REQUEST_HEADER *header = NULL;
	u32 n;

	ASSERT(wrb);

	/*
	 * AMAP structs describe the layout bit-by-bit (one u8 per bit),
	 * so offsetof() yields a bit offset; /8 converts it to bytes.
	 */
	n = offsetof(struct BE_MCC_WRB_AMAP, payload)/8;
	AMAP_SET_BITS_PTR(MCC_WRB, embedded, wrb, 1);
	/*
	 * NOTE(review): the payload length is capped at the payload's
	 * byte *offset* within the WRB, not its size -- looks suspicious;
	 * verify against the BE_MCC_WRB_AMAP layout.
	 */
	AMAP_SET_BITS_PTR(MCC_WRB, payload_length, wrb, min(payld_len, n));
	header = (struct FWCMD_REQUEST_HEADER *)((u8 *)wrb + n);

	header->timeout = 0;
	header->domain = 0;
	header->request_length = max(request_length, response_length);
	header->opcode = opcode;
	header->subsystem = subsystem;

	return header;
}
/*
 * Initialize a WRB for a non-embedded FWCMD: the command payload lives
 * in caller-provided memory (fwcmd_va/fwcmd_pa) referenced through a
 * single-entry SGL in the WRB.
 *
 * The caller may extend the SGL by rewriting sgl[0].length, adding
 * entries, and updating sge_count.  Fills in the common FWCMD request
 * header the same way as the embedded variant.
 *
 * Returns fwcmd_va cast to the FWCMD request header.
 */
void *
be_function_prepare_nonembedded_fwcmd(struct be_function_object *pfob,
	struct MCC_WRB_AMAP *wrb,
	void *fwcmd_va, u64 fwcmd_pa,
	u32 payld_len,
	u32 request_length,
	u32 response_length,
	u32 opcode, u32 subsystem)
{
	struct FWCMD_REQUEST_HEADER *header = NULL;
	u32 n;
	struct MCC_WRB_PAYLOAD_AMAP *plp;

	ASSERT(wrb);
	ASSERT(fwcmd_va);

	header = (struct FWCMD_REQUEST_HEADER *) fwcmd_va;

	AMAP_SET_BITS_PTR(MCC_WRB, embedded, wrb, 0);
	AMAP_SET_BITS_PTR(MCC_WRB, payload_length, wrb, payld_len);

	/*
	 * Assume one fragment. The caller may override the SGL by
	 * rewriting the 0th length and adding more entries. They
	 * will also need to update the sge_count.
	 */
	AMAP_SET_BITS_PTR(MCC_WRB, sge_count, wrb, 1);

	/* AMAP offsetof() is in bits; /8 converts to a byte offset. */
	n = offsetof(struct BE_MCC_WRB_AMAP, payload)/8;
	plp = (struct MCC_WRB_PAYLOAD_AMAP *)((u8 *)wrb + n);
	AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].length, plp, payld_len);
	AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].pa_lo, plp, (u32)fwcmd_pa);
	AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].pa_hi, plp,
					upper_32_bits(fwcmd_pa));

	header->timeout = 0;
	header->domain = 0;
	header->request_length = max(request_length, response_length);
	header->opcode = opcode;
	header->subsystem = subsystem;

	return header;
}
/*
 * Return the next free MCC WRB without consuming it, zeroed and ready
 * to be built into a FWCMD.  With an MCC ring present the WRB comes
 * from the ring; otherwise the single WRB embedded in the bootstrap
 * mailbox is used.  Returns NULL if the ring has no free WRB.
 */
struct MCC_WRB_AMAP *
be_function_peek_mcc_wrb(struct be_function_object *pfob)
{
	struct MCC_WRB_AMAP *next_wrb;
	u32 mbox_wrb_offset;

	if (pfob->mcc) {
		/* Ring mode: ask the MPU layer for the next free slot. */
		next_wrb = _be_mpu_peek_ring_wrb(pfob->mcc, false);
	} else {
		/* Mailbox mode: the WRB is embedded in the mailbox page. */
		mbox_wrb_offset = offsetof(struct BE_MCC_MAILBOX_AMAP, wrb)/8;
		next_wrb = (struct MCC_WRB_AMAP *) ((u8 *) pfob->mailbox.va +
				mbox_wrb_offset);
	}

	if (next_wrb)
		memset(next_wrb, 0, sizeof(struct MCC_WRB_AMAP));

	return next_wrb;
}
#if defined(BE_DEBUG)
/*
 * Debug-build helper: record the FWCMD opcode/subsystem in the WRB
 * context before posting, so completion-time asserts can check them.
 * For embedded FWCMDs the header is found inside the WRB; otherwise
 * it is taken from optional_fwcmd_va (which may be NULL, in which
 * case zeros are recorded).  Compiles to nothing in non-debug builds.
 */
void be_function_debug_print_wrb(struct be_function_object *pfob,
		struct MCC_WRB_AMAP *wrb, void *optional_fwcmd_va,
		struct be_mcc_wrb_context *wrb_context)
{

	struct FWCMD_REQUEST_HEADER *header = NULL;
	u8 embedded;
	u32 n;

	embedded = AMAP_GET_BITS_PTR(MCC_WRB, embedded, wrb);

	if (embedded) {
		/* AMAP offsetof() is in bits; /8 converts to bytes. */
		n = offsetof(struct BE_MCC_WRB_AMAP, payload)/8;
		header = (struct FWCMD_REQUEST_HEADER *)((u8 *)wrb + n);
	} else {
		header = (struct FWCMD_REQUEST_HEADER *) optional_fwcmd_va;
	}

	/* Save the completed count before posting for a debug assert. */
	if (header) {
		wrb_context->opcode = header->opcode;
		wrb_context->subsystem = header->subsystem;
	} else {
		wrb_context->opcode = 0;
		wrb_context->subsystem = 0;
	}
}
#else
#define be_function_debug_print_wrb(a_, b_, c_, d_)
#endif
/*
 * Post a prepared MCC WRB to the MPU, or queue it for later posting.
 *
 * If q_ctxt is non-NULL the request is queued on the MCC backlog
 * (callbacks and response-copy info saved in the context) instead of
 * being posted immediately.  Otherwise a WRB context is allocated to
 * track completion (required because commands may complete out of
 * order), its pointer is stashed in the WRB's 64-bit tag, and the WRB
 * is posted to the MCC ring or, in mailbox mode, to the bootstrap
 * mailbox (which always completes synchronously).
 *
 * cb/internal_cb      - completion callbacks plus their contexts.
 * optional_fwcmd_va   - FWCMD va for non-embedded commands (debug use).
 * rc                  - optional response-copy descriptor: where and how
 *                       much of the FWCMD response to copy on completion.
 *
 * Returns BE_SUCCESS, BE_PENDING, or a BE_STATUS_* error.
 */
int
be_function_post_mcc_wrb(struct be_function_object *pfob,
		struct MCC_WRB_AMAP *wrb,
		struct be_generic_q_ctxt *q_ctxt,
		mcc_wrb_cqe_callback cb, void *cb_context,
		mcc_wrb_cqe_callback internal_cb,
		void *internal_cb_context, void *optional_fwcmd_va,
		struct be_mcc_wrb_response_copy *rc)
{
	int status;
	struct be_mcc_wrb_context *wrb_context = NULL;
	u64 *p;

	if (q_ctxt) {
		/* Initialize context.         */
		q_ctxt->context.internal_cb = internal_cb;
		q_ctxt->context.internal_cb_context = internal_cb_context;
		q_ctxt->context.cb = cb;
		q_ctxt->context.cb_context = cb_context;
		if (rc) {
			q_ctxt->context.copy.length = rc->length;
			q_ctxt->context.copy.fwcmd_offset = rc->fwcmd_offset;
			q_ctxt->context.copy.va = rc->va;
		} else
			q_ctxt->context.copy.length = 0;

		q_ctxt->context.optional_fwcmd_va = optional_fwcmd_va;

		/* Queue this request */
		status = be_function_queue_mcc_wrb(pfob, q_ctxt);

		goto Error;
	}
	/*
	 * Allocate a WRB context struct to hold the callback pointers,
	 * status, etc. This is required if commands complete out of order.
	 */
	wrb_context = _be_mcc_allocate_wrb_context(pfob);
	if (!wrb_context) {
		TRACE(DL_WARN, "Failed to allocate MCC WRB context.");
		status = BE_STATUS_SYSTEM_RESOURCES;
		goto Error;
	}
	/* Initialize context. */
	memset(wrb_context, 0, sizeof(*wrb_context));
	wrb_context->internal_cb = internal_cb;
	wrb_context->internal_cb_context = internal_cb_context;
	wrb_context->cb = cb;
	wrb_context->cb_context = cb_context;
	if (rc) {
		wrb_context->copy.length = rc->length;
		wrb_context->copy.fwcmd_offset = rc->fwcmd_offset;
		wrb_context->copy.va = rc->va;
	} else
		wrb_context->copy.length = 0;
	wrb_context->wrb = wrb;

	/*
	 * Copy the context pointer into the WRB opaque tag field.
	 * Verify assumption of 64-bit tag with a compile time assert.
	 */
	p = (u64 *) ((u8 *)wrb + offsetof(struct BE_MCC_WRB_AMAP, tag)/8);
	*p = (u64)(size_t)wrb_context;

	/* Print info about this FWCMD for debug builds. */
	be_function_debug_print_wrb(pfob, wrb, optional_fwcmd_va, wrb_context);

	/*
	 * issue the WRB to the MPU as appropriate
	 */
	if (pfob->mcc) {
		/*
		 * we're in WRB mode, pass to the mcc layer
		 */
		status = _be_mpu_post_wrb_ring(pfob->mcc, wrb, wrb_context);
	} else {
		/*
		 * we're in mailbox mode
		 */
		status = _be_mpu_post_wrb_mailbox(pfob, wrb, wrb_context);

		/* mailbox mode always completes synchronously */
		ASSERT(status != BE_STATUS_PENDING);
	}

Error:
	return status;
}
/*
 * Destroy an on-chip ring of the given FWCMD_RING_TYPE_* via a
 * COMMON_RING_DESTROY FWCMD.
 *
 * id            - ring id assigned by the MPU at creation time.
 * ring_type     - FWCMD_RING_TYPE_* identifying the ring class.
 * cb/internal_cb and contexts - optional completion callbacks; with
 *                 all-NULL callbacks the command runs synchronously.
 *
 * Returns BE_SUCCESS, BE_PENDING, or a BE_STATUS_* error.
 */
int
be_function_ring_destroy(struct be_function_object *pfob,
		u32 id, u32 ring_type, mcc_wrb_cqe_callback cb,
		void *cb_context, mcc_wrb_cqe_callback internal_cb,
		void *internal_cb_context)
{

	struct FWCMD_COMMON_RING_DESTROY *fwcmd = NULL;
	struct MCC_WRB_AMAP *wrb = NULL;
	int status = 0;
	unsigned long irql;

	spin_lock_irqsave(&pfob->post_lock, irql);

	TRACE(DL_INFO, "Destroy ring id:%d type:%d", id, ring_type);

	wrb = be_function_peek_mcc_wrb(pfob);
	if (!wrb) {
		ASSERT(wrb);
		TRACE(DL_ERR, "No free MCC WRBs in destroy ring.");
		status = BE_STATUS_NO_MCC_WRB;
		goto Error;
	}
	/* Prepares an embedded fwcmd, including request/response sizes. */
	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_RING_DESTROY);

	fwcmd->params.request.id = id;
	fwcmd->params.request.ring_type = ring_type;

	/* Post the f/w command */
	status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb, cb_context,
				internal_cb, internal_cb_context, fwcmd, NULL);
	if (status != BE_SUCCESS && status != BE_PENDING) {
		TRACE(DL_ERR, "Ring destroy fwcmd failed. id:%d ring_type:%d",
			id, ring_type);
		goto Error;
	}

Error:
	spin_unlock_irqrestore(&pfob->post_lock, irql);
	/* Flush any FWCMDs that were queued while we held the lock. */
	if (pfob->pend_queue_driving && pfob->mcc) {
		pfob->pend_queue_driving = 0;
		be_drive_mcc_wrb_queue(pfob->mcc);
	}
	return status;
}
/*
 * Fill a FWCMD page list from a ring descriptor: one little-endian
 * physical address per page spanned by the ring, up to max_num entries.
 *
 * rd      - ring descriptor (va/pa/length) describing the host memory.
 * pa_list - out: lo/hi address pairs for the FWCMD.
 * max_num - capacity of pa_list; extra pages are silently dropped.
 */
void
be_rd_to_pa_list(struct ring_desc *rd, struct PHYS_ADDR *pa_list, u32 max_num)
{
	u32 num_pages = PAGES_SPANNED(rd->va, rd->length);
	u32 i = 0;
	u64 pa = rd->pa;
	__le64 lepa;

	ASSERT(pa_list);
	ASSERT(pa);

	for (i = 0; i < min(num_pages, max_num); i++) {
		/*
		 * NOTE(review): the lo/hi split is taken from the
		 * byte-swapped __le64 value, which only yields the true
		 * address halves on little-endian hosts -- presumably the
		 * PHYS_ADDR fields are themselves little-endian; verify
		 * on big-endian before reuse.
		 */
		lepa = cpu_to_le64(pa);
		pa_list[i].lo = (u32)lepa;
		pa_list[i].hi = upper_32_bits(lepa);
		pa += PAGE_SIZE;
	}
}
/*-----------------------------------------------------------------------------
* Function: be_function_get_fw_version
 * Retrieves the firmware version on the adapter. If the callback is
* NULL this call executes synchronously. If the callback is not NULL,
* the returned status will be BE_PENDING if the command was issued
* successfully.
* pfob -
* fwv - Pointer to response buffer if callback is NULL.
* cb - Callback function invoked when the FWCMD completes.
* cb_context - Passed to the callback function.
* return pend_status - BE_SUCCESS (0) on success.
 *			BE_PENDING (positive value) if the FWCMD
* completion is pending. Negative error code on failure.
*---------------------------------------------------------------------------
*/
/*
 * Retrieve the firmware version from the adapter via a
 * COMMON_GET_FW_VERSION FWCMD.
 *
 * fwv        - response buffer; required when cb is NULL (synchronous
 *              call), optional otherwise.
 * cb         - completion callback; NULL for synchronous execution.
 * cb_context - passed to the callback.
 *
 * Returns BE_SUCCESS, BE_PENDING (callback supplied and the command is
 * in flight), or a negative/BE_STATUS_* error.
 */
int
be_function_get_fw_version(struct be_function_object *pfob,
		struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD *fwv,
		mcc_wrb_cqe_callback cb, void *cb_context)
{
	int status = BE_SUCCESS;
	struct MCC_WRB_AMAP *wrb = NULL;
	struct FWCMD_COMMON_GET_FW_VERSION *fwcmd = NULL;
	unsigned long irql;
	struct be_mcc_wrb_response_copy rc;

	spin_lock_irqsave(&pfob->post_lock, irql);

	/* Validate arguments before consuming (and zeroing) a WRB. */
	if (!cb && !fwv) {
		TRACE(DL_ERR, "callback and response buffer NULL!");
		status = BE_NOT_OK;
		goto Error;
	}

	wrb = be_function_peek_mcc_wrb(pfob);
	if (!wrb) {
		TRACE(DL_ERR, "MCC wrb peek failed.");
		status = BE_STATUS_NO_MCC_WRB;
		goto Error;
	}

	/* Prepares an embedded fwcmd, including request/response sizes. */
	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_GET_FW_VERSION);

	rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_GET_FW_VERSION,
					params.response);
	rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_GET_FW_VERSION,
					params.response);
	rc.va = fwv;

	/*
	 * Post the f/w command.  Only request a response copy when the
	 * caller supplied a buffer; previously rc was passed even when
	 * fwv was NULL (async callback without a buffer), which would
	 * have copied the response to a NULL va on completion.
	 */
	status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb,
				cb_context, NULL, NULL, fwcmd,
				fwv ? &rc : NULL);
Error:
	spin_unlock_irqrestore(&pfob->post_lock, irql);
	/* Flush any FWCMDs that were queued while we held the lock. */
	if (pfob->pend_queue_driving && pfob->mcc) {
		pfob->pend_queue_driving = 0;
		be_drive_mcc_wrb_queue(pfob->mcc);
	}
	return status;
}
int
be_function_queue_mcc_wrb(struct be_function_object *pfob,
struct be_generic_q_ctxt *q_ctxt)
{
int status;
ASSERT(q_ctxt);
/*
* issue the WRB to the MPU as appropriate
*/
if (pfob->mcc) {
/* We're in ring mode. Queue this item. */
pfob->mcc->backlog_length++;
list_add_tail(&q_ctxt->context.list, &pfob->mcc->backlog);
status = BE_PENDING;
} else {
status = BE_NOT_OK;
}
return status;
}

View File

@ -1,222 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __fwcmd_common_amap_h__
#define __fwcmd_common_amap_h__
#include "host_struct.h"
/*
 * Shared FWCMD constants: link state, ULP masks, port/ring/QoS and
 * failover encodings used in FWCMD requests and responses.
 */
/* --- PHY_LINK_DUPLEX_ENUM --- */
#define PHY_LINK_DUPLEX_NONE            (0)
#define PHY_LINK_DUPLEX_HALF            (1)
#define PHY_LINK_DUPLEX_FULL            (2)

/* --- PHY_LINK_SPEED_ENUM --- */
#define PHY_LINK_SPEED_ZERO             (0)	/* No link. */
#define PHY_LINK_SPEED_10MBPS           (1)	/* 10 Mbps */
#define PHY_LINK_SPEED_100MBPS          (2)	/* 100 Mbps */
#define PHY_LINK_SPEED_1GBPS            (3)	/* 1 Gbps */
#define PHY_LINK_SPEED_10GBPS           (4)	/* 10 Gbps */

/* --- PHY_LINK_FAULT_ENUM --- */
#define PHY_LINK_FAULT_NONE             (0)	/* No fault status
							available or detected */
#define PHY_LINK_FAULT_LOCAL            (1)	/* Local fault detected */
#define PHY_LINK_FAULT_REMOTE           (2)	/* Remote fault detected */

/* --- BE_ULP_MASK --- */
#define BE_ULP0_MASK                    (1)
#define BE_ULP1_MASK                    (2)
#define BE_ULP2_MASK                    (4)

/* --- NTWK_ACTIVE_PORT --- */
#define NTWK_PORT_A                     (0)	/* Port A is currently active */
#define NTWK_PORT_B                     (1)	/* Port B is currently active */
#define NTWK_NO_ACTIVE_PORT             (15)	/* Both ports have lost link */

/* --- NTWK_LINK_TYPE --- */
#define NTWK_LINK_TYPE_PHYSICAL         (0)	/* link up/down event
						   applies to BladeEngine's
						   Physical Ports
						   */
#define NTWK_LINK_TYPE_VIRTUAL          (1)	/* Virtual link up/down event
						   reported by BladeExchange.
						   This applies only when the
						   VLD feature is enabled
						   */

/*
 * --- FWCMD_MAC_TYPE_ENUM ---
 * This enum defines the types of MAC addresses in the RXF MAC Address Table.
 */
#define MAC_ADDRESS_TYPE_STORAGE        (0)	/* Storage MAC Address */
#define MAC_ADDRESS_TYPE_NETWORK        (1)	/* Network MAC Address */
#define MAC_ADDRESS_TYPE_PD             (2)	/* Protection Domain MAC Addr */
#define MAC_ADDRESS_TYPE_MANAGEMENT     (3)	/* Management MAC Address */

/* --- FWCMD_RING_TYPE_ENUM --- */
#define FWCMD_RING_TYPE_ETH_RX          (1)	/* Ring created with */
					/* FWCMD_COMMON_ETH_RX_CREATE. */
#define FWCMD_RING_TYPE_ETH_TX          (2)	/* Ring created with */
					/* FWCMD_COMMON_ETH_TX_CREATE. */
#define FWCMD_RING_TYPE_ISCSI_WRBQ      (3)	/* Ring created with */
					/* FWCMD_COMMON_ISCSI_WRBQ_CREATE. */
#define FWCMD_RING_TYPE_ISCSI_DEFQ      (4)	/* Ring created with */
					/* FWCMD_COMMON_ISCSI_DEFQ_CREATE. */
#define FWCMD_RING_TYPE_TPM_WRBQ        (5)	/* Ring created with */
					/* FWCMD_COMMON_TPM_WRBQ_CREATE. */
#define FWCMD_RING_TYPE_TPM_DEFQ        (6)	/* Ring created with */
					/* FWCMD_COMMONTPM_TDEFQ_CREATE. */
#define FWCMD_RING_TYPE_TPM_RQ          (7)	/* Ring created with */
					/* FWCMD_COMMON_TPM_RQ_CREATE. */
#define FWCMD_RING_TYPE_MCC             (8)	/* Ring created with */
					/* FWCMD_COMMON_MCC_CREATE. */
#define FWCMD_RING_TYPE_CQ              (9)	/* Ring created with */
					/* FWCMD_COMMON_CQ_CREATE. */
#define FWCMD_RING_TYPE_EQ              (10)	/* Ring created with */
					/* FWCMD_COMMON_EQ_CREATE. */
#define FWCMD_RING_TYPE_QP              (11)	/* Ring created with */
					/* FWCMD_RDMA_QP_CREATE. */

/* --- ETH_TX_RING_TYPE_ENUM --- */
#define ETH_TX_RING_TYPE_FORWARDING     (1)	/* Ethernet ring for
						   forwarding packets */
#define ETH_TX_RING_TYPE_STANDARD       (2)	/* Ethernet ring for sending
						   network packets. */
#define ETH_TX_RING_TYPE_BOUND          (3)	/* Ethernet ring bound to the
						   port specified in the command
						   header.port_number field.
						   Rings of this type are
						   NOT subject to the
						   failover logic implemented
						   in the BladeEngine.
						   */

/* --- FWCMD_COMMON_QOS_TYPE_ENUM --- */
#define QOS_BITS_NIC                    (1)	/* max_bits_per_second_NIC */
						/* field is valid.  */
#define QOS_PKTS_NIC                    (2)	/* max_packets_per_second_NIC */
						/* field is valid.  */
#define QOS_IOPS_ISCSI                  (4)	/* max_ios_per_second_iSCSI */
						/*field is valid.  */
#define QOS_VLAN_TAG                    (8)	/* domain_VLAN_tag field
						   is valid.  */
#define QOS_FABRIC_ID                   (16)	/* fabric_domain_ID field
						   is valid.  */
#define QOS_OEM_PARAMS                  (32)	/* qos_params_oem field
						   is valid.  */
#define QOS_TPUT_ISCSI                  (64)	/* max_bytes_per_second_iSCSI
						   field is valid.  */

/*
 * --- FAILOVER_CONFIG_ENUM ---
 * Failover configuration setting used in FWCMD_COMMON_FORCE_FAILOVER
 */
#define FAILOVER_CONFIG_NO_CHANGE       (0)	/* No change to automatic */
						/* port failover setting. */
#define FAILOVER_CONFIG_ON              (1)	/* Automatic port failover
						   on link down is enabled. */
#define FAILOVER_CONFIG_OFF (2) /* Automatic port failover
on link down is disabled. */
/*
* --- FAILOVER_PORT_ENUM ---
* Failover port setting used in FWCMD_COMMON_FORCE_FAILOVER
*/
#define FAILOVER_PORT_A (0) /* Selects port A. */
#define FAILOVER_PORT_B (1) /* Selects port B. */
#define FAILOVER_PORT_NONE (15) /* No port change requested. */
/*
* --- MGMT_FLASHROM_OPCODE ---
* Flash ROM operation code
*/
#define MGMT_FLASHROM_OPCODE_FLASH (1) /* Commit downloaded data
to Flash ROM */
#define MGMT_FLASHROM_OPCODE_SAVE (2) /* Save downloaded data to
ARM's DDR - do not flash */
#define MGMT_FLASHROM_OPCODE_CLEAR (3) /* Erase specified component
from FlashROM */
#define MGMT_FLASHROM_OPCODE_REPORT (4) /* Read specified component
from Flash ROM */
#define MGMT_FLASHROM_OPCODE_IMAGE_INFO (5) /* Returns size of a
component */
/*
* --- MGMT_FLASHROM_OPTYPE ---
* Flash ROM operation type
*/
#define MGMT_FLASHROM_OPTYPE_CODE_FIRMWARE (0) /* Includes ARM firmware,
IPSec (optional) and EP
firmware */
#define MGMT_FLASHROM_OPTYPE_CODE_REDBOOT (1)
#define MGMT_FLASHROM_OPTYPE_CODE_BIOS (2)
#define MGMT_FLASHROM_OPTYPE_CODE_PXE_BIOS (3)
#define MGMT_FLASHROM_OPTYPE_CODE_CTRLS (4)
#define MGMT_FLASHROM_OPTYPE_CFG_IPSEC (5)
#define MGMT_FLASHROM_OPTYPE_CFG_INI (6)
#define MGMT_FLASHROM_OPTYPE_ROM_OFFSET_SPECIFIED (7)
/*
* --- FLASHROM_TYPE ---
* Flash ROM manufacturers supported in the f/w
*/
#define INTEL (0)
#define SPANSION (1)
#define MICRON (2)
/* --- DDR_CAS_TYPE --- */
#define CAS_3 (0)
#define CAS_4 (1)
#define CAS_5 (2)
/* --- DDR_SIZE_TYPE --- */
#define SIZE_256MB (0)
#define SIZE_512MB (1)
/* --- DDR_MODE_TYPE --- */
#define DDR_NO_ECC (0)
#define DDR_ECC (1)
/* --- INTERFACE_10GB_TYPE --- */
#define CX4_TYPE (0)
#define XFP_TYPE (1)
/* --- BE_CHIP_MAX_MTU --- */
#define CHIP_MAX_MTU (9000)
/* --- XAUI_STATE_ENUM --- */
#define XAUI_STATE_ENABLE (0) /* This MUST be the default
value for all requests
which set/change
equalization parameter. */
#define XAUI_STATE_DISABLE (255) /* The XAUI for both ports
may be disabled for EMI
tests. There is no
provision for turning off
individual ports.
*/
/* --- BE_ASIC_REVISION --- */
#define BE_ASIC_REV_A0 (1)
#define BE_ASIC_REV_A1 (2)
#endif /* __fwcmd_common_amap_h__ */

View File

@ -1,717 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __fwcmd_common_bmap_h__
#define __fwcmd_common_bmap_h__
#include "fwcmd_types_bmap.h"
#include "fwcmd_hdr_bmap.h"
/*
 * Physical Address.
 *
 * The byte layout is identical on big- and little-endian hosts (two
 * 32-bit halves overlaid with a two-dword array), so a single
 * definition replaces the previous #if defined(__BIG_ENDIAN) / #else
 * branches, which were byte-for-byte identical.
 */
struct PHYS_ADDR {
	union {
		struct {
			u32 lo;		/* DWORD 0 */
			u32 hi;		/* DWORD 1 */
		} __packed;	/* unnamed struct */
		u32 dw[2];	/* dword union */
	};			/* unnamed union */
} __packed ;

/*
 * Per-MAC link state returned by FWCMD_COMMON_NTWK_LINK_STATUS_QUERY.
 *
 * Bug fix: this struct was previously defined only inside the
 * little-endian (#else) branch, so __BIG_ENDIAN builds had no
 * BE_LINK_STATUS even though union LINK_STATUS_QUERY_PARAMS (below)
 * references it unconditionally. Every field is a u8 (the trailing u16
 * is reserved), so the packed byte layout is endian-neutral and one
 * unconditional definition serves both builds.
 */
struct BE_LINK_STATUS {
	u8 mac0_duplex;		/* see PHY_LINK_DUPLEX_* */
	u8 mac0_speed;		/* see PHY_LINK_SPEED_* */
	u8 mac1_duplex;
	u8 mac1_speed;
	u8 mgmt_mac_duplex;
	u8 mgmt_mac_speed;
	u8 active_port;		/* see NTWK_ACTIVE_PORT values */
	u8 rsvd0;
	u8 mac0_fault;		/* see PHY_LINK_FAULT_* */
	u8 mac1_fault;
	u16 rsvd1;
} __packed;
/* Request payload: no parameters; the single dword is reserved. */
struct FWCMD_COMMON_ANON_170_REQUEST {
	u32 rsvd0;
} __packed;
/* Request/response overlay for the link status query. */
union LINK_STATUS_QUERY_PARAMS {
	struct BE_LINK_STATUS response;
	struct FWCMD_COMMON_ANON_170_REQUEST request;
} __packed;
/*
 * Queries the link status for all ports. The valid values below
 * DO NOT indicate that a particular duplex or speed is supported by
 * BladeEngine. These enumerations simply list all possible duplexes
 * and speeds for any port. Consult BladeEngine product documentation
 * for the supported parameters.
 */
struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY {
	union FWCMD_HEADER header;
	union LINK_STATUS_QUERY_PARAMS params;
} __packed;
struct FWCMD_COMMON_ANON_171_REQUEST {
u8 type;
u8 port;
u8 mac1;
u8 permanent;
} __packed;
struct FWCMD_COMMON_ANON_172_RESPONSE {
struct MAC_ADDRESS_FORMAT mac;
} __packed;
union NTWK_MAC_QUERY_PARAMS {
struct FWCMD_COMMON_ANON_171_REQUEST request;
struct FWCMD_COMMON_ANON_172_RESPONSE response;
} __packed;
/* Queries one MAC address. */
struct FWCMD_COMMON_NTWK_MAC_QUERY {
union FWCMD_HEADER header;
union NTWK_MAC_QUERY_PARAMS params;
} __packed;
struct MAC_SET_PARAMS_IN {
u8 type;
u8 port;
u8 mac1;
u8 invalidate;
struct MAC_ADDRESS_FORMAT mac;
} __packed;
struct MAC_SET_PARAMS_OUT {
u32 rsvd0;
} __packed;
union MAC_SET_PARAMS {
struct MAC_SET_PARAMS_IN request;
struct MAC_SET_PARAMS_OUT response;
} __packed;
/* Sets a MAC address. */
struct FWCMD_COMMON_NTWK_MAC_SET {
union FWCMD_HEADER header;
union MAC_SET_PARAMS params;
} __packed;
/* MAC address list. */
struct NTWK_MULTICAST_MAC_LIST {
u8 byte[6];
} __packed;
struct FWCMD_COMMON_NTWK_MULTICAST_SET_REQUEST_PAYLOAD {
u16 num_mac;
u8 promiscuous;
u8 rsvd0;
struct NTWK_MULTICAST_MAC_LIST mac[32];
} __packed;
struct FWCMD_COMMON_ANON_174_RESPONSE {
u32 rsvd0;
} __packed;
union FWCMD_COMMON_ANON_173_PARAMS {
struct FWCMD_COMMON_NTWK_MULTICAST_SET_REQUEST_PAYLOAD request;
struct FWCMD_COMMON_ANON_174_RESPONSE response;
} __packed;
/*
* Sets multicast address hash. The MPU will merge the MAC address lists
* from all clients, including the networking and storage functions.
* This command may fail if the final merged list of MAC addresses exceeds
* 32 entries.
*/
struct FWCMD_COMMON_NTWK_MULTICAST_SET {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_173_PARAMS params;
} __packed;
struct FWCMD_COMMON_NTWK_VLAN_CONFIG_REQUEST_PAYLOAD {
u16 num_vlan;
u8 promiscuous;
u8 rsvd0;
u16 vlan_tag[32];
} __packed;
struct FWCMD_COMMON_ANON_176_RESPONSE {
u32 rsvd0;
} __packed;
union FWCMD_COMMON_ANON_175_PARAMS {
struct FWCMD_COMMON_NTWK_VLAN_CONFIG_REQUEST_PAYLOAD request;
struct FWCMD_COMMON_ANON_176_RESPONSE response;
} __packed;
/*
* Sets VLAN tag filter. The MPU will merge the VLAN tag list from all
* clients, including the networking and storage functions. This command
* may fail if the final vlan_tag array (from all functions) is longer
* than 32 entries.
*/
struct FWCMD_COMMON_NTWK_VLAN_CONFIG {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_175_PARAMS params;
} __packed;
struct RING_DESTROY_REQUEST {
	u16 ring_type;		/* one of the FWCMD_RING_TYPE_* values */
	u16 id;			/* id of the ring to destroy */
	u8 bypass_flush;	/* NOTE(review): presumably skips flushing
				 * pending entries - confirm against the
				 * firmware specification */
	u8 rsvd0;
	u16 rsvd1;
} __packed;
/* Response payload: no data; the single dword is reserved. */
struct FWCMD_COMMON_ANON_190_RESPONSE {
	u32 rsvd0;
} __packed;
union FWCMD_COMMON_ANON_189_PARAMS {
	struct RING_DESTROY_REQUEST request;
	struct FWCMD_COMMON_ANON_190_RESPONSE response;
} __packed;
/*
 * Command for destroying any ring. The connection(s) using the ring should
 * be quiesced before destroying the ring.
 */
struct FWCMD_COMMON_RING_DESTROY {
	union FWCMD_HEADER header;
	union FWCMD_COMMON_ANON_189_PARAMS params;
} __packed;
struct FWCMD_COMMON_ANON_192_REQUEST {
u16 num_pages;
u16 rsvd0;
struct CQ_CONTEXT_AMAP context;
struct PHYS_ADDR pages[4];
} __packed ;
struct FWCMD_COMMON_ANON_193_RESPONSE {
u16 cq_id;
} __packed ;
union FWCMD_COMMON_ANON_191_PARAMS {
struct FWCMD_COMMON_ANON_192_REQUEST request;
struct FWCMD_COMMON_ANON_193_RESPONSE response;
} __packed ;
/*
* Command for creating a completion queue. A Completion Queue must span
* at least 1 page and at most 4 pages. Each completion queue entry
* is 16 bytes regardless of CQ entry format. Thus the ring must be
* at least 256 entries deep (corresponding to 1 page) and can be at
* most 1024 entries deep (corresponding to 4 pages). The number of
* pages posted must contain the CQ ring size as encoded in the context.
*
*/
struct FWCMD_COMMON_CQ_CREATE {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_191_PARAMS params;
} __packed ;
struct FWCMD_COMMON_ANON_198_REQUEST {
u16 num_pages;
u16 rsvd0;
struct EQ_CONTEXT_AMAP context;
struct PHYS_ADDR pages[8];
} __packed ;
struct FWCMD_COMMON_ANON_199_RESPONSE {
u16 eq_id;
} __packed ;
union FWCMD_COMMON_ANON_197_PARAMS {
struct FWCMD_COMMON_ANON_198_REQUEST request;
struct FWCMD_COMMON_ANON_199_RESPONSE response;
} __packed ;
/*
* Command for creating a event queue. An Event Queue must span at least
* 1 page and at most 8 pages. The number of pages posted must contain
* the EQ ring. The ring is defined by the size of the EQ entries (encoded
* in the context) and the number of EQ entries (also encoded in the
* context).
*/
struct FWCMD_COMMON_EQ_CREATE {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_197_PARAMS params;
} __packed ;
struct FWCMD_COMMON_ANON_201_REQUEST {
u16 cq_id;
u16 bcmc_cq_id;
u16 num_pages;
u16 rsvd0;
struct PHYS_ADDR pages[2];
} __packed;
struct FWCMD_COMMON_ANON_202_RESPONSE {
u16 id;
} __packed;
union FWCMD_COMMON_ANON_200_PARAMS {
struct FWCMD_COMMON_ANON_201_REQUEST request;
struct FWCMD_COMMON_ANON_202_RESPONSE response;
} __packed;
/*
* Command for creating Ethernet receive ring. An ERX ring contains ETH_RX_D
* entries (8 bytes each). An ERX ring must be 1024 entries deep
* (corresponding to 2 pages).
*/
struct FWCMD_COMMON_ETH_RX_CREATE {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_200_PARAMS params;
} __packed;
struct FWCMD_COMMON_ANON_204_REQUEST {
u16 num_pages;
u8 ulp_num;
u8 type;
struct ETX_CONTEXT_AMAP context;
struct PHYS_ADDR pages[8];
} __packed ;
struct FWCMD_COMMON_ANON_205_RESPONSE {
u16 cid;
u8 ulp_num;
u8 rsvd0;
} __packed ;
union FWCMD_COMMON_ANON_203_PARAMS {
struct FWCMD_COMMON_ANON_204_REQUEST request;
struct FWCMD_COMMON_ANON_205_RESPONSE response;
} __packed ;
/*
* Command for creating an Ethernet transmit ring. An ETX ring contains
* ETH_WRB entries (16 bytes each). An ETX ring must be at least 256
* entries deep (corresponding to 1 page) and at most 2k entries deep
* (corresponding to 8 pages).
*/
struct FWCMD_COMMON_ETH_TX_CREATE {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_203_PARAMS params;
} __packed ;
struct FWCMD_COMMON_ANON_222_REQUEST {
u16 num_pages;
u16 rsvd0;
struct MCC_RING_CONTEXT_AMAP context;
struct PHYS_ADDR pages[8];
} __packed ;
struct FWCMD_COMMON_ANON_223_RESPONSE {
u16 id;
} __packed ;
union FWCMD_COMMON_ANON_221_PARAMS {
struct FWCMD_COMMON_ANON_222_REQUEST request;
struct FWCMD_COMMON_ANON_223_RESPONSE response;
} __packed ;
/*
* Command for creating the MCC ring. An MCC ring must be at least 16
* entries deep (corresponding to 1 page) and at most 128 entries deep
* (corresponding to 8 pages).
*/
struct FWCMD_COMMON_MCC_CREATE {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_221_PARAMS params;
} __packed ;
struct GET_QOS_IN {
u32 qos_params_rsvd;
} __packed;
struct GET_QOS_OUT {
u32 max_bits_per_second_NIC;
u32 max_packets_per_second_NIC;
u32 max_ios_per_second_iSCSI;
u32 max_bytes_per_second_iSCSI;
u16 domain_VLAN_tag;
u16 fabric_domain_ID;
u32 qos_params_oem[4];
} __packed;
union GET_QOS_PARAMS {
struct GET_QOS_IN request;
struct GET_QOS_OUT response;
} __packed;
/* QOS/Bandwidth settings per domain. Applicable only in VMs. */
struct FWCMD_COMMON_GET_QOS {
union FWCMD_HEADER header;
union GET_QOS_PARAMS params;
} __packed;
struct SET_QOS_IN {
u32 valid_flags;
u32 max_bits_per_second_NIC;
u32 max_packets_per_second_NIC;
u32 max_ios_per_second_iSCSI;
u32 max_bytes_per_second_iSCSI;
u16 domain_VLAN_tag;
u16 fabric_domain_ID;
u32 qos_params_oem[4];
} __packed;
struct SET_QOS_OUT {
u32 qos_params_rsvd;
} __packed;
union SET_QOS_PARAMS {
struct SET_QOS_IN request;
struct SET_QOS_OUT response;
} __packed;
/* QOS/Bandwidth settings per domain. Applicable only in VMs. */
struct FWCMD_COMMON_SET_QOS {
union FWCMD_HEADER header;
union SET_QOS_PARAMS params;
} __packed;
struct SET_FRAME_SIZE_IN {
u32 max_tx_frame_size;
u32 max_rx_frame_size;
} __packed;
struct SET_FRAME_SIZE_OUT {
u32 chip_max_tx_frame_size;
u32 chip_max_rx_frame_size;
} __packed;
union SET_FRAME_SIZE_PARAMS {
struct SET_FRAME_SIZE_IN request;
struct SET_FRAME_SIZE_OUT response;
} __packed;
/* Set frame size command. Only host domain may issue this command. */
struct FWCMD_COMMON_SET_FRAME_SIZE {
union FWCMD_HEADER header;
union SET_FRAME_SIZE_PARAMS params;
} __packed;
struct FORCE_FAILOVER_IN {
	u32 move_to_port;	/* FAILOVER_PORT_* port selection */
	u32 failover_config;	/* FAILOVER_CONFIG_* automatic-failover
				 * setting */
} __packed;
/* Response payload: no data; the single dword is reserved. */
struct FWCMD_COMMON_ANON_231_RESPONSE {
	u32 rsvd0;
} __packed;
union FWCMD_COMMON_ANON_230_PARAMS {
	struct FORCE_FAILOVER_IN request;
	struct FWCMD_COMMON_ANON_231_RESPONSE response;
} __packed;
/*
 * Use this command to control failover in BladeEngine. It may be used
 * to failback to a restored port or to forcibly move traffic from
 * one port to another. It may also be used to enable or disable the
 * automatic failover feature. This command can only be issued by domain
 * 0.
 */
struct FWCMD_COMMON_FORCE_FAILOVER {
	union FWCMD_HEADER header;
	union FWCMD_COMMON_ANON_230_PARAMS params;
} __packed;
struct FWCMD_COMMON_ANON_240_REQUEST {
u64 context;
} __packed;
struct FWCMD_COMMON_ANON_241_RESPONSE {
u64 context;
} __packed;
union FWCMD_COMMON_ANON_239_PARAMS {
struct FWCMD_COMMON_ANON_240_REQUEST request;
struct FWCMD_COMMON_ANON_241_RESPONSE response;
} __packed;
/*
* This command can be used by clients as a no-operation request. Typical
* uses for drivers are as a heartbeat mechanism, or deferred processing
* catalyst. The ARM will always complete this command with a good completion.
* The 64-bit parameter is not touched by the ARM processor.
*/
struct FWCMD_COMMON_NOP {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_239_PARAMS params;
} __packed;
struct NTWK_RX_FILTER_SETTINGS {
u8 promiscuous;
u8 ip_cksum;
u8 tcp_cksum;
u8 udp_cksum;
u8 pass_err;
u8 pass_ckerr;
u8 strip_crc;
u8 mcast_en;
u8 bcast_en;
u8 mcast_promiscuous_en;
u8 unicast_en;
u8 vlan_promiscuous;
} __packed;
union FWCMD_COMMON_ANON_242_PARAMS {
struct NTWK_RX_FILTER_SETTINGS request;
struct NTWK_RX_FILTER_SETTINGS response;
} __packed;
/*
* This command is used to modify the ethernet receive filter configuration.
* Only domain 0 network function drivers may issue this command. The
* applied configuration is returned in the response payload. Note:
* Some receive packet filter settings are global on BladeEngine and
* can affect both the storage and network function clients that the
 * BladeEngine hardware and firmware serve. Additionally, depending
* on the revision of BladeEngine, some ethernet receive filter settings
* are dependent on others. If a dependency exists between settings
* for the BladeEngine revision, and the command request settings do
* not meet the dependency requirement, the invalid settings will not
 * be applied despite the command succeeding. For example: a driver may
* request to enable broadcast packets, but not enable multicast packets.
* On early revisions of BladeEngine, there may be no distinction between
* broadcast and multicast filters, so broadcast could not be enabled
 * without enabling multicast. In this scenario, the command would still
* succeed, but the response payload would indicate the previously
* configured broadcast and multicast setting.
*/
struct FWCMD_COMMON_NTWK_RX_FILTER {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_242_PARAMS params;
} __packed;
struct FWCMD_COMMON_ANON_244_REQUEST {
u32 rsvd0;
} __packed;
struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD {
u8 firmware_version_string[32];
u8 fw_on_flash_version_string[32];
} __packed;
union FWCMD_COMMON_ANON_243_PARAMS {
struct FWCMD_COMMON_ANON_244_REQUEST request;
struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD response;
} __packed;
/* This command retrieves the firmware version. */
struct FWCMD_COMMON_GET_FW_VERSION {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_243_PARAMS params;
} __packed;
struct FWCMD_COMMON_ANON_246_REQUEST {
u16 tx_flow_control;
u16 rx_flow_control;
} __packed;
struct FWCMD_COMMON_ANON_247_RESPONSE {
u32 rsvd0;
} __packed;
union FWCMD_COMMON_ANON_245_PARAMS {
struct FWCMD_COMMON_ANON_246_REQUEST request;
struct FWCMD_COMMON_ANON_247_RESPONSE response;
} __packed;
/*
 * This command is used to program BladeEngine flow control behavior.
 * Only the host networking driver is allowed to use this command.
*/
struct FWCMD_COMMON_SET_FLOW_CONTROL {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_245_PARAMS params;
} __packed;
struct FWCMD_COMMON_ANON_249_REQUEST {
u32 rsvd0;
} __packed;
struct FWCMD_COMMON_ANON_250_RESPONSE {
u16 tx_flow_control;
u16 rx_flow_control;
} __packed;
union FWCMD_COMMON_ANON_248_PARAMS {
struct FWCMD_COMMON_ANON_249_REQUEST request;
struct FWCMD_COMMON_ANON_250_RESPONSE response;
} __packed;
/* This command is used to read BladeEngine flow control settings. */
struct FWCMD_COMMON_GET_FLOW_CONTROL {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_248_PARAMS params;
} __packed;
struct EQ_DELAY_PARAMS {
u32 eq_id;
u32 delay_in_microseconds;
} __packed;
struct FWCMD_COMMON_ANON_257_REQUEST {
u32 num_eq;
u32 rsvd0;
struct EQ_DELAY_PARAMS delay[16];
} __packed;
struct FWCMD_COMMON_ANON_258_RESPONSE {
u32 delay_resolution_in_microseconds;
u32 delay_max_in_microseconds;
} __packed;
union MODIFY_EQ_DELAY_PARAMS {
struct FWCMD_COMMON_ANON_257_REQUEST request;
struct FWCMD_COMMON_ANON_258_RESPONSE response;
} __packed;
/* This command changes the EQ delay for a given set of EQs. */
struct FWCMD_COMMON_MODIFY_EQ_DELAY {
union FWCMD_HEADER header;
union MODIFY_EQ_DELAY_PARAMS params;
} __packed;
struct FWCMD_COMMON_ANON_260_REQUEST {
u32 rsvd0;
} __packed;
struct BE_FIRMWARE_CONFIG {
u16 be_config_number;
u16 asic_revision;
u32 nic_ulp_mask;
u32 tulp_mask;
u32 iscsi_ulp_mask;
u32 rdma_ulp_mask;
u32 rsvd0[4];
u32 eth_tx_id_start;
u32 eth_tx_id_count;
u32 eth_rx_id_start;
u32 eth_rx_id_count;
u32 tpm_wrbq_id_start;
u32 tpm_wrbq_id_count;
u32 tpm_defq_id_start;
u32 tpm_defq_id_count;
u32 iscsi_wrbq_id_start;
u32 iscsi_wrbq_id_count;
u32 iscsi_defq_id_start;
u32 iscsi_defq_id_count;
u32 rdma_qp_id_start;
u32 rdma_qp_id_count;
u32 rsvd1[8];
} __packed;
union FWCMD_COMMON_ANON_259_PARAMS {
struct FWCMD_COMMON_ANON_260_REQUEST request;
struct BE_FIRMWARE_CONFIG response;
} __packed;
/*
 * This command queries the current firmware configuration parameters.
* The static configuration type is defined by be_config_number. This
* differentiates different BladeEngine builds, such as iSCSI Initiator
* versus iSCSI Target. For a given static configuration, the Upper
* Layer Protocol (ULP) processors may be reconfigured to support different
* protocols. Each ULP processor supports one or more protocols. The
* masks indicate which processors are configured for each protocol.
* For a given static configuration, the number of TCP connections
* supported for each protocol may vary. The *_id_start and *_id_count
* variables define a linear range of IDs that are available for each
* supported protocol. The *_id_count may be used by the driver to allocate
* the appropriate number of connection resources. The *_id_start may
* be used to map the arbitrary range of IDs to a zero-based range
* of indices.
*/
struct FWCMD_COMMON_FIRMWARE_CONFIG {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_259_PARAMS params;
} __packed;
struct FWCMD_COMMON_PORT_EQUALIZATION_PARAMS {
u32 emph_lev_sel_port0;
u32 emph_lev_sel_port1;
u8 xaui_vo_sel;
u8 xaui_state;
u16 rsvd0;
u32 xaui_eq_vector;
} __packed;
struct FWCMD_COMMON_ANON_262_REQUEST {
u32 rsvd0;
} __packed;
union FWCMD_COMMON_ANON_261_PARAMS {
struct FWCMD_COMMON_ANON_262_REQUEST request;
struct FWCMD_COMMON_PORT_EQUALIZATION_PARAMS response;
} __packed;
/*
 * This command can be used to read XAUI equalization parameters. The
* ARM firmware applies default equalization parameters during initialization.
* These parameters may be customer-specific when derived from the
* SEEPROM. See SEEPROM_DATA for equalization specific fields.
*/
struct FWCMD_COMMON_GET_PORT_EQUALIZATION {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_261_PARAMS params;
} __packed;
struct FWCMD_COMMON_ANON_264_RESPONSE {
u32 rsvd0;
} __packed;
union FWCMD_COMMON_ANON_263_PARAMS {
struct FWCMD_COMMON_PORT_EQUALIZATION_PARAMS request;
struct FWCMD_COMMON_ANON_264_RESPONSE response;
} __packed;
/*
 * This command can be used to set XAUI equalization parameters. The ARM
* firmware applies default equalization parameters during initialization.
* These parameters may be customer-specific when derived from the
* SEEPROM. See SEEPROM_DATA for equalization specific fields.
*/
struct FWCMD_COMMON_SET_PORT_EQUALIZATION {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_263_PARAMS params;
} __packed;
#endif /* __fwcmd_common_bmap_h__ */

View File

@ -1,280 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __fwcmd_eth_bmap_h__
#define __fwcmd_eth_bmap_h__
#include "fwcmd_hdr_bmap.h"
#include "fwcmd_types_bmap.h"
struct MIB_ETH_STATISTICS_PARAMS_IN {
u32 rsvd0;
} __packed;
struct BE_RXF_STATS {
u32 p0recvdtotalbytesLSD; /* DWORD 0 */
u32 p0recvdtotalbytesMSD; /* DWORD 1 */
u32 p0recvdtotalframes; /* DWORD 2 */
u32 p0recvdunicastframes; /* DWORD 3 */
u32 p0recvdmulticastframes; /* DWORD 4 */
u32 p0recvdbroadcastframes; /* DWORD 5 */
u32 p0crcerrors; /* DWORD 6 */
u32 p0alignmentsymerrs; /* DWORD 7 */
u32 p0pauseframesrecvd; /* DWORD 8 */
u32 p0controlframesrecvd; /* DWORD 9 */
u32 p0inrangelenerrors; /* DWORD 10 */
u32 p0outrangeerrors; /* DWORD 11 */
u32 p0frametoolongerrors; /* DWORD 12 */
u32 p0droppedaddressmatch; /* DWORD 13 */
u32 p0droppedvlanmismatch; /* DWORD 14 */
u32 p0ipdroppedtoosmall; /* DWORD 15 */
u32 p0ipdroppedtooshort; /* DWORD 16 */
u32 p0ipdroppedhdrtoosmall; /* DWORD 17 */
u32 p0tcpdroppedlen; /* DWORD 18 */
u32 p0droppedrunt; /* DWORD 19 */
u32 p0recvd64; /* DWORD 20 */
u32 p0recvd65_127; /* DWORD 21 */
u32 p0recvd128_256; /* DWORD 22 */
u32 p0recvd256_511; /* DWORD 23 */
u32 p0recvd512_1023; /* DWORD 24 */
u32 p0recvd1518_1522; /* DWORD 25 */
u32 p0recvd1522_2047; /* DWORD 26 */
u32 p0recvd2048_4095; /* DWORD 27 */
u32 p0recvd4096_8191; /* DWORD 28 */
u32 p0recvd8192_9216; /* DWORD 29 */
u32 p0rcvdipcksmerrs; /* DWORD 30 */
u32 p0recvdtcpcksmerrs; /* DWORD 31 */
u32 p0recvdudpcksmerrs; /* DWORD 32 */
u32 p0recvdnonrsspackets; /* DWORD 33 */
u32 p0recvdippackets; /* DWORD 34 */
u32 p0recvdchute1packets; /* DWORD 35 */
u32 p0recvdchute2packets; /* DWORD 36 */
u32 p0recvdchute3packets; /* DWORD 37 */
u32 p0recvdipsecpackets; /* DWORD 38 */
u32 p0recvdmanagementpackets; /* DWORD 39 */
u32 p0xmitbyteslsd; /* DWORD 40 */
u32 p0xmitbytesmsd; /* DWORD 41 */
u32 p0xmitunicastframes; /* DWORD 42 */
u32 p0xmitmulticastframes; /* DWORD 43 */
u32 p0xmitbroadcastframes; /* DWORD 44 */
u32 p0xmitpauseframes; /* DWORD 45 */
u32 p0xmitcontrolframes; /* DWORD 46 */
u32 p0xmit64; /* DWORD 47 */
u32 p0xmit65_127; /* DWORD 48 */
u32 p0xmit128_256; /* DWORD 49 */
u32 p0xmit256_511; /* DWORD 50 */
u32 p0xmit512_1023; /* DWORD 51 */
u32 p0xmit1518_1522; /* DWORD 52 */
u32 p0xmit1522_2047; /* DWORD 53 */
u32 p0xmit2048_4095; /* DWORD 54 */
u32 p0xmit4096_8191; /* DWORD 55 */
u32 p0xmit8192_9216; /* DWORD 56 */
u32 p0rxfifooverflowdropped; /* DWORD 57 */
u32 p0ipseclookupfaileddropped; /* DWORD 58 */
u32 p1recvdtotalbytesLSD; /* DWORD 59 */
u32 p1recvdtotalbytesMSD; /* DWORD 60 */
u32 p1recvdtotalframes; /* DWORD 61 */
u32 p1recvdunicastframes; /* DWORD 62 */
u32 p1recvdmulticastframes; /* DWORD 63 */
u32 p1recvdbroadcastframes; /* DWORD 64 */
u32 p1crcerrors; /* DWORD 65 */
u32 p1alignmentsymerrs; /* DWORD 66 */
u32 p1pauseframesrecvd; /* DWORD 67 */
u32 p1controlframesrecvd; /* DWORD 68 */
u32 p1inrangelenerrors; /* DWORD 69 */
u32 p1outrangeerrors; /* DWORD 70 */
u32 p1frametoolongerrors; /* DWORD 71 */
u32 p1droppedaddressmatch; /* DWORD 72 */
u32 p1droppedvlanmismatch; /* DWORD 73 */
u32 p1ipdroppedtoosmall; /* DWORD 74 */
u32 p1ipdroppedtooshort; /* DWORD 75 */
u32 p1ipdroppedhdrtoosmall; /* DWORD 76 */
u32 p1tcpdroppedlen; /* DWORD 77 */
u32 p1droppedrunt; /* DWORD 78 */
u32 p1recvd64; /* DWORD 79 */
u32 p1recvd65_127; /* DWORD 80 */
u32 p1recvd128_256; /* DWORD 81 */
u32 p1recvd256_511; /* DWORD 82 */
u32 p1recvd512_1023; /* DWORD 83 */
u32 p1recvd1518_1522; /* DWORD 84 */
u32 p1recvd1522_2047; /* DWORD 85 */
u32 p1recvd2048_4095; /* DWORD 86 */
u32 p1recvd4096_8191; /* DWORD 87 */
u32 p1recvd8192_9216; /* DWORD 88 */
u32 p1rcvdipcksmerrs; /* DWORD 89 */
u32 p1recvdtcpcksmerrs; /* DWORD 90 */
u32 p1recvdudpcksmerrs; /* DWORD 91 */
u32 p1recvdnonrsspackets; /* DWORD 92 */
u32 p1recvdippackets; /* DWORD 93 */
u32 p1recvdchute1packets; /* DWORD 94 */
u32 p1recvdchute2packets; /* DWORD 95 */
u32 p1recvdchute3packets; /* DWORD 96 */
u32 p1recvdipsecpackets; /* DWORD 97 */
u32 p1recvdmanagementpackets; /* DWORD 98 */
u32 p1xmitbyteslsd; /* DWORD 99 */
u32 p1xmitbytesmsd; /* DWORD 100 */
u32 p1xmitunicastframes; /* DWORD 101 */
u32 p1xmitmulticastframes; /* DWORD 102 */
u32 p1xmitbroadcastframes; /* DWORD 103 */
u32 p1xmitpauseframes; /* DWORD 104 */
u32 p1xmitcontrolframes; /* DWORD 105 */
u32 p1xmit64; /* DWORD 106 */
u32 p1xmit65_127; /* DWORD 107 */
u32 p1xmit128_256; /* DWORD 108 */
u32 p1xmit256_511; /* DWORD 109 */
u32 p1xmit512_1023; /* DWORD 110 */
u32 p1xmit1518_1522; /* DWORD 111 */
u32 p1xmit1522_2047; /* DWORD 112 */
u32 p1xmit2048_4095; /* DWORD 113 */
u32 p1xmit4096_8191; /* DWORD 114 */
u32 p1xmit8192_9216; /* DWORD 115 */
u32 p1rxfifooverflowdropped; /* DWORD 116 */
u32 p1ipseclookupfaileddropped; /* DWORD 117 */
u32 pxdroppednopbuf; /* DWORD 118 */
u32 pxdroppednotxpb; /* DWORD 119 */
u32 pxdroppednoipsecbuf; /* DWORD 120 */
u32 pxdroppednoerxdescr; /* DWORD 121 */
u32 pxdroppednotpredescr; /* DWORD 122 */
u32 pxrecvdmanagementportpackets; /* DWORD 123 */
u32 pxrecvdmanagementportbytes; /* DWORD 124 */
u32 pxrecvdmanagementportpauseframes; /* DWORD 125 */
u32 pxrecvdmanagementporterrors; /* DWORD 126 */
u32 pxxmitmanagementportpackets; /* DWORD 127 */
u32 pxxmitmanagementportbytes; /* DWORD 128 */
u32 pxxmitmanagementportpause; /* DWORD 129 */
u32 pxxmitmanagementportrxfifooverflow; /* DWORD 130 */
u32 pxrecvdipsecipcksmerrs; /* DWORD 131 */
u32 pxrecvdtcpsecipcksmerrs; /* DWORD 132 */
u32 pxrecvdudpsecipcksmerrs; /* DWORD 133 */
u32 pxipsecrunt; /* DWORD 134 */
u32 pxipsecaddressmismatchdropped; /* DWORD 135 */
u32 pxipsecrxfifooverflowdropped; /* DWORD 136 */
u32 pxipsecframestoolong; /* DWORD 137 */
u32 pxipsectotalipframes; /* DWORD 138 */
u32 pxipseciptoosmall; /* DWORD 139 */
u32 pxipseciptooshort; /* DWORD 140 */
u32 pxipseciphdrtoosmall; /* DWORD 141 */
u32 pxipsectcphdrbad; /* DWORD 142 */
u32 pxrecvdipsecchute1; /* DWORD 143 */
u32 pxrecvdipsecchute2; /* DWORD 144 */
u32 pxrecvdipsecchute3; /* DWORD 145 */
u32 pxdropped7frags; /* DWORD 146 */
u32 pxdroppedfrags; /* DWORD 147 */
u32 pxdroppedinvalidfragring; /* DWORD 148 */
u32 pxnumforwardedpackets; /* DWORD 149 */
} __packed;
union MIB_ETH_STATISTICS_PARAMS {
struct MIB_ETH_STATISTICS_PARAMS_IN request;
struct BE_RXF_STATS response;
} __packed;
/*
* Query ethernet statistics. All domains may issue this command. The
* host domain drivers may optionally reset internal statistic counters
* with a query.
*/
struct FWCMD_ETH_GET_STATISTICS {
union FWCMD_HEADER header;
union MIB_ETH_STATISTICS_PARAMS params;
} __packed;
/* Per-port promiscuous receive enable/disable flags. */
struct FWCMD_ETH_ANON_175_REQUEST {
	u8 port0_promiscuous;
	u8 port1_promiscuous;
	u16 rsvd0;
} __packed;
/* Response payload: no data; the single dword is reserved. */
struct FWCMD_ETH_ANON_176_RESPONSE {
	u32 rsvd0;
} __packed;
union FWCMD_ETH_ANON_174_PARAMS {
	struct FWCMD_ETH_ANON_175_REQUEST request;
	struct FWCMD_ETH_ANON_176_RESPONSE response;
} __packed;
/* Enables/Disables promiscuous ethernet receive mode. */
struct FWCMD_ETH_PROMISCUOUS {
	union FWCMD_HEADER header;
	union FWCMD_ETH_ANON_174_PARAMS params;
} __packed;
struct FWCMD_ETH_ANON_178_REQUEST {
u32 new_fragsize_log2;
} __packed;
struct FWCMD_ETH_ANON_179_RESPONSE {
u32 actual_fragsize_log2;
} __packed;
union FWCMD_ETH_ANON_177_PARAMS {
struct FWCMD_ETH_ANON_178_REQUEST request;
struct FWCMD_ETH_ANON_179_RESPONSE response;
} __packed;
/*
* Sets the Ethernet RX fragment size. Only host (domain 0) networking
* drivers may issue this command. This call will fail for non-host
* protection domains. In this situation the MCC CQ status will indicate
 * a failure due to insufficient privileges. The response should be
* ignored, and the driver should use the FWCMD_ETH_GET_FRAG_SIZE to
* query the existing ethernet receive fragment size. It must use this
* fragment size for all fragments in the ethernet receive ring. If
* the command succeeds, the driver must use the frag size indicated
* in the command response since the requested frag size may not be applied
* until the next reboot. When the requested fragsize matches the response
* fragsize, this indicates the request was applied immediately.
*/
struct FWCMD_ETH_SET_RX_FRAG_SIZE {
union FWCMD_HEADER header;
union FWCMD_ETH_ANON_177_PARAMS params;
} __packed;
struct FWCMD_ETH_ANON_181_REQUEST {
u32 rsvd0;
} __packed;
struct FWCMD_ETH_ANON_182_RESPONSE {
u32 actual_fragsize_log2;
} __packed;
union FWCMD_ETH_ANON_180_PARAMS {
struct FWCMD_ETH_ANON_181_REQUEST request;
struct FWCMD_ETH_ANON_182_RESPONSE response;
} __packed;
/*
* Queries the Ethernet RX fragment size. All domains may issue this
* command. The driver should call this command to determine the minimum
* required fragment size for the ethernet RX ring buffers. Drivers
* may choose to use a larger size for each fragment buffer, but BladeEngine
* will use up to the configured minimum required fragsize in each ethernet
* receive fragment buffer. For example, if the ethernet receive fragment
* size is configured to 4kB, and a driver uses 8kB fragments, a 6kB
 * ethernet packet received by BladeEngine will be split across two
 * of the driver's receive fragments (4kB in one fragment buffer, and
* 2kB in the subsequent fragment buffer).
*/
/* FWCMD to query the minimum Ethernet RX fragment size (usage notes above). */
struct FWCMD_ETH_GET_RX_FRAG_SIZE {
	union FWCMD_HEADER header;
	union FWCMD_ETH_ANON_180_PARAMS params;
} __packed;
#endif /* __fwcmd_eth_bmap_h__ */

View File

@ -1,54 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __fwcmd_hdr_bmap_h__
#define __fwcmd_hdr_bmap_h__
/* Header prepended to every FWCMD request issued to the firmware. */
struct FWCMD_REQUEST_HEADER {
	u8 opcode;		/* command opcode within the subsystem */
	u8 subsystem;		/* FWCMD_SUBSYSTEM_* code */
	u8 port_number;
	u8 domain;		/* protection domain issuing the command */
	u32 timeout;
	u32 request_length;	/* bytes in the request payload */
	u32 rsvd0;
} __packed;
/* Header of every FWCMD response returned by the firmware. */
struct FWCMD_RESPONSE_HEADER {
	u8 opcode;
	u8 subsystem;
	u8 rsvd0;
	u8 domain;
	u8 status;		/* MGMT_STATUS_* completion code */
	u8 additional_status;
	u16 rsvd1;
	u32 response_length;
	u32 actual_response_length;
} __packed;
/*
 * The firmware/driver overwrites the input FWCMD_REQUEST_HEADER with
 * the output FWCMD_RESPONSE_HEADER, hence the union.
 */
union FWCMD_HEADER {
	struct FWCMD_REQUEST_HEADER request;
	struct FWCMD_RESPONSE_HEADER response;
} __packed;
#endif /* __fwcmd_hdr_bmap_h__ */

View File

@ -1,94 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __fwcmd_mcc_amap_h__
#define __fwcmd_mcc_amap_h__
#include "fwcmd_opcodes.h"
/*
* Where applicable, a WRB, may contain a list of Scatter-gather elements.
* Each element supports a 64 bit address and a 32bit length field.
*/
/* Bit-level template of one MCC scatter-gather element. */
struct BE_MCC_SGE_AMAP {
	u8 pa_lo[32];	/* DWORD 0 */
	u8 pa_hi[32];	/* DWORD 1 */
	u8 length[32];	/* DWORD 2 */
} __packed;
/* Correctly-sized storage (3 DWORDs) for an MCC SGE. */
struct MCC_SGE_AMAP {
	u32 dw[3];
};
/*
 * The design of an MCC_SGE allows up to 19 elements to be embedded
 * in a WRB, supporting 64KB data transfers (assuming a 4KB page size).
 */
/* WRB payload: either an SGE list or embedded command data (shared storage). */
struct BE_MCC_WRB_PAYLOAD_AMAP {
	union {
		struct BE_MCC_SGE_AMAP sgl[19];
		u8 embedded[59][32];	/* DWORD 0 */
	};
} __packed;
/* Correctly-sized storage (59 DWORDs) for the WRB payload. */
struct MCC_WRB_PAYLOAD_AMAP {
	u32 dw[59];
};
/*
 * This is the structure of the MCC Command WRB for commands
 * sent to the Management Processing Unit (MPU). See section
 * for usage in embedded and non-embedded modes.
 */
struct BE_MCC_WRB_AMAP {
	u8 embedded;		/* DWORD 0 */
	u8 rsvd0[2];		/* DWORD 0 */
	u8 sge_count[5];	/* DWORD 0 */
	u8 rsvd1[16];		/* DWORD 0 */
	u8 special[8];		/* DWORD 0 */
	u8 payload_length[32];	/* DWORD 1 */
	u8 tag[2][32];		/* DWORD 2 */
	u8 rsvd2[32];		/* DWORD 4 */
	struct BE_MCC_WRB_PAYLOAD_AMAP payload;
} __packed;
/* Correctly-sized storage (64 DWORDs) for an MCC WRB. */
struct MCC_WRB_AMAP {
	u32 dw[64];
};
/* This is the structure of the MCC Completion queue entry */
struct BE_MCC_CQ_ENTRY_AMAP {
	u8 completion_status[16];	/* DWORD 0 */
	u8 extended_status[16];		/* DWORD 0 */
	u8 mcc_tag[2][32];		/* DWORD 1 */
	u8 rsvd0[27];			/* DWORD 3 */
	u8 consumed;			/* DWORD 3 */
	u8 completed;			/* DWORD 3 */
	u8 hpi_buffer_completion;	/* DWORD 3 */
	u8 async_event;			/* DWORD 3 */
	u8 valid;			/* DWORD 3 */
} __packed;
/* Correctly-sized storage (4 DWORDs) for an MCC CQ entry. */
struct MCC_CQ_ENTRY_AMAP {
	u32 dw[4];
};
/* Mailbox structures used by the MPU during bootstrap */
struct BE_MCC_MAILBOX_AMAP {
	struct BE_MCC_WRB_AMAP wrb;
	struct BE_MCC_CQ_ENTRY_AMAP cq;
} __packed;
/* Correctly-sized storage (68 DWORDs) for the bootstrap mailbox. */
struct MCC_MAILBOX_AMAP {
	u32 dw[68];
};
#endif /* __fwcmd_mcc_amap_h__ */

View File

@ -1,244 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __fwcmd_opcodes_amap_h__
#define __fwcmd_opcodes_amap_h__
/*
* --- FWCMD_SUBSYSTEMS ---
* The commands are grouped into the following subsystems. The subsystem
* code along with the opcode uniquely identify a particular fwcmd.
*/
#define FWCMD_SUBSYSTEM_RSVD (0) /* This subsystem is reserved. It is */
/* never used. */
#define FWCMD_SUBSYSTEM_COMMON (1) /* CMDs in this group are common to
* all subsystems. See
* COMMON_SUBSYSTEM_OPCODES for opcodes
* and Common Host Configuration CMDs
* for the FWCMD descriptions.
*/
#define FWCMD_SUBSYSTEM_COMMON_ISCSI (2) /* CMDs in this group are */
/*
* common to Initiator and Target. See
* COMMON_ISCSI_SUBSYSTEM_OPCODES and
* Common iSCSI Initiator and Target
* CMDs for the command descriptions.
*/
#define FWCMD_SUBSYSTEM_ETH (3) /* This subsystem is used to
execute Ethernet commands. */
#define FWCMD_SUBSYSTEM_TPM (4) /* This subsystem is used
to execute TPM commands. */
#define FWCMD_SUBSYSTEM_PXE_UNDI (5) /* This subsystem is used
* to execute PXE
* and UNDI specific commands.
*/
#define FWCMD_SUBSYSTEM_ISCSI_INI (6) /* This subsystem is used to
execute ISCSI Initiator
specific commands.
*/
#define FWCMD_SUBSYSTEM_ISCSI_TGT (7) /* This subsystem is used
to execute iSCSI Target
specific commands.between
PTL and ARM firmware.
*/
#define FWCMD_SUBSYSTEM_MILI_PTL (8) /* This subsystem is used to
execute iSCSI Target specific
commands.between MILI
and PTL. */
#define FWCMD_SUBSYSTEM_MILI_TMD (9) /* This subsystem is used to
execute iSCSI Target specific
commands between MILI
and TMD. */
#define FWCMD_SUBSYSTEM_PROXY (11) /* This subsystem is used
to execute proxied commands
within the host at the
explicit request of a
non-privileged domain.
This 'subsystem' is entirely
virtual from the controller
and firmware perspective as
it is implemented in host
drivers.
*/
/*
* --- COMMON_SUBSYSTEM_OPCODES ---
* These opcodes are common to both networking and storage PCI
* functions. They are used to reserve resources and configure
* BladeEngine. These opcodes all use the FWCMD_SUBSYSTEM_COMMON
* subsystem code.
*/
#define OPCODE_COMMON_NTWK_MAC_QUERY (1)
#define SUBSYSTEM_COMMON_NTWK_MAC_QUERY (1)
#define SUBSYSTEM_COMMON_NTWK_MAC_SET (1)
#define SUBSYSTEM_COMMON_NTWK_MULTICAST_SET (1)
#define SUBSYSTEM_COMMON_NTWK_VLAN_CONFIG (1)
#define SUBSYSTEM_COMMON_NTWK_LINK_STATUS_QUERY (1)
#define SUBSYSTEM_COMMON_READ_FLASHROM (1)
#define SUBSYSTEM_COMMON_WRITE_FLASHROM (1)
#define SUBSYSTEM_COMMON_QUERY_MAX_FWCMD_BUFFER_SIZE (1)
#define SUBSYSTEM_COMMON_ADD_PAGE_TABLES (1)
#define SUBSYSTEM_COMMON_REMOVE_PAGE_TABLES (1)
#define SUBSYSTEM_COMMON_RING_DESTROY (1)
#define SUBSYSTEM_COMMON_CQ_CREATE (1)
#define SUBSYSTEM_COMMON_EQ_CREATE (1)
#define SUBSYSTEM_COMMON_ETH_RX_CREATE (1)
#define SUBSYSTEM_COMMON_ETH_TX_CREATE (1)
#define SUBSYSTEM_COMMON_ISCSI_DEFQ_CREATE (1)
#define SUBSYSTEM_COMMON_ISCSI_WRBQ_CREATE (1)
#define SUBSYSTEM_COMMON_MCC_CREATE (1)
#define SUBSYSTEM_COMMON_JELL_CONFIG (1)
#define SUBSYSTEM_COMMON_FORCE_FAILOVER (1)
#define SUBSYSTEM_COMMON_ADD_TEMPLATE_HEADER_BUFFERS (1)
#define SUBSYSTEM_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS (1)
#define SUBSYSTEM_COMMON_POST_ZERO_BUFFER (1)
#define SUBSYSTEM_COMMON_GET_QOS (1)
#define SUBSYSTEM_COMMON_SET_QOS (1)
#define SUBSYSTEM_COMMON_TCP_GET_STATISTICS (1)
#define SUBSYSTEM_COMMON_SEEPROM_READ (1)
#define SUBSYSTEM_COMMON_TCP_STATE_QUERY (1)
#define SUBSYSTEM_COMMON_GET_CNTL_ATTRIBUTES (1)
#define SUBSYSTEM_COMMON_NOP (1)
#define SUBSYSTEM_COMMON_NTWK_RX_FILTER (1)
#define SUBSYSTEM_COMMON_GET_FW_VERSION (1)
#define SUBSYSTEM_COMMON_SET_FLOW_CONTROL (1)
#define SUBSYSTEM_COMMON_GET_FLOW_CONTROL (1)
#define SUBSYSTEM_COMMON_SET_TCP_PARAMETERS (1)
#define SUBSYSTEM_COMMON_SET_FRAME_SIZE (1)
#define SUBSYSTEM_COMMON_GET_FAT (1)
#define SUBSYSTEM_COMMON_MODIFY_EQ_DELAY (1)
#define SUBSYSTEM_COMMON_FIRMWARE_CONFIG (1)
#define SUBSYSTEM_COMMON_ENABLE_DISABLE_DOMAINS (1)
#define SUBSYSTEM_COMMON_GET_DOMAIN_CONFIG (1)
#define SUBSYSTEM_COMMON_SET_VLD_CONFIG (1)
#define SUBSYSTEM_COMMON_GET_VLD_CONFIG (1)
#define SUBSYSTEM_COMMON_GET_PORT_EQUALIZATION (1)
#define SUBSYSTEM_COMMON_SET_PORT_EQUALIZATION (1)
#define SUBSYSTEM_COMMON_RED_CONFIG (1)
#define OPCODE_COMMON_NTWK_MAC_SET (2)
#define OPCODE_COMMON_NTWK_MULTICAST_SET (3)
#define OPCODE_COMMON_NTWK_VLAN_CONFIG (4)
#define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY (5)
#define OPCODE_COMMON_READ_FLASHROM (6)
#define OPCODE_COMMON_WRITE_FLASHROM (7)
#define OPCODE_COMMON_QUERY_MAX_FWCMD_BUFFER_SIZE (8)
#define OPCODE_COMMON_ADD_PAGE_TABLES (9)
#define OPCODE_COMMON_REMOVE_PAGE_TABLES (10)
#define OPCODE_COMMON_RING_DESTROY (11)
#define OPCODE_COMMON_CQ_CREATE (12)
#define OPCODE_COMMON_EQ_CREATE (13)
#define OPCODE_COMMON_ETH_RX_CREATE (14)
#define OPCODE_COMMON_ETH_TX_CREATE (15)
#define OPCODE_COMMON_NET_RESERVED0 (16) /* Reserved */
#define OPCODE_COMMON_NET_RESERVED1 (17) /* Reserved */
#define OPCODE_COMMON_NET_RESERVED2 (18) /* Reserved */
#define OPCODE_COMMON_ISCSI_DEFQ_CREATE (19)
#define OPCODE_COMMON_ISCSI_WRBQ_CREATE (20)
#define OPCODE_COMMON_MCC_CREATE (21)
#define OPCODE_COMMON_JELL_CONFIG (22)
#define OPCODE_COMMON_FORCE_FAILOVER (23)
#define OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS (24)
#define OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS (25)
#define OPCODE_COMMON_POST_ZERO_BUFFER (26)
#define OPCODE_COMMON_GET_QOS (27)
#define OPCODE_COMMON_SET_QOS (28)
#define OPCODE_COMMON_TCP_GET_STATISTICS (29)
#define OPCODE_COMMON_SEEPROM_READ (30)
#define OPCODE_COMMON_TCP_STATE_QUERY (31)
#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES (32)
#define OPCODE_COMMON_NOP (33)
#define OPCODE_COMMON_NTWK_RX_FILTER (34)
#define OPCODE_COMMON_GET_FW_VERSION (35)
#define OPCODE_COMMON_SET_FLOW_CONTROL (36)
#define OPCODE_COMMON_GET_FLOW_CONTROL (37)
#define OPCODE_COMMON_SET_TCP_PARAMETERS (38)
#define OPCODE_COMMON_SET_FRAME_SIZE (39)
#define OPCODE_COMMON_GET_FAT (40)
#define OPCODE_COMMON_MODIFY_EQ_DELAY (41)
#define OPCODE_COMMON_FIRMWARE_CONFIG (42)
#define OPCODE_COMMON_ENABLE_DISABLE_DOMAINS (43)
#define OPCODE_COMMON_GET_DOMAIN_CONFIG (44)
#define OPCODE_COMMON_SET_VLD_CONFIG (45)
#define OPCODE_COMMON_GET_VLD_CONFIG (46)
#define OPCODE_COMMON_GET_PORT_EQUALIZATION (47)
#define OPCODE_COMMON_SET_PORT_EQUALIZATION (48)
#define OPCODE_COMMON_RED_CONFIG (49)
/*
* --- ETH_SUBSYSTEM_OPCODES ---
* These opcodes are used for configuring the Ethernet interfaces. These
* opcodes all use the FWCMD_SUBSYSTEM_ETH subsystem code.
*/
#define OPCODE_ETH_RSS_CONFIG (1)
#define OPCODE_ETH_ACPI_CONFIG (2)
#define SUBSYSTEM_ETH_RSS_CONFIG (3)
#define SUBSYSTEM_ETH_ACPI_CONFIG (3)
#define OPCODE_ETH_PROMISCUOUS (3)
#define SUBSYSTEM_ETH_PROMISCUOUS (3)
#define SUBSYSTEM_ETH_GET_STATISTICS (3)
#define SUBSYSTEM_ETH_GET_RX_FRAG_SIZE (3)
#define SUBSYSTEM_ETH_SET_RX_FRAG_SIZE (3)
#define OPCODE_ETH_GET_STATISTICS (4)
#define OPCODE_ETH_GET_RX_FRAG_SIZE (5)
#define OPCODE_ETH_SET_RX_FRAG_SIZE (6)
/*
* --- MCC_STATUS_CODE ---
* These are the global status codes used by all subsystems
*/
#define MCC_STATUS_SUCCESS (0) /* Indicates a successful
completion of the command */
#define MCC_STATUS_INSUFFICIENT_PRIVILEGES (1) /* The client does not have
sufficient privileges to
execute the command */
#define MCC_STATUS_INVALID_PARAMETER (2) /* A parameter in the command
was invalid. The extended
status contains the index
of the parameter */
#define MCC_STATUS_INSUFFICIENT_RESOURCES (3) /* There are insufficient
chip resources to execute
the command */
#define MCC_STATUS_QUEUE_FLUSHING (4) /* The command is completing
because the queue was
getting flushed */
#define MCC_STATUS_DMA_FAILED (5) /* The command is completing
with a DMA error */
/*
* --- MGMT_ERROR_CODES ---
* Error Codes returned in the status field of the FWCMD response header
*/
#define MGMT_STATUS_SUCCESS (0) /* The FWCMD completed
without errors */
#define MGMT_STATUS_FAILED (1) /* Error status in the Status
field of the
struct FWCMD_RESPONSE_HEADER */
#define MGMT_STATUS_ILLEGAL_REQUEST (2) /* Invalid FWCMD opcode */
#define MGMT_STATUS_ILLEGAL_FIELD (3) /* Invalid parameter in
the FWCMD payload */
#endif /* __fwcmd_opcodes_amap_h__ */

View File

@ -1,29 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __fwcmd_types_bmap_h__
#define __fwcmd_types_bmap_h__
/* MAC address format */
struct MAC_ADDRESS_FORMAT {
	u16 SizeOfStructure;	/* NOTE(review): presumably sizeof this
				   struct in bytes -- confirm vs firmware */
	u8 MACAddress[6];	/* 6-byte Ethernet MAC address */
} __packed;
#endif /* __fwcmd_types_bmap_h__ */

View File

@ -1,182 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __host_struct_amap_h__
#define __host_struct_amap_h__
#include "be_cm.h"
#include "be_common.h"
#include "descriptors.h"
/* --- EQ_COMPLETION_MAJOR_CODE_ENUM --- */
#define EQ_MAJOR_CODE_COMPLETION (0) /* Completion event on a */
/* completion queue. */
#define EQ_MAJOR_CODE_ETH (1) /* Affiliated Ethernet Event. */
#define EQ_MAJOR_CODE_RESERVED (2) /* Reserved */
#define EQ_MAJOR_CODE_RDMA (3) /* Affiliated RDMA Event. */
#define EQ_MAJOR_CODE_ISCSI (4) /* Affiliated ISCSI Event */
#define EQ_MAJOR_CODE_UNAFFILIATED (5) /* Unaffiliated Event */
/* --- EQ_COMPLETION_MINOR_CODE_ENUM --- */
#define EQ_MINOR_CODE_COMPLETION (0) /* Completion event on a */
/* completion queue. */
#define EQ_MINOR_CODE_OTHER (1) /* Other Event (TBD). */
/* Queue Entry Definition for all 4 byte event queue types. */
struct BE_EQ_ENTRY_AMAP {
	u8 Valid;		/* DWORD 0 */
	u8 MajorCode[3];	/* DWORD 0, EQ_MAJOR_CODE_* value */
	u8 MinorCode[12];	/* DWORD 0, EQ_MINOR_CODE_* value */
	u8 ResourceID[16];	/* DWORD 0 */
} __packed;
/* Correctly-sized storage (1 DWORD) for an EQ entry. */
struct EQ_ENTRY_AMAP {
	u32 dw[1];
};
/*
* --- ETH_EVENT_CODE ---
* These codes are returned by the MPU when one of these events has occurred,
* and the event is configured to report to an Event Queue when an event
* is detected.
*/
#define ETH_EQ_LINK_STATUS (0) /* Link status change event */
/* detected. */
#define ETH_EQ_WATERMARK (1) /* watermark event detected. */
#define ETH_EQ_MAGIC_PKT (2) /* magic pkt event detected. */
#define ETH_EQ_ACPI_PKT0 (3) /* ACPI interesting packet */
/* detected. */
#define ETH_EQ_ACPI_PKT1 (3) /* ACPI interesting packet */
/* detected. */
#define ETH_EQ_ACPI_PKT2 (3) /* ACPI interesting packet */
/* detected. */
#define ETH_EQ_ACPI_PKT3 (3) /* ACPI interesting packet */
/* detected. */
/*
* --- ETH_TX_COMPL_STATUS_ENUM ---
* Status codes contained in Ethernet TX completion descriptors.
*/
#define ETH_COMP_VALID (0)
#define ETH_COMP_ERROR (1)
#define ETH_COMP_INVALID (15)
/*
* --- ETH_TX_COMPL_PORT_ENUM ---
* Port indicator contained in Ethernet TX completion descriptors.
*/
#define ETH_COMP_PORT0 (0)
#define ETH_COMP_PORT1 (1)
#define ETH_COMP_MGMT (2)
/*
* --- ETH_TX_COMPL_CT_ENUM ---
* Completion type indicator contained in Ethernet TX completion descriptors.
*/
#define ETH_COMP_ETH (0)
/*
* Work request block that the driver issues to the chip for
* Ethernet transmissions. All control fields must be valid in each WRB for
* a message. The controller, as specified by the flags, optionally writes
* an entry to the Completion Ring and generate an event.
*/
struct BE_ETH_WRB_AMAP {
	u8 frag_pa_hi[32];	/* DWORD 0 */
	u8 frag_pa_lo[32];	/* DWORD 1 */
	u8 complete;		/* DWORD 2 */
	u8 event;		/* DWORD 2 */
	u8 crc;			/* DWORD 2 */
	u8 forward;		/* DWORD 2 */
	u8 ipsec;		/* DWORD 2 */
	u8 mgmt;		/* DWORD 2 */
	u8 ipcs;		/* DWORD 2 */
	u8 udpcs;		/* DWORD 2 */
	u8 tcpcs;		/* DWORD 2 */
	u8 lso;			/* DWORD 2 */
	u8 last;		/* DWORD 2 */
	u8 vlan;		/* DWORD 2 */
	u8 dbg[3];		/* DWORD 2 */
	u8 hash_val[3];		/* DWORD 2 */
	u8 lso_mss[14];		/* DWORD 2 */
	u8 frag_len[16];	/* DWORD 3 */
	u8 vlan_tag[16];	/* DWORD 3 */
} __packed;
/* Correctly-sized storage (4 DWORDs) for an Ethernet TX WRB. */
struct ETH_WRB_AMAP {
	u32 dw[4];
};
/* This is an Ethernet transmit completion descriptor */
struct BE_ETH_TX_COMPL_AMAP {
	u8 user_bytes[16];	/* DWORD 0 */
	u8 nwh_bytes[8];	/* DWORD 0 */
	u8 lso;			/* DWORD 0 */
	u8 rsvd0[7];		/* DWORD 0 */
	u8 wrb_index[16];	/* DWORD 1 */
	u8 ct[2];		/* DWORD 1, ETH_TX_COMPL_CT_ENUM */
	u8 port[2];		/* DWORD 1, ETH_TX_COMPL_PORT_ENUM */
	u8 rsvd1[8];		/* DWORD 1 */
	u8 status[4];		/* DWORD 1, ETH_TX_COMPL_STATUS_ENUM */
	u8 rsvd2[16];		/* DWORD 2 */
	u8 ringid[11];		/* DWORD 2 */
	u8 hash_val[4];		/* DWORD 2 */
	u8 valid;		/* DWORD 2 */
	u8 rsvd3[32];		/* DWORD 3 */
} __packed;
/* Correctly-sized storage (4 DWORDs) for a TX completion entry. */
struct ETH_TX_COMPL_AMAP {
	u32 dw[4];
};
/* Ethernet Receive Buffer descriptor */
struct BE_ETH_RX_D_AMAP {
	u8 fragpa_hi[32];	/* DWORD 0 */
	u8 fragpa_lo[32];	/* DWORD 1 */
} __packed;
/* Correctly-sized storage (2 DWORDs) for an RX buffer descriptor. */
struct ETH_RX_D_AMAP {
	u32 dw[2];
};
/* This is an Ethernet Receive Completion Descriptor */
struct BE_ETH_RX_COMPL_AMAP {
	u8 vlan_tag[16];	/* DWORD 0 */
	u8 pktsize[14];		/* DWORD 0 */
	u8 port;		/* DWORD 0 */
	u8 rsvd0;		/* DWORD 0 */
	u8 err;			/* DWORD 1 */
	u8 rsshp;		/* DWORD 1 */
	u8 ipf;			/* DWORD 1 */
	u8 tcpf;		/* DWORD 1 */
	u8 udpf;		/* DWORD 1 */
	u8 ipcksm;		/* DWORD 1 */
	u8 tcpcksm;		/* DWORD 1 */
	u8 udpcksm;		/* DWORD 1 */
	u8 macdst[6];		/* DWORD 1 */
	u8 vtp;			/* DWORD 1 */
	u8 vtm;			/* DWORD 1 */
	u8 fragndx[10];		/* DWORD 1 */
	u8 ct[2];		/* DWORD 1 */
	u8 ipsec;		/* DWORD 1 */
	u8 numfrags[3];		/* DWORD 1 */
	u8 rsvd1[31];		/* DWORD 2 */
	u8 valid;		/* DWORD 2 */
	u8 rsshash[32];		/* DWORD 3 */
} __packed;
/* Correctly-sized storage (4 DWORDs) for an RX completion entry. */
struct ETH_RX_COMPL_AMAP {
	u32 dw[4];
};
#endif /* __host_struct_amap_h__ */

View File

@ -1,830 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
#ifndef __hwlib_h__
#define __hwlib_h__
#include <linux/module.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include "regmap.h" /* srcgen array map output */
#include "asyncmesg.h"
#include "fwcmd_opcodes.h"
#include "post_codes.h"
#include "fwcmd_mcc.h"
#include "fwcmd_types_bmap.h"
#include "fwcmd_common_bmap.h"
#include "fwcmd_eth_bmap.h"
#include "bestatus.h"
/*
*
* Macros for reading/writing a protection domain or CSR registers
* in BladeEngine.
*/
#define PD_READ(fo, field) ioread32((fo)->db_va + \
offsetof(struct BE_PROTECTION_DOMAIN_DBMAP_AMAP, field)/8)
#define PD_WRITE(fo, field, val) iowrite32(val, (fo)->db_va + \
offsetof(struct BE_PROTECTION_DOMAIN_DBMAP_AMAP, field)/8)
#define CSR_READ(fo, field) ioread32((fo)->csr_va + \
offsetof(struct BE_BLADE_ENGINE_CSRMAP_AMAP, field)/8)
#define CSR_WRITE(fo, field, val) iowrite32(val, (fo)->csr_va + \
offsetof(struct BE_BLADE_ENGINE_CSRMAP_AMAP, field)/8)
#define PCICFG0_READ(fo, field) ioread32((fo)->pci_va + \
offsetof(struct BE_PCICFG0_CSRMAP_AMAP, field)/8)
#define PCICFG0_WRITE(fo, field, val) iowrite32(val, (fo)->pci_va + \
offsetof(struct BE_PCICFG0_CSRMAP_AMAP, field)/8)
#define PCICFG1_READ(fo, field) ioread32((fo)->pci_va + \
offsetof(struct BE_PCICFG1_CSRMAP_AMAP, field)/8)
#define PCICFG1_WRITE(fo, field, val) iowrite32(val, (fo)->pci_va + \
offsetof(struct BE_PCICFG1_CSRMAP_AMAP, field)/8)
/*
 * Debug-only assertion.  The expansion deliberately carries no trailing
 * semicolon: the old definition expanded "ASSERT(x);" to "BUG_ON(!(x));;"
 * in debug builds, which is an extra empty statement and breaks
 * "if (cond) ASSERT(x); else ..." there while compiling fine in
 * non-debug builds.  The call sites always supply the semicolon.
 */
#ifdef BE_DEBUG
#define ASSERT(c) BUG_ON(!(c))
#else
#define ASSERT(c)
#endif
/* debug levels */
/* Bitmask debug levels; TRACE() emits only when (trace_level & level). */
enum BE_DEBUG_LEVELS {
	DL_ALWAYS = 0,		/* cannot be masked */
	DL_ERR = 0x1,		/* errors that should never happen */
	DL_WARN = 0x2,		/* something questionable.
				   recoverable errors */
	DL_NOTE = 0x4,		/* infrequent, important debug info */
	DL_INFO = 0x8,		/* debug information */
	DL_VERBOSE = 0x10,	/* detailed info, such as buffer traces */
	BE_DL_MIN_VALUE = 0x1,	/* this is the min value used */
	BE_DL_MAX_VALUE = 0x80	/* this is the highest value used */
} ;
extern unsigned int trace_level;
#define TRACE(lm, fmt, args...) { \
if (trace_level & lm) { \
printk(KERN_NOTICE "BE: %s:%d \n" fmt, \
__FILE__ , __LINE__ , ## args); \
} \
}
/* Install a new global trace mask; returns the mask that was in effect. */
static inline unsigned int be_trace_set_level(unsigned int level)
{
	unsigned int prev = trace_level;

	trace_level = level;
	return prev;
}
#define be_trace_get_level() trace_level
/*
* Returns number of pages spanned by the size of data
* starting at the given address.
*/
#define PAGES_SPANNED(_address, _size) \
((u32)((((size_t)(_address) & (PAGE_SIZE - 1)) + \
(_size) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
/* Byte offset into the page corresponding to given address */
#define OFFSET_IN_PAGE(_addr_) ((size_t)(_addr_) & (PAGE_SIZE-1))
/*
* circular subtract.
* Returns a - b assuming a circular number system, where a and b are
* in range (0, maxValue-1). If a==b, zero is returned so the
* highest value possible with this subtraction is maxValue-1.
*/
/* Circular subtract (see comment above): a - b, wrapping at max. */
static inline u32 be_subc(u32 a, u32 b, u32 max)
{
	ASSERT(a <= max && b <= max);
	ASSERT(max > 0);
	if (a >= b)
		return a - b;
	return max - b + a;
}
/* Circular add: returns a + b, wrapping at max. */
static inline u32 be_addc(u32 a, u32 b, u32 max)
{
	ASSERT(a < max);
	ASSERT(max > 0);
	if (b < max - a)
		return a + b;
	return a + b - max;
}
/* Descriptor for one physically contiguous memory region backing a ring. */
struct ring_desc {
	u32 length;	/* length in bytes */
	void *va;	/* virtual address */
	u64 pa;		/* bus address */
} ;
/*
* This structure stores information about a ring shared between hardware
* and software. Each ring is allocated by the driver in the uncached
* extension and mapped into BladeEngine's unified table.
*/
/* Ring state shared between driver and hardware (see comment above). */
struct mp_ring {
	u32 pages;	/* queue size in pages */
	u32 id;		/* queue id assigned by beklib */
	u32 num;	/* number of elements in queue */
	u32 cidx;	/* consumer index */
	u32 pidx;	/* producer index -- not used by most rings */
	u32 itemSize;	/* size in bytes of one object */
	void *va;	/* The virtual address of the ring.
			   This should be last to allow 32 & 64
			   bit debugger extensions to work. */
} ;
/*----------- amap bit filed get / set macros and functions -----*/
/*
* Structures defined in the map header files (under fw/amap/) with names
* in the format BE_<name>_AMAP are pseudo structures with members
* of type u8. These structures are templates that are used in
 * conjunction with the structures with names in the format
* <name>_AMAP to calculate the bit masks and bit offsets to get or set
* bit fields in structures. The structures <name>_AMAP are arrays
* of 32 bits words and have the correct size. The following macros
* provide convenient ways to get and set the various members
 * in the structures without using structures with bit fields.
* Always use the macros AMAP_GET_BITS_PTR and AMAP_SET_BITS_PTR
* macros to extract and set various members.
*/
/*
 * Returns the a bit mask for the register that is NOT shifted into location.
 * That means return values always look like: 0x1, 0xFF, 0x7FF, etc...
 *
 * The shift operand must be unsigned: the previous (1 << bit_size)
 * was undefined behavior for bit_size == 31 (left shift of a signed
 * int into its sign bit, C99 6.5.7).  Same values, defined semantics.
 */
static inline u32 amap_mask(u32 bit_size)
{
	return bit_size == 32 ? 0xFFFFFFFF : (1u << bit_size) - 1;
}
#define AMAP_BIT_MASK(_struct_, field) \
amap_mask(AMAP_BIT_SIZE(_struct_, field))
/*
* non-optimized set bits function. First clears the bits and then assigns them.
* This does not require knowledge of the particular DWORD you are setting.
* e.g. AMAP_SET_BITS_PTR (struct, field1, &contextMemory, 123);
*/
static inline void
amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
{
	u32 *word = (u32 *)ptr + dw_offset;

	/* Clear the field, then deposit the masked new value. */
	*word = (*word & ~(mask << offset)) | ((value & mask) << offset);
}
#define AMAP_SET_BITS_PTR(_struct_, field, _structPtr_, val) \
amap_set(_structPtr_, AMAP_WORD_OFFSET(_struct_, field),\
AMAP_BIT_MASK(_struct_, field), \
AMAP_BIT_OFFSET(_struct_, field), val)
/*
* Non-optimized routine that gets the bits without knowing the correct DWORD.
* e.g. fieldValue = AMAP_GET_BITS_PTR (struct, field1, &contextMemory);
*/
static inline u32
amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
{
	u32 *word = (u32 *)ptr + dw_offset;

	/* Shift the field down to bit 0 and strip neighboring fields. */
	return (*word >> offset) & mask;
}
#define AMAP_GET_BITS_PTR(_struct_, field, _structPtr_) \
amap_get(_structPtr_, AMAP_WORD_OFFSET(_struct_, field), \
AMAP_BIT_MASK(_struct_, field), \
AMAP_BIT_OFFSET(_struct_, field))
/* Returns 0-31 representing bit offset within a DWORD of a bitfield. */
#define AMAP_BIT_OFFSET(_struct_, field) \
(offsetof(struct BE_ ## _struct_ ## _AMAP, field) % 32)
/* Returns 0-n representing DWORD offset of bitfield within the structure. */
#define AMAP_WORD_OFFSET(_struct_, field) \
(offsetof(struct BE_ ## _struct_ ## _AMAP, field)/32)
/* Returns size of bitfield in bits. */
#define AMAP_BIT_SIZE(_struct_, field) \
sizeof(((struct BE_ ## _struct_ ## _AMAP*)0)->field)
/* Where and how much of an embedded FWCMD response to copy back for the user. */
struct be_mcc_wrb_response_copy {
	u16 length;		/* bytes in response */
	u16 fwcmd_offset;	/* offset within the wrb of the response */
	void *va;		/* user's va to copy response into */
} ;
/* Completion callback invoked when an MCC WRB finishes. */
typedef void (*mcc_wrb_cqe_callback) (void *context, int status,
				struct MCC_WRB_AMAP *optional_wrb);
/* Per-WRB software state tracked while an MCC command is outstanding. */
struct be_mcc_wrb_context {
	mcc_wrb_cqe_callback internal_cb;	/* Function to call on
						completion */
	void *internal_cb_context;	/* Parameter to pass
						to completion function */
	mcc_wrb_cqe_callback cb;	/* Function to call on completion */
	void *cb_context;	/* Parameter to pass to completion function */
	int *users_final_status;	/* pointer to a local
						variable for synchronous
						commands */
	struct MCC_WRB_AMAP *wrb;	/* pointer to original wrb for embedded
						commands only */
	struct list_head next;	/* links context structs together in
						free list */
	struct be_mcc_wrb_response_copy copy;	/* Optional parameters to copy
						embedded response to user's va */
#if defined(BE_DEBUG)
	u16 subsystem, opcode;	/* Track this FWCMD for debug builds. */
	struct MCC_WRB_AMAP *ring_wrb;
	u32 consumed_count;
#endif
} ;
/*
Represents a function object for network or storage. This
is used to manage per-function resources like MCC CQs, etc.
*/
struct be_function_object {
	u32 magic;		/* magic for detecting memory corruption. */
	/* PCI BAR mapped addresses */
	u8 __iomem *csr_va;	/* CSR */
	u8 __iomem *db_va;	/* Door Bell */
	u8 __iomem *pci_va;	/* PCI config space */
	u32 emulate;		/* if set, MPU is not available.
				   Emulate everything. */
	u32 pend_queue_driving;	/* if set, drive the queued WRBs
				   after releasing the WRB lock */
	spinlock_t post_lock;	/* lock for verifying one thread posting wrbs */
	spinlock_t cq_lock;	/* lock for verifying one thread
				   processing cq */
	spinlock_t mcc_context_lock;	/* lock for protecting mcc
					   context free list */
	unsigned long post_irq;
	unsigned long cq_irq;
	u32 type;		/* BE_FUNCTION_TYPE_* value */
	u32 pci_function_number;
	struct be_mcc_object *mcc;	/* mcc rings. */
	struct {
		struct MCC_MAILBOX_AMAP *va;	/* VA to the mailbox */
		u64 pa;				/* PA to the mailbox */
		u32 length;			/* byte length of mailbox */
		/* One default context struct used for posting at
		 * least one MCC_WRB
		 */
		struct be_mcc_wrb_context default_context;
		bool default_context_allocated;
	} mailbox;
	struct {
		/* Wake on lans configured. */
		u32 wol_bitmask;	/* bits 0,1,2,3 are set if
					   corresponding index is enabled */
	} config;
	struct BE_FIRMWARE_CONFIG fw_config;
} ;
/*
Represents an Event Queue
*/
struct be_eq_object {
	u32 magic;
	atomic_t ref_count;
	struct be_function_object *parent_function;
	struct list_head eq_list;	/* entry in the parent's EQ list */
	struct list_head cq_list_head;	/* CQs attached to this EQ */
	u32 eq_id;
	void *cb_context;
} ;
/*
Manages a completion queue
*/
struct be_cq_object {
	u32 magic;
	atomic_t ref_count;
	struct be_function_object *parent_function;
	struct be_eq_object *eq_object;	/* EQ this CQ reports into */
	struct list_head cq_list;	/* entry in the parent's CQ list */
	struct list_head cqlist_for_eq;	/* entry in the EQ's CQ list */
	void *va;
	u32 num_entries;
	void *cb_context;
	u32 cq_id;
} ;
/*
Manages an ethernet send queue
*/
struct be_ethsq_object {
	u32 magic;
	struct list_head list;
	struct be_function_object *parent_function;
	struct be_cq_object *cq_object;	/* TX completion queue */
	u32 bid;
} ;
/*
@brief
Manages an ethernet receive queue
*/
struct be_ethrq_object {
	u32 magic;
	struct list_head list;
	struct be_function_object *parent_function;
	u32 rid;
	struct be_cq_object *cq_object;
	struct be_cq_object *rss_cq_object[4];	/* per-RSS-queue CQs */
} ;
/*
Manages an MCC
*/
/* Callback invoked for asynchronous events arriving on the MCC CQ. */
typedef void (*mcc_async_event_callback) (void *context, u32 event_code,
				void *event);
/* State for the MCC send ring, its CQ, and the WRB context pool. */
struct be_mcc_object {
	u32 magic;
	struct be_function_object *parent_function;
	struct list_head mcc_list;
	struct be_cq_object *cq_object;
	/* Async event callback for MCC CQ. */
	mcc_async_event_callback async_cb;
	void *async_context;
	struct {
		struct be_mcc_wrb_context *base;	/* context array */
		u32 num;				/* entries in base */
		struct list_head list_head;		/* free-list head */
	} wrb_context;
	struct {
		struct ring_desc *rd;
		struct mp_ring ring;
	} sq;
	struct {
		struct mp_ring ring;
	} cq;
	u32 processing;		/* flag indicating that one thread
				   is processing cq */
	u32 rearm;		/* doorbell rearm setting to make
				   sure the active processing thread */
				/* rearms the CQ if any of the threads
				   requested it. */
	struct list_head backlog;
	u32 backlog_length;
	u32 driving_backlog;
	u32 consumed_index;
} ;
/* Queue context header -- the required software information for
* queueing a WRB.
*/
struct be_queue_driver_context {
	mcc_wrb_cqe_callback internal_cb;	/* Function to call on
						completion */
	void *internal_cb_context;	/* Parameter to pass
						to completion function */
	mcc_wrb_cqe_callback cb;	/* Function to call on completion */
	void *cb_context;	/* Parameter to pass to completion function */
	struct be_mcc_wrb_response_copy copy;	/* Optional parameters to copy
						embedded response to user's va */
	void *optional_fwcmd_va;
	struct list_head list;
	u32 bytes;
} ;
/*
 * Common MCC WRB header that all commands require.
 */
struct be_mcc_wrb_header {
	u8 rsvd[offsetof(struct BE_MCC_WRB_AMAP, payload)/8];
} ;
/*
 * All non embedded commands supported by hwlib functions only allow
 * 1 SGE. This queue context handles them all.
 */
struct be_nonembedded_q_ctxt {
	struct be_queue_driver_context context;
	struct be_mcc_wrb_header wrb_header;
	struct MCC_SGE_AMAP sge[1];
} ;
/*
* ------------------------------------------------------------------------
* This section contains the specific queue struct for each command.
* The user could always provide a be_generic_q_ctxt but this is a
* rather large struct. By using the specific struct, memory consumption
* can be reduced.
* ------------------------------------------------------------------------
*/
struct be_link_status_q_ctxt {
struct be_queue_driver_context context;
struct be_mcc_wrb_header wrb_header;
struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY fwcmd;
} ;
struct be_multicast_q_ctxt {
struct be_queue_driver_context context;
struct be_mcc_wrb_header wrb_header;
struct FWCMD_COMMON_NTWK_MULTICAST_SET fwcmd;
} ;
struct be_vlan_q_ctxt {
struct be_queue_driver_context context;
struct be_mcc_wrb_header wrb_header;
struct FWCMD_COMMON_NTWK_VLAN_CONFIG fwcmd;
} ;
struct be_promiscuous_q_ctxt {
struct be_queue_driver_context context;
struct be_mcc_wrb_header wrb_header;
struct FWCMD_ETH_PROMISCUOUS fwcmd;
} ;
struct be_force_failover_q_ctxt {
struct be_queue_driver_context context;
struct be_mcc_wrb_header wrb_header;
struct FWCMD_COMMON_FORCE_FAILOVER fwcmd;
} ;
struct be_rxf_filter_q_ctxt {
struct be_queue_driver_context context;
struct be_mcc_wrb_header wrb_header;
struct FWCMD_COMMON_NTWK_RX_FILTER fwcmd;
} ;
/* Queue context for the COMMON_MODIFY_EQ_DELAY command. */
struct be_eq_modify_delay_q_ctxt {
	struct be_queue_driver_context context;	/* driver-side state for the queued command */
	struct be_mcc_wrb_header wrb_header;	/* pads out to the WRB payload offset */
	struct FWCMD_COMMON_MODIFY_EQ_DELAY fwcmd;	/* embedded FWCMD body */
} ;
/*
* The generic context is the largest size that would be required.
* It is the software context plus an entire WRB.
*/
struct be_generic_q_ctxt {
	struct be_queue_driver_context context;	/* driver-side state for the queued command */
	struct be_mcc_wrb_header wrb_header;	/* pads out to the WRB payload offset */
	struct MCC_WRB_PAYLOAD_AMAP payload;	/* room for the largest possible WRB payload */
} ;
/*
* Types for the BE_QUEUE_CONTEXT object.
*/
#define BE_QUEUE_INVALID (0)
#define BE_QUEUE_LINK_STATUS (0xA006)
#define BE_QUEUE_ETH_STATS (0xA007)
#define BE_QUEUE_TPM_STATS (0xA008)
#define BE_QUEUE_TCP_STATS (0xA009)
#define BE_QUEUE_MULTICAST (0xA00A)
#define BE_QUEUE_VLAN (0xA00B)
#define BE_QUEUE_RSS (0xA00C)
#define BE_QUEUE_FORCE_FAILOVER (0xA00D)
#define BE_QUEUE_PROMISCUOUS (0xA00E)
#define BE_QUEUE_WAKE_ON_LAN (0xA00F)
#define BE_QUEUE_NOP (0xA010)
/* --- BE_FUNCTION_ENUM --- */
#define BE_FUNCTION_TYPE_ISCSI (0)
#define BE_FUNCTION_TYPE_NETWORK (1)
#define BE_FUNCTION_TYPE_ARM (2)
/* --- BE_ETH_TX_RING_TYPE_ENUM --- */
#define BE_ETH_TX_RING_TYPE_FORWARDING (1) /* Ether ring for forwarding */
#define BE_ETH_TX_RING_TYPE_STANDARD (2) /* Ether ring for sending */
/* network packets. */
#define BE_ETH_TX_RING_TYPE_BOUND (3) /* Ethernet ring for sending */
/* network packets, bound */
/* to a physical port. */
/*
* ----------------------------------------------------------------------
* API MACROS
* ----------------------------------------------------------------------
*/
/* Map a FWCMD short name to its struct / opcode / subsystem identifier. */
#define BE_FWCMD_NAME(_short_name_)	struct FWCMD_##_short_name_
#define BE_OPCODE_NAME(_short_name_)	OPCODE_##_short_name_
#define BE_SUBSYSTEM_NAME(_short_name_)	SUBSYSTEM_##_short_name_
/*
 * Prepare a FWCMD embedded directly in the WRB payload.  Expands to a
 * pointer to the command's FWCMD_<short_name> struct (NULL semantics are
 * those of be_function_prepare_embedded_fwcmd).  The expansion is an
 * expression and deliberately carries no trailing semicolon, so it can be
 * used inside conditions and larger expressions; callers terminate the
 * statement themselves.
 */
#define BE_PREPARE_EMBEDDED_FWCMD(_pfob_, _wrb_, _short_name_)	\
	((BE_FWCMD_NAME(_short_name_) *) \
	be_function_prepare_embedded_fwcmd(_pfob_, _wrb_, \
		sizeof(BE_FWCMD_NAME(_short_name_)), \
		FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.request), \
		FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.response), \
		BE_OPCODE_NAME(_short_name_), \
		BE_SUBSYSTEM_NAME(_short_name_)))
/*
 * Prepare a non-embedded FWCMD whose body lives in a separately allocated
 * buffer at virtual address _iva_ / physical address _ipa_.  Also an
 * expression; no trailing semicolon (see above).
 */
#define BE_PREPARE_NONEMBEDDED_FWCMD(_pfob_, _wrb_, _iva_, _ipa_, _short_name_)\
	((BE_FWCMD_NAME(_short_name_) *) \
	be_function_prepare_nonembedded_fwcmd(_pfob_, _wrb_, (_iva_), (_ipa_), \
		sizeof(BE_FWCMD_NAME(_short_name_)), \
		FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.request), \
		FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.response), \
		BE_OPCODE_NAME(_short_name_), \
		BE_SUBSYSTEM_NAME(_short_name_)))
int be_function_object_create(u8 __iomem *csr_va, u8 __iomem *db_va,
u8 __iomem *pci_va, u32 function_type, struct ring_desc *mailbox_rd,
struct be_function_object *pfob);
int be_function_object_destroy(struct be_function_object *pfob);
int be_function_cleanup(struct be_function_object *pfob);
int be_function_get_fw_version(struct be_function_object *pfob,
struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD *fw_version,
mcc_wrb_cqe_callback cb, void *cb_context);
int be_eq_modify_delay(struct be_function_object *pfob,
u32 num_eq, struct be_eq_object **eq_array,
u32 *eq_delay_array, mcc_wrb_cqe_callback cb,
void *cb_context,
struct be_eq_modify_delay_q_ctxt *q_ctxt);
int be_eq_create(struct be_function_object *pfob,
struct ring_desc *rd, u32 eqe_size, u32 num_entries,
u32 watermark, u32 timer_delay, struct be_eq_object *eq_object);
int be_eq_destroy(struct be_eq_object *eq);
int be_cq_create(struct be_function_object *pfob,
struct ring_desc *rd, u32 length,
bool solicited_eventable, bool no_delay,
u32 wm_thresh, struct be_eq_object *eq_object,
struct be_cq_object *cq_object);
int be_cq_destroy(struct be_cq_object *cq);
int be_mcc_ring_create(struct be_function_object *pfob,
struct ring_desc *rd, u32 length,
struct be_mcc_wrb_context *context_array,
u32 num_context_entries,
struct be_cq_object *cq, struct be_mcc_object *mcc);
int be_mcc_ring_destroy(struct be_mcc_object *mcc_object);
int be_mcc_process_cq(struct be_mcc_object *mcc_object, bool rearm);
int be_mcc_add_async_event_callback(struct be_mcc_object *mcc_object,
mcc_async_event_callback cb, void *cb_context);
int be_pci_soft_reset(struct be_function_object *pfob);
int be_drive_POST(struct be_function_object *pfob);
int be_eth_sq_create(struct be_function_object *pfob,
struct ring_desc *rd, u32 length_in_bytes,
u32 type, u32 ulp, struct be_cq_object *cq_object,
struct be_ethsq_object *eth_sq);
/* Extended parameters accepted by be_eth_sq_create_ex(). */
struct be_eth_sq_parameters {
	u32 port;	/* port for the ring — presumably the physical port for
			 * BE_ETH_TX_RING_TYPE_BOUND rings; confirm against callers */
	u32 rsvd0[2];	/* reserved */
} ;
int be_eth_sq_create_ex(struct be_function_object *pfob,
struct ring_desc *rd, u32 length_in_bytes,
u32 type, u32 ulp, struct be_cq_object *cq_object,
struct be_eth_sq_parameters *ex_parameters,
struct be_ethsq_object *eth_sq);
int be_eth_sq_destroy(struct be_ethsq_object *eth_sq);
int be_eth_set_flow_control(struct be_function_object *pfob,
bool txfc_enable, bool rxfc_enable);
int be_eth_get_flow_control(struct be_function_object *pfob,
bool *txfc_enable, bool *rxfc_enable);
int be_eth_set_qos(struct be_function_object *pfob, u32 max_bps, u32 max_pps);
int be_eth_get_qos(struct be_function_object *pfob, u32 *max_bps, u32 *max_pps);
int be_eth_set_frame_size(struct be_function_object *pfob,
u32 *tx_frame_size, u32 *rx_frame_size);
int be_eth_rq_create(struct be_function_object *pfob,
struct ring_desc *rd, struct be_cq_object *cq_object,
struct be_cq_object *bcmc_cq_object,
struct be_ethrq_object *eth_rq);
int be_eth_rq_destroy(struct be_ethrq_object *eth_rq);
int be_eth_rq_destroy_options(struct be_ethrq_object *eth_rq, bool flush,
mcc_wrb_cqe_callback cb, void *cb_context);
int be_eth_rq_set_frag_size(struct be_function_object *pfob,
u32 new_frag_size_bytes, u32 *actual_frag_size_bytes);
int be_eth_rq_get_frag_size(struct be_function_object *pfob,
u32 *frag_size_bytes);
void *be_function_prepare_embedded_fwcmd(struct be_function_object *pfob,
struct MCC_WRB_AMAP *wrb,
u32 payload_length, u32 request_length,
u32 response_length, u32 opcode, u32 subsystem);
void *be_function_prepare_nonembedded_fwcmd(struct be_function_object *pfob,
struct MCC_WRB_AMAP *wrb, void *fwcmd_header_va, u64 fwcmd_header_pa,
u32 payload_length, u32 request_length, u32 response_length,
u32 opcode, u32 subsystem);
struct MCC_WRB_AMAP *
be_function_peek_mcc_wrb(struct be_function_object *pfob);
int be_rxf_mac_address_read_write(struct be_function_object *pfob,
bool port1, bool mac1, bool mgmt,
bool write, bool permanent, u8 *mac_address,
mcc_wrb_cqe_callback cb,
void *cb_context);
int be_rxf_multicast_config(struct be_function_object *pfob,
bool promiscuous, u32 num, u8 *mac_table,
mcc_wrb_cqe_callback cb,
void *cb_context,
struct be_multicast_q_ctxt *q_ctxt);
int be_rxf_vlan_config(struct be_function_object *pfob,
bool promiscuous, u32 num, u16 *vlan_tag_array,
mcc_wrb_cqe_callback cb, void *cb_context,
struct be_vlan_q_ctxt *q_ctxt);
int be_rxf_link_status(struct be_function_object *pfob,
struct BE_LINK_STATUS *link_status,
mcc_wrb_cqe_callback cb,
void *cb_context,
struct be_link_status_q_ctxt *q_ctxt);
int be_rxf_query_eth_statistics(struct be_function_object *pfob,
struct FWCMD_ETH_GET_STATISTICS *va_for_fwcmd,
u64 pa_for_fwcmd, mcc_wrb_cqe_callback cb,
void *cb_context,
struct be_nonembedded_q_ctxt *q_ctxt);
int be_rxf_promiscuous(struct be_function_object *pfob,
bool enable_port0, bool enable_port1,
mcc_wrb_cqe_callback cb, void *cb_context,
struct be_promiscuous_q_ctxt *q_ctxt);
int be_rxf_filter_config(struct be_function_object *pfob,
struct NTWK_RX_FILTER_SETTINGS *settings,
mcc_wrb_cqe_callback cb,
void *cb_context,
struct be_rxf_filter_q_ctxt *q_ctxt);
/*
* ------------------------------------------------------
* internal functions used by hwlib
* ------------------------------------------------------
*/
int be_function_ring_destroy(struct be_function_object *pfob,
u32 id, u32 ring_type, mcc_wrb_cqe_callback cb,
void *cb_context,
mcc_wrb_cqe_callback internal_cb,
void *internal_callback_context);
int be_function_post_mcc_wrb(struct be_function_object *pfob,
struct MCC_WRB_AMAP *wrb,
struct be_generic_q_ctxt *q_ctxt,
mcc_wrb_cqe_callback cb, void *cb_context,
mcc_wrb_cqe_callback internal_cb,
void *internal_cb_context, void *optional_fwcmd_va,
struct be_mcc_wrb_response_copy *response_copy);
int be_function_queue_mcc_wrb(struct be_function_object *pfob,
struct be_generic_q_ctxt *q_ctxt);
/*
* ------------------------------------------------------
* MCC QUEUE
* ------------------------------------------------------
*/
int be_mpu_init_mailbox(struct be_function_object *pfob, struct ring_desc *rd);
struct MCC_WRB_AMAP *
_be_mpu_peek_ring_wrb(struct be_mcc_object *mcc, bool driving_queue);
struct be_mcc_wrb_context *
_be_mcc_allocate_wrb_context(struct be_function_object *pfob);
void _be_mcc_free_wrb_context(struct be_function_object *pfob,
struct be_mcc_wrb_context *context);
int _be_mpu_post_wrb_mailbox(struct be_function_object *pfob,
struct MCC_WRB_AMAP *wrb, struct be_mcc_wrb_context *wrb_context);
int _be_mpu_post_wrb_ring(struct be_mcc_object *mcc,
struct MCC_WRB_AMAP *wrb, struct be_mcc_wrb_context *wrb_context);
void be_drive_mcc_wrb_queue(struct be_mcc_object *mcc);
/*
* ------------------------------------------------------
* Ring Sizes
* ------------------------------------------------------
*/
/*
 * Convert a 4-bit ring-size encoding to the ring length in bytes.
 * Encoding e selects 2^(e-1) entries, except that 0 means the maximum
 * depth of 32k entries and 1 is reserved.
 */
static inline u32 be_ring_encoding_to_length(u32 encoding, u32 object_size)
{
	u32 depth;

	ASSERT(encoding != 1);	/* 1 is rsvd */
	ASSERT(encoding < 16);
	ASSERT(object_size > 0);

	/* Encoding 0 is the special case: deepest ring, 32k entries. */
	depth = (encoding == 0) ? (32 * 1024) : (1 << (encoding - 1));
	return depth * object_size;
}
/*
 * Convert a ring length in bytes to the hardware's 4-bit size encoding
 * (the inverse of be_ring_encoding_to_length).  The length must be a
 * whole multiple of the object size, yielding between 2 and 32k objects.
 */
static inline
u32 be_ring_length_to_encoding(u32 length_in_bytes, u32 object_size)
{
	u32 num_objects, enc;

	ASSERT(object_size > 0);
	ASSERT(length_in_bytes % object_size == 0);

	num_objects = length_in_bytes / object_size;
	ASSERT(num_objects > 1);
	ASSERT(num_objects <= 32 * 1024);
	ASSERT(length_in_bytes <= 8 * PAGE_SIZE); /* max ring size in UT */

	enc = __ilog2_u32(num_objects) + 1;
	if (enc == 16)
		enc = 0;	/* 32k deep */
	return enc;
}
void be_rd_to_pa_list(struct ring_desc *rd, struct PHYS_ADDR *pa_list,
u32 max_num);
#endif /* __hwlib_h__ */

File diff suppressed because it is too large Load Diff

View File

@ -1,74 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __mpu_amap_h__
#define __mpu_amap_h__
#include "ep.h"
/* Provide control parameters for the Management Processor Unit. */
struct BE_MPU_CSRMAP_AMAP {
struct BE_EP_CSRMAP_AMAP ep;
u8 rsvd0[128]; /* DWORD 64 */
u8 rsvd1[32]; /* DWORD 68 */
u8 rsvd2[192]; /* DWORD 69 */
u8 rsvd3[192]; /* DWORD 75 */
u8 rsvd4[32]; /* DWORD 81 */
u8 rsvd5[32]; /* DWORD 82 */
u8 rsvd6[32]; /* DWORD 83 */
u8 rsvd7[32]; /* DWORD 84 */
u8 rsvd8[32]; /* DWORD 85 */
u8 rsvd9[32]; /* DWORD 86 */
u8 rsvd10[32]; /* DWORD 87 */
u8 rsvd11[32]; /* DWORD 88 */
u8 rsvd12[32]; /* DWORD 89 */
u8 rsvd13[32]; /* DWORD 90 */
u8 rsvd14[32]; /* DWORD 91 */
u8 rsvd15[32]; /* DWORD 92 */
u8 rsvd16[32]; /* DWORD 93 */
u8 rsvd17[32]; /* DWORD 94 */
u8 rsvd18[32]; /* DWORD 95 */
u8 rsvd19[32]; /* DWORD 96 */
u8 rsvd20[32]; /* DWORD 97 */
u8 rsvd21[32]; /* DWORD 98 */
u8 rsvd22[32]; /* DWORD 99 */
u8 rsvd23[32]; /* DWORD 100 */
u8 rsvd24[32]; /* DWORD 101 */
u8 rsvd25[32]; /* DWORD 102 */
u8 rsvd26[32]; /* DWORD 103 */
u8 rsvd27[32]; /* DWORD 104 */
u8 rsvd28[96]; /* DWORD 105 */
u8 rsvd29[32]; /* DWORD 108 */
u8 rsvd30[32]; /* DWORD 109 */
u8 rsvd31[32]; /* DWORD 110 */
u8 rsvd32[32]; /* DWORD 111 */
u8 rsvd33[32]; /* DWORD 112 */
u8 rsvd34[96]; /* DWORD 113 */
u8 rsvd35[32]; /* DWORD 116 */
u8 rsvd36[32]; /* DWORD 117 */
u8 rsvd37[32]; /* DWORD 118 */
u8 rsvd38[32]; /* DWORD 119 */
u8 rsvd39[32]; /* DWORD 120 */
u8 rsvd40[32]; /* DWORD 121 */
u8 rsvd41[134][32]; /* DWORD 122 */
} __packed;
struct MPU_CSRMAP_AMAP {
u32 dw[256];
};
#endif /* __mpu_amap_h__ */

View File

@ -1,46 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __mpu_context_amap_h__
#define __mpu_context_amap_h__
/*
* Management command and control ring context. The MPUs BTLR_CTRL1 CSR
* controls the writeback behavior of the producer and consumer index values.
*/
/*
 * Bit-level layout: one u8 element per bit, so each array length below is
 * the field's width in bits — four 32-bit DWORDs in total (see the dw[4]
 * view that follows).
 */
struct BE_MCC_RING_CONTEXT_AMAP {
	u8 con_index[16];	/* DWORD 0 - consumer index */
	u8 ring_size[4];	/* DWORD 0 - encoded ring size */
	u8 cq_id[11];	/* DWORD 0 - completion queue id */
	u8 rsvd0;	/* DWORD 0 */
	u8 prod_index[16];	/* DWORD 1 - producer index */
	u8 pdid[15];	/* DWORD 1 */
	u8 invalid;	/* DWORD 1 */
	u8 cmd_pending_current[7];	/* DWORD 2 */
	u8 rsvd1[25];	/* DWORD 2 */
	u8 hpi_port_cq_id[11];	/* DWORD 3 */
	u8 rsvd2[5];	/* DWORD 3 */
	u8 cmd_pending_max[7];	/* DWORD 3 */
	u8 rsvd3[9];	/* DWORD 3 */
} __packed;
/* Raw 32-bit-word view of the same four DWORDs. */
struct MCC_RING_CONTEXT_AMAP {
	u32 dw[4];
};
#endif /* __mpu_context_amap_h__ */

View File

@ -1,825 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __pcicfg_amap_h__
#define __pcicfg_amap_h__
/* Vendor and Device ID Register. */
/* One u8 per bit; array lengths are field widths in bits. */
struct BE_PCICFG_ID_CSR_AMAP {
	u8 vendorid[16];	/* DWORD 0 - PCI vendor ID */
	u8 deviceid[16];	/* DWORD 0 - PCI device ID */
} __packed;
/* Raw 32-bit view of the register. */
struct PCICFG_ID_CSR_AMAP {
	u32 dw[1];
};
/* IO Bar Register. */
struct BE_PCICFG_IOBAR_CSR_AMAP {
u8 iospace; /* DWORD 0 */
u8 rsvd0[7]; /* DWORD 0 */
u8 iobar[24]; /* DWORD 0 */
} __packed;
struct PCICFG_IOBAR_CSR_AMAP {
u32 dw[1];
};
/* Memory BAR 0 Register. */
struct BE_PCICFG_MEMBAR0_CSR_AMAP {
u8 memspace; /* DWORD 0 */
u8 type[2]; /* DWORD 0 */
u8 pf; /* DWORD 0 */
u8 rsvd0[10]; /* DWORD 0 */
u8 membar0[18]; /* DWORD 0 */
} __packed;
struct PCICFG_MEMBAR0_CSR_AMAP {
u32 dw[1];
};
/* Memory BAR 1 - Low Address Register. */
struct BE_PCICFG_MEMBAR1_LO_CSR_AMAP {
u8 memspace; /* DWORD 0 */
u8 type[2]; /* DWORD 0 */
u8 pf; /* DWORD 0 */
u8 rsvd0[13]; /* DWORD 0 */
u8 membar1lo[15]; /* DWORD 0 */
} __packed;
struct PCICFG_MEMBAR1_LO_CSR_AMAP {
u32 dw[1];
};
/* Memory BAR 1 - High Address Register. */
struct BE_PCICFG_MEMBAR1_HI_CSR_AMAP {
u8 membar1hi[32]; /* DWORD 0 */
} __packed;
struct PCICFG_MEMBAR1_HI_CSR_AMAP {
u32 dw[1];
};
/* Memory BAR 2 - Low Address Register. */
struct BE_PCICFG_MEMBAR2_LO_CSR_AMAP {
u8 memspace; /* DWORD 0 */
u8 type[2]; /* DWORD 0 */
u8 pf; /* DWORD 0 */
u8 rsvd0[17]; /* DWORD 0 */
u8 membar2lo[11]; /* DWORD 0 */
} __packed;
struct PCICFG_MEMBAR2_LO_CSR_AMAP {
u32 dw[1];
};
/* Memory BAR 2 - High Address Register. */
struct BE_PCICFG_MEMBAR2_HI_CSR_AMAP {
u8 membar2hi[32]; /* DWORD 0 */
} __packed;
struct PCICFG_MEMBAR2_HI_CSR_AMAP {
u32 dw[1];
};
/* Subsystem Vendor and ID (Function 0) Register. */
struct BE_PCICFG_SUBSYSTEM_ID_F0_CSR_AMAP {
u8 subsys_vendor_id[16]; /* DWORD 0 */
u8 subsys_id[16]; /* DWORD 0 */
} __packed;
struct PCICFG_SUBSYSTEM_ID_F0_CSR_AMAP {
u32 dw[1];
};
/* Subsystem Vendor and ID (Function 1) Register. */
struct BE_PCICFG_SUBSYSTEM_ID_F1_CSR_AMAP {
u8 subsys_vendor_id[16]; /* DWORD 0 */
u8 subsys_id[16]; /* DWORD 0 */
} __packed;
struct PCICFG_SUBSYSTEM_ID_F1_CSR_AMAP {
u32 dw[1];
};
/* Semaphore Register. */
/* One u8 per bit; array lengths are field widths in bits. */
struct BE_PCICFG_SEMAPHORE_CSR_AMAP {
	u8 locked;	/* DWORD 0 - bit 0: semaphore lock flag */
	u8 rsvd0[31];	/* DWORD 0 */
} __packed;
/* Raw 32-bit view of the register. */
struct PCICFG_SEMAPHORE_CSR_AMAP {
	u32 dw[1];
};
/* Soft Reset Register. */
/* One u8 per bit; array lengths are field widths in bits. */
struct BE_PCICFG_SOFT_RESET_CSR_AMAP {
	u8 rsvd0[7];	/* DWORD 0 */
	u8 softreset;	/* DWORD 0 - bit 7: soft reset control
			 * (used by be_pci_soft_reset()) */
	u8 rsvd1[16];	/* DWORD 0 */
	u8 nec_ll_rcvdetect_i[8];	/* DWORD 0 */
} __packed;
/* Raw 32-bit view of the register. */
struct PCICFG_SOFT_RESET_CSR_AMAP {
	u32 dw[1];
};
/* Unrecoverable Error Status (Low) Register. Each bit corresponds to
* an internal Unrecoverable Error. These are set by hardware and may be
* cleared by writing a one to the respective bit(s) to be cleared. Any
* bit being set that is also unmasked will result in Unrecoverable Error
* interrupt notification to the host CPU and/or Server Management chip
* and the transitioning of BladeEngine to an Offline state.
*/
struct BE_PCICFG_UE_STATUS_LOW_CSR_AMAP {
u8 cev_ue_status; /* DWORD 0 */
u8 ctx_ue_status; /* DWORD 0 */
u8 dbuf_ue_status; /* DWORD 0 */
u8 erx_ue_status; /* DWORD 0 */
u8 host_ue_status; /* DWORD 0 */
u8 mpu_ue_status; /* DWORD 0 */
u8 ndma_ue_status; /* DWORD 0 */
u8 ptc_ue_status; /* DWORD 0 */
u8 rdma_ue_status; /* DWORD 0 */
u8 rxf_ue_status; /* DWORD 0 */
u8 rxips_ue_status; /* DWORD 0 */
u8 rxulp0_ue_status; /* DWORD 0 */
u8 rxulp1_ue_status; /* DWORD 0 */
u8 rxulp2_ue_status; /* DWORD 0 */
u8 tim_ue_status; /* DWORD 0 */
u8 tpost_ue_status; /* DWORD 0 */
u8 tpre_ue_status; /* DWORD 0 */
u8 txips_ue_status; /* DWORD 0 */
u8 txulp0_ue_status; /* DWORD 0 */
u8 txulp1_ue_status; /* DWORD 0 */
u8 uc_ue_status; /* DWORD 0 */
u8 wdma_ue_status; /* DWORD 0 */
u8 txulp2_ue_status; /* DWORD 0 */
u8 host1_ue_status; /* DWORD 0 */
u8 p0_ob_link_ue_status; /* DWORD 0 */
u8 p1_ob_link_ue_status; /* DWORD 0 */
u8 host_gpio_ue_status; /* DWORD 0 */
u8 mbox_netw_ue_status; /* DWORD 0 */
u8 mbox_stor_ue_status; /* DWORD 0 */
u8 axgmac0_ue_status; /* DWORD 0 */
u8 axgmac1_ue_status; /* DWORD 0 */
u8 mpu_intpend_ue_status; /* DWORD 0 */
} __packed;
struct PCICFG_UE_STATUS_LOW_CSR_AMAP {
u32 dw[1];
};
/* Unrecoverable Error Status (High) Register. Each bit corresponds to
* an internal Unrecoverable Error. These are set by hardware and may be
* cleared by writing a one to the respective bit(s) to be cleared. Any
* bit being set that is also unmasked will result in Unrecoverable Error
* interrupt notification to the host CPU and/or Server Management chip;
* and the transitioning of BladeEngine to an Offline state.
*/
struct BE_PCICFG_UE_STATUS_HI_CSR_AMAP {
u8 jtag_ue_status; /* DWORD 0 */
u8 lpcmemhost_ue_status; /* DWORD 0 */
u8 mgmt_mac_ue_status; /* DWORD 0 */
u8 mpu_iram_ue_status; /* DWORD 0 */
u8 pcs0online_ue_status; /* DWORD 0 */
u8 pcs1online_ue_status; /* DWORD 0 */
u8 pctl0_ue_status; /* DWORD 0 */
u8 pctl1_ue_status; /* DWORD 0 */
u8 pmem_ue_status; /* DWORD 0 */
u8 rr_ue_status; /* DWORD 0 */
u8 rxpp_ue_status; /* DWORD 0 */
u8 txpb_ue_status; /* DWORD 0 */
u8 txp_ue_status; /* DWORD 0 */
u8 xaui_ue_status; /* DWORD 0 */
u8 arm_ue_status; /* DWORD 0 */
u8 ipc_ue_status; /* DWORD 0 */
u8 rsvd0[16]; /* DWORD 0 */
} __packed;
struct PCICFG_UE_STATUS_HI_CSR_AMAP {
u32 dw[1];
};
/* Unrecoverable Error Mask (Low) Register. Each bit, when set to one,
* will mask the associated Unrecoverable Error status bit from notification
 * of Unrecoverable Error to the host CPU and/or Server Management chip and the
* transitioning of all BladeEngine units to an Offline state.
*/
struct BE_PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP {
u8 cev_ue_mask; /* DWORD 0 */
u8 ctx_ue_mask; /* DWORD 0 */
u8 dbuf_ue_mask; /* DWORD 0 */
u8 erx_ue_mask; /* DWORD 0 */
u8 host_ue_mask; /* DWORD 0 */
u8 mpu_ue_mask; /* DWORD 0 */
u8 ndma_ue_mask; /* DWORD 0 */
u8 ptc_ue_mask; /* DWORD 0 */
u8 rdma_ue_mask; /* DWORD 0 */
u8 rxf_ue_mask; /* DWORD 0 */
u8 rxips_ue_mask; /* DWORD 0 */
u8 rxulp0_ue_mask; /* DWORD 0 */
u8 rxulp1_ue_mask; /* DWORD 0 */
u8 rxulp2_ue_mask; /* DWORD 0 */
u8 tim_ue_mask; /* DWORD 0 */
u8 tpost_ue_mask; /* DWORD 0 */
u8 tpre_ue_mask; /* DWORD 0 */
u8 txips_ue_mask; /* DWORD 0 */
u8 txulp0_ue_mask; /* DWORD 0 */
u8 txulp1_ue_mask; /* DWORD 0 */
u8 uc_ue_mask; /* DWORD 0 */
u8 wdma_ue_mask; /* DWORD 0 */
u8 txulp2_ue_mask; /* DWORD 0 */
u8 host1_ue_mask; /* DWORD 0 */
u8 p0_ob_link_ue_mask; /* DWORD 0 */
u8 p1_ob_link_ue_mask; /* DWORD 0 */
u8 host_gpio_ue_mask; /* DWORD 0 */
u8 mbox_netw_ue_mask; /* DWORD 0 */
u8 mbox_stor_ue_mask; /* DWORD 0 */
u8 axgmac0_ue_mask; /* DWORD 0 */
u8 axgmac1_ue_mask; /* DWORD 0 */
u8 mpu_intpend_ue_mask; /* DWORD 0 */
} __packed;
struct PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP {
u32 dw[1];
};
/* Unrecoverable Error Mask (High) Register. Each bit, when set to one,
* will mask the associated Unrecoverable Error status bit from notification
 * of Unrecoverable Error to the host CPU and/or Server Management chip and the
* transitioning of all BladeEngine units to an Offline state.
*/
struct BE_PCICFG_UE_STATUS_HI_MASK_CSR_AMAP {
u8 jtag_ue_mask; /* DWORD 0 */
u8 lpcmemhost_ue_mask; /* DWORD 0 */
u8 mgmt_mac_ue_mask; /* DWORD 0 */
u8 mpu_iram_ue_mask; /* DWORD 0 */
u8 pcs0online_ue_mask; /* DWORD 0 */
u8 pcs1online_ue_mask; /* DWORD 0 */
u8 pctl0_ue_mask; /* DWORD 0 */
u8 pctl1_ue_mask; /* DWORD 0 */
u8 pmem_ue_mask; /* DWORD 0 */
u8 rr_ue_mask; /* DWORD 0 */
u8 rxpp_ue_mask; /* DWORD 0 */
u8 txpb_ue_mask; /* DWORD 0 */
u8 txp_ue_mask; /* DWORD 0 */
u8 xaui_ue_mask; /* DWORD 0 */
u8 arm_ue_mask; /* DWORD 0 */
u8 ipc_ue_mask; /* DWORD 0 */
u8 rsvd0[16]; /* DWORD 0 */
} __packed;
struct PCICFG_UE_STATUS_HI_MASK_CSR_AMAP {
u32 dw[1];
};
/* Online Control Register 0. This register controls various units within
* BladeEngine being in an Online or Offline state.
*/
struct BE_PCICFG_ONLINE0_CSR_AMAP {
u8 cev_online; /* DWORD 0 */
u8 ctx_online; /* DWORD 0 */
u8 dbuf_online; /* DWORD 0 */
u8 erx_online; /* DWORD 0 */
u8 host_online; /* DWORD 0 */
u8 mpu_online; /* DWORD 0 */
u8 ndma_online; /* DWORD 0 */
u8 ptc_online; /* DWORD 0 */
u8 rdma_online; /* DWORD 0 */
u8 rxf_online; /* DWORD 0 */
u8 rxips_online; /* DWORD 0 */
u8 rxulp0_online; /* DWORD 0 */
u8 rxulp1_online; /* DWORD 0 */
u8 rxulp2_online; /* DWORD 0 */
u8 tim_online; /* DWORD 0 */
u8 tpost_online; /* DWORD 0 */
u8 tpre_online; /* DWORD 0 */
u8 txips_online; /* DWORD 0 */
u8 txulp0_online; /* DWORD 0 */
u8 txulp1_online; /* DWORD 0 */
u8 uc_online; /* DWORD 0 */
u8 wdma_online; /* DWORD 0 */
u8 txulp2_online; /* DWORD 0 */
u8 host1_online; /* DWORD 0 */
u8 p0_ob_link_online; /* DWORD 0 */
u8 p1_ob_link_online; /* DWORD 0 */
u8 host_gpio_online; /* DWORD 0 */
u8 mbox_netw_online; /* DWORD 0 */
u8 mbox_stor_online; /* DWORD 0 */
u8 axgmac0_online; /* DWORD 0 */
u8 axgmac1_online; /* DWORD 0 */
u8 mpu_intpend_online; /* DWORD 0 */
} __packed;
struct PCICFG_ONLINE0_CSR_AMAP {
u32 dw[1];
};
/* Online Control Register 1. This register controls various units within
* BladeEngine being in an Online or Offline state.
*/
struct BE_PCICFG_ONLINE1_CSR_AMAP {
u8 jtag_online; /* DWORD 0 */
u8 lpcmemhost_online; /* DWORD 0 */
u8 mgmt_mac_online; /* DWORD 0 */
u8 mpu_iram_online; /* DWORD 0 */
u8 pcs0online_online; /* DWORD 0 */
u8 pcs1online_online; /* DWORD 0 */
u8 pctl0_online; /* DWORD 0 */
u8 pctl1_online; /* DWORD 0 */
u8 pmem_online; /* DWORD 0 */
u8 rr_online; /* DWORD 0 */
u8 rxpp_online; /* DWORD 0 */
u8 txpb_online; /* DWORD 0 */
u8 txp_online; /* DWORD 0 */
u8 xaui_online; /* DWORD 0 */
u8 arm_online; /* DWORD 0 */
u8 ipc_online; /* DWORD 0 */
u8 rsvd0[16]; /* DWORD 0 */
} __packed;
struct PCICFG_ONLINE1_CSR_AMAP {
u32 dw[1];
};
/* Host Timer Register. */
struct BE_PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP {
u8 hosttimer[24]; /* DWORD 0 */
u8 hostintr; /* DWORD 0 */
u8 rsvd0[7]; /* DWORD 0 */
} __packed;
struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP {
u32 dw[1];
};
/* Scratchpad Register (for software use). */
struct BE_PCICFG_SCRATCHPAD_CSR_AMAP {
u8 scratchpad[32]; /* DWORD 0 */
} __packed;
struct PCICFG_SCRATCHPAD_CSR_AMAP {
u32 dw[1];
};
/* PCI Express Capabilities Register. */
struct BE_PCICFG_PCIE_CAP_CSR_AMAP {
u8 capid[8]; /* DWORD 0 */
u8 nextcap[8]; /* DWORD 0 */
u8 capver[4]; /* DWORD 0 */
u8 devport[4]; /* DWORD 0 */
u8 rsvd0[6]; /* DWORD 0 */
u8 rsvd1[2]; /* DWORD 0 */
} __packed;
struct PCICFG_PCIE_CAP_CSR_AMAP {
u32 dw[1];
};
/* PCI Express Device Capabilities Register. */
struct BE_PCICFG_PCIE_DEVCAP_CSR_AMAP {
u8 payload[3]; /* DWORD 0 */
u8 rsvd0[3]; /* DWORD 0 */
u8 lo_lat[3]; /* DWORD 0 */
u8 l1_lat[3]; /* DWORD 0 */
u8 rsvd1[3]; /* DWORD 0 */
u8 rsvd2[3]; /* DWORD 0 */
u8 pwr_value[8]; /* DWORD 0 */
u8 pwr_scale[2]; /* DWORD 0 */
u8 rsvd3[4]; /* DWORD 0 */
} __packed;
struct PCICFG_PCIE_DEVCAP_CSR_AMAP {
u32 dw[1];
};
/* PCI Express Device Control/Status Registers. */
struct BE_PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP {
u8 CorrErrReportEn; /* DWORD 0 */
u8 NonFatalErrReportEn; /* DWORD 0 */
u8 FatalErrReportEn; /* DWORD 0 */
u8 UnsuppReqReportEn; /* DWORD 0 */
u8 EnableRelaxOrder; /* DWORD 0 */
u8 Max_Payload_Size[3]; /* DWORD 0 */
u8 ExtendTagFieldEnable; /* DWORD 0 */
u8 PhantomFnEnable; /* DWORD 0 */
u8 AuxPwrPMEnable; /* DWORD 0 */
u8 EnableNoSnoop; /* DWORD 0 */
u8 Max_Read_Req_Size[3]; /* DWORD 0 */
u8 rsvd0; /* DWORD 0 */
u8 CorrErrDetect; /* DWORD 0 */
u8 NonFatalErrDetect; /* DWORD 0 */
u8 FatalErrDetect; /* DWORD 0 */
u8 UnsuppReqDetect; /* DWORD 0 */
u8 AuxPwrDetect; /* DWORD 0 */
u8 TransPending; /* DWORD 0 */
u8 rsvd1[10]; /* DWORD 0 */
} __packed;
struct PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP {
u32 dw[1];
};
/* PCI Express Link Capabilities Register. */
struct BE_PCICFG_PCIE_LINK_CAP_CSR_AMAP {
u8 MaxLinkSpeed[4]; /* DWORD 0 */
u8 MaxLinkWidth[6]; /* DWORD 0 */
u8 ASPMSupport[2]; /* DWORD 0 */
u8 L0sExitLat[3]; /* DWORD 0 */
u8 L1ExitLat[3]; /* DWORD 0 */
u8 rsvd0[6]; /* DWORD 0 */
u8 PortNum[8]; /* DWORD 0 */
} __packed;
struct PCICFG_PCIE_LINK_CAP_CSR_AMAP {
u32 dw[1];
};
/* PCI Express Link Status Register. */
struct BE_PCICFG_PCIE_LINK_STATUS_CSR_AMAP {
u8 ASPMCtl[2]; /* DWORD 0 */
u8 rsvd0; /* DWORD 0 */
u8 ReadCmplBndry; /* DWORD 0 */
u8 LinkDisable; /* DWORD 0 */
u8 RetrainLink; /* DWORD 0 */
u8 CommonClkConfig; /* DWORD 0 */
u8 ExtendSync; /* DWORD 0 */
u8 rsvd1[8]; /* DWORD 0 */
u8 LinkSpeed[4]; /* DWORD 0 */
u8 NegLinkWidth[6]; /* DWORD 0 */
u8 LinkTrainErr; /* DWORD 0 */
u8 LinkTrain; /* DWORD 0 */
u8 SlotClkConfig; /* DWORD 0 */
u8 rsvd2[3]; /* DWORD 0 */
} __packed;
struct PCICFG_PCIE_LINK_STATUS_CSR_AMAP {
u32 dw[1];
};
/* PCI Express MSI Configuration Register. */
struct BE_PCICFG_MSI_CSR_AMAP {
u8 capid[8]; /* DWORD 0 */
u8 nextptr[8]; /* DWORD 0 */
u8 tablesize[11]; /* DWORD 0 */
u8 rsvd0[3]; /* DWORD 0 */
u8 funcmask; /* DWORD 0 */
u8 en; /* DWORD 0 */
} __packed;
struct PCICFG_MSI_CSR_AMAP {
u32 dw[1];
};
/* MSI-X Table Offset Register. */
struct BE_PCICFG_MSIX_TABLE_CSR_AMAP {
u8 tablebir[3]; /* DWORD 0 */
u8 offset[29]; /* DWORD 0 */
} __packed;
struct PCICFG_MSIX_TABLE_CSR_AMAP {
u32 dw[1];
};
/* MSI-X PBA Offset Register. */
struct BE_PCICFG_MSIX_PBA_CSR_AMAP {
u8 pbabir[3]; /* DWORD 0 */
u8 offset[29]; /* DWORD 0 */
} __packed;
struct PCICFG_MSIX_PBA_CSR_AMAP {
u32 dw[1];
};
/* PCI Express MSI-X Message Vector Control Register. */
struct BE_PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP {
u8 vector_control; /* DWORD 0 */
u8 rsvd0[31]; /* DWORD 0 */
} __packed;
struct PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP {
u32 dw[1];
};
/* PCI Express MSI-X Message Data Register. */
struct BE_PCICFG_MSIX_MSG_DATA_CSR_AMAP {
u8 data[16]; /* DWORD 0 */
u8 rsvd0[16]; /* DWORD 0 */
} __packed;
struct PCICFG_MSIX_MSG_DATA_CSR_AMAP {
u32 dw[1];
};
/* PCI Express MSI-X Message Address Register - High Part. */
struct BE_PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP {
u8 addr[32]; /* DWORD 0 */
} __packed;
struct PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP {
u32 dw[1];
};
/* PCI Express MSI-X Message Address Register - Low Part. */
struct BE_PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP {
u8 rsvd0[2]; /* DWORD 0 */
u8 addr[30]; /* DWORD 0 */
} __packed;
struct PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP {
u32 dw[1];
};
struct BE_PCICFG_ANON_18_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
} __packed;
struct PCICFG_ANON_18_RSVD_AMAP {
u32 dw[1];
};
struct BE_PCICFG_ANON_19_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
} __packed;
struct PCICFG_ANON_19_RSVD_AMAP {
u32 dw[1];
};
struct BE_PCICFG_ANON_20_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
u8 rsvd1[25][32]; /* DWORD 1 */
} __packed;
struct PCICFG_ANON_20_RSVD_AMAP {
u32 dw[26];
};
struct BE_PCICFG_ANON_21_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
u8 rsvd1[1919][32]; /* DWORD 1 */
} __packed;
struct PCICFG_ANON_21_RSVD_AMAP {
u32 dw[1920];
};
struct BE_PCICFG_ANON_22_MESSAGE_AMAP {
struct BE_PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP vec_ctrl;
struct BE_PCICFG_MSIX_MSG_DATA_CSR_AMAP msg_data;
struct BE_PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP addr_hi;
struct BE_PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP addr_low;
} __packed;
struct PCICFG_ANON_22_MESSAGE_AMAP {
u32 dw[4];
};
struct BE_PCICFG_ANON_23_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
u8 rsvd1[895][32]; /* DWORD 1 */
} __packed;
struct PCICFG_ANON_23_RSVD_AMAP {
u32 dw[896];
};
/* These PCI Configuration Space registers are for the Storage Function of
* BladeEngine (Function 0). In the memory map of the registers below their
* table,
*/
struct BE_PCICFG0_CSRMAP_AMAP {
struct BE_PCICFG_ID_CSR_AMAP id;
u8 rsvd0[32]; /* DWORD 1 */
u8 rsvd1[32]; /* DWORD 2 */
u8 rsvd2[32]; /* DWORD 3 */
struct BE_PCICFG_IOBAR_CSR_AMAP iobar;
struct BE_PCICFG_MEMBAR0_CSR_AMAP membar0;
struct BE_PCICFG_MEMBAR1_LO_CSR_AMAP membar1_lo;
struct BE_PCICFG_MEMBAR1_HI_CSR_AMAP membar1_hi;
struct BE_PCICFG_MEMBAR2_LO_CSR_AMAP membar2_lo;
struct BE_PCICFG_MEMBAR2_HI_CSR_AMAP membar2_hi;
u8 rsvd3[32]; /* DWORD 10 */
struct BE_PCICFG_SUBSYSTEM_ID_F0_CSR_AMAP subsystem_id;
u8 rsvd4[32]; /* DWORD 12 */
u8 rsvd5[32]; /* DWORD 13 */
u8 rsvd6[32]; /* DWORD 14 */
u8 rsvd7[32]; /* DWORD 15 */
struct BE_PCICFG_SEMAPHORE_CSR_AMAP semaphore[4];
struct BE_PCICFG_SOFT_RESET_CSR_AMAP soft_reset;
u8 rsvd8[32]; /* DWORD 21 */
struct BE_PCICFG_SCRATCHPAD_CSR_AMAP scratchpad;
u8 rsvd9[32]; /* DWORD 23 */
u8 rsvd10[32]; /* DWORD 24 */
u8 rsvd11[32]; /* DWORD 25 */
u8 rsvd12[32]; /* DWORD 26 */
u8 rsvd13[32]; /* DWORD 27 */
u8 rsvd14[2][32]; /* DWORD 28 */
u8 rsvd15[32]; /* DWORD 30 */
u8 rsvd16[32]; /* DWORD 31 */
u8 rsvd17[8][32]; /* DWORD 32 */
struct BE_PCICFG_UE_STATUS_LOW_CSR_AMAP ue_status_low;
struct BE_PCICFG_UE_STATUS_HI_CSR_AMAP ue_status_hi;
struct BE_PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP ue_status_low_mask;
struct BE_PCICFG_UE_STATUS_HI_MASK_CSR_AMAP ue_status_hi_mask;
struct BE_PCICFG_ONLINE0_CSR_AMAP online0;
struct BE_PCICFG_ONLINE1_CSR_AMAP online1;
u8 rsvd18[32]; /* DWORD 46 */
u8 rsvd19[32]; /* DWORD 47 */
u8 rsvd20[32]; /* DWORD 48 */
u8 rsvd21[32]; /* DWORD 49 */
struct BE_PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP host_timer_int_ctrl;
u8 rsvd22[32]; /* DWORD 51 */
struct BE_PCICFG_PCIE_CAP_CSR_AMAP pcie_cap;
struct BE_PCICFG_PCIE_DEVCAP_CSR_AMAP pcie_devcap;
struct BE_PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP pcie_control_status;
struct BE_PCICFG_PCIE_LINK_CAP_CSR_AMAP pcie_link_cap;
struct BE_PCICFG_PCIE_LINK_STATUS_CSR_AMAP pcie_link_status;
struct BE_PCICFG_MSI_CSR_AMAP msi;
struct BE_PCICFG_MSIX_TABLE_CSR_AMAP msix_table_offset;
struct BE_PCICFG_MSIX_PBA_CSR_AMAP msix_pba_offset;
u8 rsvd23[32]; /* DWORD 60 */
u8 rsvd24[32]; /* DWORD 61 */
u8 rsvd25[32]; /* DWORD 62 */
u8 rsvd26[32]; /* DWORD 63 */
u8 rsvd27[32]; /* DWORD 64 */
u8 rsvd28[32]; /* DWORD 65 */
u8 rsvd29[32]; /* DWORD 66 */
u8 rsvd30[32]; /* DWORD 67 */
u8 rsvd31[32]; /* DWORD 68 */
u8 rsvd32[32]; /* DWORD 69 */
u8 rsvd33[32]; /* DWORD 70 */
u8 rsvd34[32]; /* DWORD 71 */
u8 rsvd35[32]; /* DWORD 72 */
u8 rsvd36[32]; /* DWORD 73 */
u8 rsvd37[32]; /* DWORD 74 */
u8 rsvd38[32]; /* DWORD 75 */
u8 rsvd39[32]; /* DWORD 76 */
u8 rsvd40[32]; /* DWORD 77 */
u8 rsvd41[32]; /* DWORD 78 */
u8 rsvd42[32]; /* DWORD 79 */
u8 rsvd43[32]; /* DWORD 80 */
u8 rsvd44[32]; /* DWORD 81 */
u8 rsvd45[32]; /* DWORD 82 */
u8 rsvd46[32]; /* DWORD 83 */
u8 rsvd47[32]; /* DWORD 84 */
u8 rsvd48[32]; /* DWORD 85 */
u8 rsvd49[32]; /* DWORD 86 */
u8 rsvd50[32]; /* DWORD 87 */
u8 rsvd51[32]; /* DWORD 88 */
u8 rsvd52[32]; /* DWORD 89 */
u8 rsvd53[32]; /* DWORD 90 */
u8 rsvd54[32]; /* DWORD 91 */
u8 rsvd55[32]; /* DWORD 92 */
u8 rsvd56[832]; /* DWORD 93 */
u8 rsvd57[32]; /* DWORD 119 */
u8 rsvd58[32]; /* DWORD 120 */
u8 rsvd59[32]; /* DWORD 121 */
u8 rsvd60[32]; /* DWORD 122 */
u8 rsvd61[32]; /* DWORD 123 */
u8 rsvd62[32]; /* DWORD 124 */
u8 rsvd63[32]; /* DWORD 125 */
u8 rsvd64[32]; /* DWORD 126 */
u8 rsvd65[32]; /* DWORD 127 */
u8 rsvd66[61440]; /* DWORD 128 */
struct BE_PCICFG_ANON_22_MESSAGE_AMAP message[32];
u8 rsvd67[28672]; /* DWORD 2176 */
u8 rsvd68[32]; /* DWORD 3072 */
u8 rsvd69[1023][32]; /* DWORD 3073 */
} __packed;
/* Raw DWORD-array view of the Function 0 PCI configuration space map
 * (4096 DWORDs == 16 KB).  Presumably overlays the bit-level BE_ struct
 * declared immediately above, following the BE_x_AMAP / x_AMAP pairing
 * used throughout this header -- confirm against the full file.
 */
struct PCICFG0_CSRMAP_AMAP {
u32 dw[4096];
};
/* The following BE_x_AMAP / x_AMAP pairs are autogenerated filler and
 * helper layouts for the Function 1 config-space map below.  In the
 * BE_ (bit-level) structs, array dimensions count BITS, not bytes:
 * u8 rsvd0[32] is one reserved 32-bit DWORD.  Each plain x_AMAP struct
 * is the raw u32 overlay of its BE_ counterpart.
 */
/* One reserved DWORD (bit-level view). */
struct BE_PCICFG_ANON_24_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
} __packed;
struct PCICFG_ANON_24_RSVD_AMAP {
u32 dw[1];
};
/* One reserved DWORD (bit-level view). */
struct BE_PCICFG_ANON_25_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
} __packed;
struct PCICFG_ANON_25_RSVD_AMAP {
u32 dw[1];
};
/* One reserved DWORD (bit-level view). */
struct BE_PCICFG_ANON_26_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
} __packed;
struct PCICFG_ANON_26_RSVD_AMAP {
u32 dw[1];
};
/* Two reserved DWORDs. */
struct BE_PCICFG_ANON_27_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
u8 rsvd1[32]; /* DWORD 1 */
} __packed;
struct PCICFG_ANON_27_RSVD_AMAP {
u32 dw[2];
};
/* Four reserved DWORDs. */
struct BE_PCICFG_ANON_28_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
u8 rsvd1[3][32]; /* DWORD 1 */
} __packed;
struct PCICFG_ANON_28_RSVD_AMAP {
u32 dw[4];
};
/* 37 reserved DWORDs. */
struct BE_PCICFG_ANON_29_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
u8 rsvd1[36][32]; /* DWORD 1 */
} __packed;
struct PCICFG_ANON_29_RSVD_AMAP {
u32 dw[37];
};
/* 1931 reserved DWORDs. */
struct BE_PCICFG_ANON_30_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
u8 rsvd1[1930][32]; /* DWORD 1 */
} __packed;
struct PCICFG_ANON_30_RSVD_AMAP {
u32 dw[1931];
};
/* One MSI-X table entry (4 DWORDs): vector control, message data, and
 * the 64-bit message address split into high/low halves.  Used as
 * message[32] in the Function 1 map below.
 */
struct BE_PCICFG_ANON_31_MESSAGE_AMAP {
struct BE_PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP vec_ctrl;
struct BE_PCICFG_MSIX_MSG_DATA_CSR_AMAP msg_data;
struct BE_PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP addr_hi;
struct BE_PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP addr_low;
} __packed;
struct PCICFG_ANON_31_MESSAGE_AMAP {
u32 dw[4];
};
/* 896 reserved DWORDs. */
struct BE_PCICFG_ANON_32_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
u8 rsvd1[895][32]; /* DWORD 1 */
} __packed;
struct PCICFG_ANON_32_RSVD_AMAP {
u32 dw[896];
};
/* This PCI configuration space register map is for the Networking Function of
 * BladeEngine (Function 1).  Field widths are in bits: u8 rsvd[32] is one
 * reserved 32-bit DWORD, and the trailing /@ DWORD n @/ comments give each
 * field's DWORD offset.  The layout totals 4096 DWORDs, matching the raw
 * u32 overlay PCICFG1_CSRMAP_AMAP declared after it.  Do not reorder or
 * resize members -- this is an exact hardware register layout.
 */
struct BE_PCICFG1_CSRMAP_AMAP {
struct BE_PCICFG_ID_CSR_AMAP id;
u8 rsvd0[32]; /* DWORD 1 */
u8 rsvd1[32]; /* DWORD 2 */
u8 rsvd2[32]; /* DWORD 3 */
struct BE_PCICFG_IOBAR_CSR_AMAP iobar;
struct BE_PCICFG_MEMBAR0_CSR_AMAP membar0;
struct BE_PCICFG_MEMBAR1_LO_CSR_AMAP membar1_lo;
struct BE_PCICFG_MEMBAR1_HI_CSR_AMAP membar1_hi;
struct BE_PCICFG_MEMBAR2_LO_CSR_AMAP membar2_lo;
struct BE_PCICFG_MEMBAR2_HI_CSR_AMAP membar2_hi;
u8 rsvd3[32]; /* DWORD 10 */
struct BE_PCICFG_SUBSYSTEM_ID_F1_CSR_AMAP subsystem_id;
u8 rsvd4[32]; /* DWORD 12 */
u8 rsvd5[32]; /* DWORD 13 */
u8 rsvd6[32]; /* DWORD 14 */
u8 rsvd7[32]; /* DWORD 15 */
struct BE_PCICFG_SEMAPHORE_CSR_AMAP semaphore[4];
struct BE_PCICFG_SOFT_RESET_CSR_AMAP soft_reset;
u8 rsvd8[32]; /* DWORD 21 */
struct BE_PCICFG_SCRATCHPAD_CSR_AMAP scratchpad;
u8 rsvd9[32]; /* DWORD 23 */
u8 rsvd10[32]; /* DWORD 24 */
u8 rsvd11[32]; /* DWORD 25 */
u8 rsvd12[32]; /* DWORD 26 */
u8 rsvd13[32]; /* DWORD 27 */
u8 rsvd14[2][32]; /* DWORD 28 */
u8 rsvd15[32]; /* DWORD 30 */
u8 rsvd16[32]; /* DWORD 31 */
u8 rsvd17[8][32]; /* DWORD 32 */
/* Unrecoverable-error status/mask and online state registers. */
struct BE_PCICFG_UE_STATUS_LOW_CSR_AMAP ue_status_low;
struct BE_PCICFG_UE_STATUS_HI_CSR_AMAP ue_status_hi;
struct BE_PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP ue_status_low_mask;
struct BE_PCICFG_UE_STATUS_HI_MASK_CSR_AMAP ue_status_hi_mask;
struct BE_PCICFG_ONLINE0_CSR_AMAP online0;
struct BE_PCICFG_ONLINE1_CSR_AMAP online1;
u8 rsvd18[32]; /* DWORD 46 */
u8 rsvd19[32]; /* DWORD 47 */
u8 rsvd20[32]; /* DWORD 48 */
u8 rsvd21[32]; /* DWORD 49 */
struct BE_PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP host_timer_int_ctrl;
u8 rsvd22[32]; /* DWORD 51 */
/* PCIe capability structure and MSI / MSI-X capability registers. */
struct BE_PCICFG_PCIE_CAP_CSR_AMAP pcie_cap;
struct BE_PCICFG_PCIE_DEVCAP_CSR_AMAP pcie_devcap;
struct BE_PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP pcie_control_status;
struct BE_PCICFG_PCIE_LINK_CAP_CSR_AMAP pcie_link_cap;
struct BE_PCICFG_PCIE_LINK_STATUS_CSR_AMAP pcie_link_status;
struct BE_PCICFG_MSI_CSR_AMAP msi;
struct BE_PCICFG_MSIX_TABLE_CSR_AMAP msix_table_offset;
struct BE_PCICFG_MSIX_PBA_CSR_AMAP msix_pba_offset;
u8 rsvd23[64]; /* DWORD 60 */
u8 rsvd24[32]; /* DWORD 62 */
u8 rsvd25[32]; /* DWORD 63 */
u8 rsvd26[32]; /* DWORD 64 */
u8 rsvd27[32]; /* DWORD 65 */
u8 rsvd28[32]; /* DWORD 66 */
u8 rsvd29[32]; /* DWORD 67 */
u8 rsvd30[32]; /* DWORD 68 */
u8 rsvd31[32]; /* DWORD 69 */
u8 rsvd32[32]; /* DWORD 70 */
u8 rsvd33[32]; /* DWORD 71 */
u8 rsvd34[32]; /* DWORD 72 */
u8 rsvd35[32]; /* DWORD 73 */
u8 rsvd36[32]; /* DWORD 74 */
u8 rsvd37[128]; /* DWORD 75 */
u8 rsvd38[32]; /* DWORD 79 */
u8 rsvd39[1184]; /* DWORD 80 */
u8 rsvd40[61792]; /* DWORD 117 */
/* MSI-X table: 32 entries of 4 DWORDs each, at DWORD 2048. */
struct BE_PCICFG_ANON_31_MESSAGE_AMAP message[32];
u8 rsvd41[28672]; /* DWORD 2176 */
u8 rsvd42[32]; /* DWORD 3072 */
u8 rsvd43[1023][32]; /* DWORD 3073 */
} __packed;
/* Raw DWORD-array overlay of BE_PCICFG1_CSRMAP_AMAP (4096 DWORDs == 16 KB). */
struct PCICFG1_CSRMAP_AMAP {
u32 dw[4096];
};
#endif /* __pcicfg_amap_h__ */

/*
 * --- File boundary (extraction artifact): end of the pcicfg AMAP header.
 * The next section is the deleted drivers/staging/benet post-codes header
 * (111 lines removed by this commit). ---
 */
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __post_codes_amap_h__
#define __post_codes_amap_h__
/* --- MGMT_HBA_POST_STAGE_ENUM --- */
/* Values reported in the 'stage' field of
 * BE_MGMT_HBA_POST_STATUS_STRUCT_AMAP (below), indicating how far the
 * ARM firmware's power-on self test has progressed.
 */
#define POST_STAGE_POWER_ON_RESET (0) /* State after a cold or warm boot. */
#define POST_STAGE_AWAITING_HOST_RDY (1) /* ARM boot code awaiting a
go-ahead from the host. */
#define POST_STAGE_HOST_RDY (2) /* Host has given go-ahead to ARM. */
#define POST_STAGE_BE_RESET (3) /* Host wants to reset chip, this is a chip
workaround */
#define POST_STAGE_SEEPROM_CS_START (256) /* SEEPROM checksum
test start. */
#define POST_STAGE_SEEPROM_CS_DONE (257) /* SEEPROM checksum test
done. */
#define POST_STAGE_DDR_CONFIG_START (512) /* DDR configuration start. */
#define POST_STAGE_DDR_CONFIG_DONE (513) /* DDR configuration done. */
#define POST_STAGE_DDR_CALIBRATE_START (768) /* DDR calibration start. */
#define POST_STAGE_DDR_CALIBRATE_DONE (769) /* DDR calibration done. */
#define POST_STAGE_DDR_TEST_START (1024) /* DDR memory test start. */
#define POST_STAGE_DDR_TEST_DONE (1025) /* DDR memory test done. */
#define POST_STAGE_REDBOOT_INIT_START (1536) /* Redboot starts execution. */
#define POST_STAGE_REDBOOT_INIT_DONE (1537) /* Redboot done execution. */
#define POST_STAGE_FW_IMAGE_LOAD_START (1792) /* Firmware image load to
DDR start. */
#define POST_STAGE_FW_IMAGE_LOAD_DONE (1793) /* Firmware image load
to DDR done. */
#define POST_STAGE_ARMFW_START (2048) /* ARMfw runtime code
starts execution. */
#define POST_STAGE_DHCP_QUERY_START (2304) /* DHCP server query start. */
#define POST_STAGE_DHCP_QUERY_DONE (2305) /* DHCP server query done. */
#define POST_STAGE_BOOT_TARGET_DISCOVERY_START (2560) /* Boot Target
Discovery Start. */
#define POST_STAGE_BOOT_TARGET_DISCOVERY_DONE (2561) /* Boot Target
Discovery Done. */
#define POST_STAGE_RC_OPTION_SET (2816) /* Remote configuration
option is set in SEEPROM */
#define POST_STAGE_SWITCH_LINK (2817) /* Wait for link up on switch */
#define POST_STAGE_SEND_ICDS_MESSAGE (2818) /* Send the ICDS message
to switch */
#define POST_STAGE_PERFROM_TFTP (2819) /* Download xml using TFTP.
NOTE(review): "PERFROM" is a typo for "PERFORM", but the identifier is
kept as-is since renaming it would break existing users. */
#define POST_STAGE_PARSE_XML (2820) /* Parse XML file */
#define POST_STAGE_DOWNLOAD_IMAGE (2821) /* Download IMAGE from
TFTP server */
#define POST_STAGE_FLASH_IMAGE (2822) /* Flash the IMAGE */
#define POST_STAGE_RC_DONE (2823) /* Remote configuration
complete */
#define POST_STAGE_REBOOT_SYSTEM (2824) /* Upgrade IMAGE done,
reboot required */
#define POST_STAGE_MAC_ADDRESS (3072) /* MAC Address Check */
#define POST_STAGE_ARMFW_READY (49152) /* ARMfw is done with POST
and ready. */
#define POST_STAGE_ARMFW_UE (61440) /* ARMfw has asserted an
unrecoverable error. The
lower 3 hex digits of the
stage code identify the
unique error code.
*/
/* This structure defines the format of the MPU semaphore
 * register when used for POST.  Field widths are in bits; a field with
 * no array dimension is a single bit.  The 'stage' field holds a
 * MGMT_HBA_POST_STAGE_ENUM value, and the single-bit flags occupy bits
 * 26-31 (16 + 10 reserved bits precede them), matching the POST_BIT_*
 * positions defined below.
 */
struct BE_MGMT_HBA_POST_STATUS_STRUCT_AMAP {
u8 stage[16]; /* DWORD 0 - current POST stage (bits 0-15) */
u8 rsvd0[10]; /* DWORD 0 */
u8 iscsi_driver_loaded; /* DWORD 0 - bit 26 */
u8 option_rom_installed; /* DWORD 0 - bit 27 */
u8 iscsi_ip_conflict; /* DWORD 0 - bit 28 */
u8 iscsi_no_ip; /* DWORD 0 - bit 29 */
u8 backup_fw; /* DWORD 0 - bit 30 */
u8 error; /* DWORD 0 - bit 31 */
} __packed;
/* Raw one-DWORD overlay of the bit-level structure above. */
struct MGMT_HBA_POST_STATUS_STRUCT_AMAP {
u32 dw[1];
};
/* --- MGMT_HBA_POST_DUMMY_BITS_ENUM --- */
/* Bit positions of the single-bit flags in
 * BE_MGMT_HBA_POST_STATUS_STRUCT_AMAP above.
 */
#define POST_BIT_ISCSI_LOADED (26)
#define POST_BIT_OPTROM_INST (27)
#define POST_BIT_BAD_IP_ADDR (28)
#define POST_BIT_NO_IP_ADDR (29)
#define POST_BIT_BACKUP_FW (30)
#define POST_BIT_ERROR (31)
/* --- MGMT_HBA_POST_DUMMY_VALUES_ENUM --- */
/* Mask values for the same flags: each is 1 << POST_BIT_x. */
#define POST_ISCSI_DRIVER_LOADED (67108864) /* 1 << 26 */
#define POST_OPTROM_INSTALLED (134217728) /* 1 << 27 */
#define POST_ISCSI_IP_ADDRESS_CONFLICT (268435456) /* 1 << 28 */
#define POST_ISCSI_NO_IP_ADDRESS (536870912) /* 1 << 29 */
#define POST_BACKUP_FW_LOADED (1073741824) /* 1 << 30 */
#define POST_FATAL_ERROR (2147483648) /* 1 << 31 */
#endif /* __post_codes_amap_h__ */

/*
 * --- File boundary (extraction artifact): end of the post-codes header.
 * The next section is the deleted drivers/staging/benet register-map
 * header (68 lines removed by this commit). ---
 */
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __regmap_amap_h__
#define __regmap_amap_h__
#include "pcicfg.h"
#include "ep.h"
#include "cev.h"
#include "mpu.h"
#include "doorbells.h"
/*
* This is the control and status register map for BladeEngine, showing
* the relative size and offset of each sub-module. The CSR registers
* are identical for the network and storage PCI functions. The
* CSR map is shown below, followed by details of each block,
* in sub-sections. The sub-sections begin with a description
* of CSRs that are instantiated in multiple blocks.
*/
struct BE_BLADE_ENGINE_CSRMAP_AMAP {
/* MPU block at DWORD 0; the following field starts at DWORD 256. */
struct BE_MPU_CSRMAP_AMAP mpu;
u8 rsvd0[8192]; /* DWORD 256 */
u8 rsvd1[8192]; /* DWORD 512 */
/* CEV block covers DWORDs 768-1023 (layout defined in cev.h). */
struct BE_CEV_CSRMAP_AMAP cev;
u8 rsvd2[8192]; /* DWORD 1024 */
u8 rsvd3[8192]; /* DWORD 1280 */
u8 rsvd4[8192]; /* DWORD 1536 */
u8 rsvd5[8192]; /* DWORD 1792 */
u8 rsvd6[8192]; /* DWORD 2048 */
u8 rsvd7[8192]; /* DWORD 2304 */
u8 rsvd8[8192]; /* DWORD 2560 */
u8 rsvd9[8192]; /* DWORD 2816 */
u8 rsvd10[8192]; /* DWORD 3072 */
u8 rsvd11[8192]; /* DWORD 3328 */
u8 rsvd12[8192]; /* DWORD 3584 */
u8 rsvd13[8192]; /* DWORD 3840 */
u8 rsvd14[8192]; /* DWORD 4096 */
u8 rsvd15[8192]; /* DWORD 4352 */
u8 rsvd16[8192]; /* DWORD 4608 */
u8 rsvd17[8192]; /* DWORD 4864 */
u8 rsvd18[8192]; /* DWORD 5120 */
u8 rsvd19[8192]; /* DWORD 5376 */
u8 rsvd20[8192]; /* DWORD 5632 */
u8 rsvd21[8192]; /* DWORD 5888 */
u8 rsvd22[8192]; /* DWORD 6144 */
u8 rsvd23[17152][32]; /* DWORD 6400 */
} __packed;
/* Raw DWORD-array overlay of the CSR map above (23552 DWORDs == 92 KB). */
struct BLADE_ENGINE_CSRMAP_AMAP {
u32 dw[23552];
};
#endif /* __regmap_amap_h__ */