Merge git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6: (237 commits)
  Staging: android: binder: fix build errors
  Staging: android: add lowmemorykiller driver
  Staging: android: remove dummy android.c driver
  Staging: android: timed_gpio: Rename android_timed_gpio to timed_gpio
  Staging: android: add timed_gpio driver
  Staging: android: add ram_console driver
  Staging: android: add logging driver
  staging: android: binder: Fix use of euid
  Staging: android: binder: Fix gcc warnings about improper format specifiers for size_t in printk
  Staging: android: add binder driver
  Staging: add android framework
  Staging: epl: fix netdev->priv b0rkage
  Staging: epl: hr timers all run in hard irq context now
  Staging: epl: run Lindent on *.c files
  Staging: epl: run Lindent on *.h files
  Staging: epl: run Lindent on all user/*.h files
  Staging: epl: run Lindent on all kernel/*.h files
  Staging: add epl stack
  Staging: frontier: fix compiler warnings
  Staging: frontier: remove unused alphatrack_sysfs.c file
  ...
This commit is contained in:
Linus Torvalds 2009-01-06 17:04:29 -08:00
commit ce519e2327
778 changed files with 472708 additions and 26492 deletions

View File

@ -1325,6 +1325,8 @@ source "drivers/regulator/Kconfig"
source "drivers/uio/Kconfig"
source "drivers/staging/Kconfig"
endmenu
source "fs/Kconfig"

View File

@ -681,6 +681,8 @@ source "drivers/usb/Kconfig"
source "drivers/uwb/Kconfig"
source "drivers/staging/Kconfig"
source "arch/cris/Kconfig.debug"
source "security/Kconfig"

View File

@ -220,6 +220,8 @@ source "drivers/uwb/Kconfig"
endmenu
source "drivers/staging/Kconfig"
source "fs/Kconfig"
source "arch/h8300/Kconfig.debug"

View File

@ -49,6 +49,8 @@ source "drivers/staging/sxg/Kconfig"
source "drivers/staging/me4000/Kconfig"
source "drivers/staging/meilhaus/Kconfig"
source "drivers/staging/go7007/Kconfig"
source "drivers/staging/usbip/Kconfig"
@ -63,5 +65,35 @@ source "drivers/staging/at76_usb/Kconfig"
source "drivers/staging/poch/Kconfig"
source "drivers/staging/agnx/Kconfig"
source "drivers/staging/otus/Kconfig"
source "drivers/staging/rt2860/Kconfig"
source "drivers/staging/rt2870/Kconfig"
source "drivers/staging/benet/Kconfig"
source "drivers/staging/comedi/Kconfig"
source "drivers/staging/asus_oled/Kconfig"
source "drivers/staging/panel/Kconfig"
source "drivers/staging/altpciechdma/Kconfig"
source "drivers/staging/rtl8187se/Kconfig"
source "drivers/staging/rspiusb/Kconfig"
source "drivers/staging/mimio/Kconfig"
source "drivers/staging/frontier/Kconfig"
source "drivers/staging/epl/Kconfig"
source "drivers/staging/android/Kconfig"
endif # !STAGING_EXCLUDE_BUILD
endif # STAGING

View File

@ -7,6 +7,7 @@ obj-$(CONFIG_ET131X) += et131x/
obj-$(CONFIG_SLICOSS) += slicoss/
obj-$(CONFIG_SXG) += sxg/
obj-$(CONFIG_ME4000) += me4000/
obj-$(CONFIG_MEILHAUS) += meilhaus/
obj-$(CONFIG_VIDEO_GO7007) += go7007/
obj-$(CONFIG_USB_IP_COMMON) += usbip/
obj-$(CONFIG_W35UND) += winbond/
@ -14,3 +15,18 @@ obj-$(CONFIG_PRISM2_USB) += wlan-ng/
obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_USB_ATMEL) += at76_usb/
obj-$(CONFIG_POCH) += poch/
obj-$(CONFIG_AGNX) += agnx/
obj-$(CONFIG_OTUS) += otus/
obj-$(CONFIG_RT2860) += rt2860/
obj-$(CONFIG_RT2870) += rt2870/
obj-$(CONFIG_BENET) += benet/
obj-$(CONFIG_COMEDI) += comedi/
obj-$(CONFIG_ASUS_OLED) += asus_oled/
obj-$(CONFIG_PANEL) += panel/
obj-$(CONFIG_ALTERA_PCIE_CHDMA) += altpciechdma/
obj-$(CONFIG_RTL8187SE) += rtl8187se/
obj-$(CONFIG_USB_RSPI) += rspiusb/
obj-$(CONFIG_INPUT_MIMIO) += mimio/
obj-$(CONFIG_TRANZPORT) += frontier/
obj-$(CONFIG_EPL) += epl/
obj-$(CONFIG_ANDROID) += android/

View File

@ -0,0 +1,5 @@
config AGNX
tristate "Wireless Airgo AGNX support"
depends on WLAN_80211 && MAC80211
---help---
This is an experimental driver for Airgo AGNX00 wireless chip.

View File

@ -0,0 +1,8 @@
obj-$(CONFIG_AGNX) += agnx.o
agnx-objs := rf.o \
pci.o \
xmit.o \
table.o \
sta.o \
phy.o

22
drivers/staging/agnx/TODO Normal file
View File

@ -0,0 +1,22 @@
2008 7/18
The RX path cannot receive OFDM packets correctly;
it probably needs an RX calibration pass.
before 2008 3/1
1: The RX path gets too many "CRC failed" packets, which makes the card very unstable,
2: After running a while, the card gets endless "RX Frame" and "Error"
   interrupts; the root cause is unknown so far — try to fix it
3: Using two tx queue txd and txm but not only txm.
4: Set the hdr correctly.
5: Try to recalibrate correctly
6: To support G mode in future
7: Fix the MAC address not being read and set correctly on big-endian machines.
8: Fix including and excluding the FCS in promiscuous mode and managed mode
9: Using sta_notify to notice sta change
10: Turn on frame reception at the end of start
11: Guess the card support HW_MULTICAST_FILTER
12: Should the TX process be implemented atomically?
13: Using mac80211 function to control the TX&RX LED.

154
drivers/staging/agnx/agnx.h Normal file
View File

@ -0,0 +1,154 @@
#ifndef AGNX_H_
#define AGNX_H_
#include "xmit.h"
/* Log prefix used by all printk() calls in this driver. */
#define PFX KBUILD_MODNAME ": "
/* Read the 32-bit device register at byte offset 'offset' of 'mem_region'. */
static inline u32 agnx_read32(void __iomem *mem_region, u32 offset)
{
return ioread32(mem_region + offset);
}
/* Write 'val' to the 32-bit device register at byte offset 'offset'. */
static inline void agnx_write32(void __iomem *mem_region, u32 offset, u32 val)
{
iowrite32(val, mem_region + offset);
}
/* Disabled legacy 802.11b rate table kept for reference. */
/* static const struct ieee80211_rate agnx_rates_80211b[] = { */
/* { .rate = 10, */
/* .val = 0xa, */
/* .flags = IEEE80211_RATE_CCK }, */
/* { .rate = 20, */
/* .val = 0x14, */
/* .hw_value = -0x14, */
/* .flags = IEEE80211_RATE_CCK_2 }, */
/* { .rate = 55, */
/* .val = 0x37, */
/* .val2 = -0x37, */
/* .flags = IEEE80211_RATE_CCK_2 }, */
/* { .rate = 110, */
/* .val = 0x6e, */
/* .val2 = -0x6e, */
/* .flags = IEEE80211_RATE_CCK_2 } */
/* }; */
/* 802.11b/g bitrate table registered with mac80211; .bitrate is in
 * units of 100 kbit/s, .hw_value is the chip's rate code. */
static const struct ieee80211_rate agnx_rates_80211g[] = {
/* { .bitrate = 10, .hw_value = 1, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, */
/* { .bitrate = 20, .hw_value = 2, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, */
/* { .bitrate = 55, .hw_value = 3, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, */
/* { .bitrate = 110, .hw_value = 4, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, */
{ .bitrate = 10, .hw_value = 1, },
{ .bitrate = 20, .hw_value = 2, },
{ .bitrate = 55, .hw_value = 3, },
{ .bitrate = 110, .hw_value = 4,},
{ .bitrate = 60, .hw_value = 0xB, },
{ .bitrate = 90, .hw_value = 0xF, },
{ .bitrate = 120, .hw_value = 0xA },
{ .bitrate = 180, .hw_value = 0xE, },
// { .bitrate = 240, .hw_value = 0xd, },
{ .bitrate = 360, .hw_value = 0xD, },
{ .bitrate = 480, .hw_value = 0x8, },
{ .bitrate = 540, .hw_value = 0xC, },
};
/* 2.4 GHz channels 1-14 supported by the chip. */
static const struct ieee80211_channel agnx_channels[] = {
{ .center_freq = 2412, .hw_value = 1, },
{ .center_freq = 2417, .hw_value = 2, },
{ .center_freq = 2422, .hw_value = 3, },
{ .center_freq = 2427, .hw_value = 4, },
{ .center_freq = 2432, .hw_value = 5, },
{ .center_freq = 2437, .hw_value = 6, },
{ .center_freq = 2442, .hw_value = 7, },
{ .center_freq = 2447, .hw_value = 8, },
{ .center_freq = 2452, .hw_value = 9, },
{ .center_freq = 2457, .hw_value = 10, },
{ .center_freq = 2462, .hw_value = 11, },
{ .center_freq = 2467, .hw_value = 12, },
{ .center_freq = 2472, .hw_value = 13, },
{ .center_freq = 2484, .hw_value = 14, },
};
#define NUM_DRIVE_MODES 2
/* Agnx operate mode */
enum {
AGNX_MODE_80211A,
AGNX_MODE_80211A_OOB,
AGNX_MODE_80211A_MIMO,
AGNX_MODE_80211B_SHORT,
AGNX_MODE_80211B_LONG,
AGNX_MODE_80211G,
AGNX_MODE_80211G_OOB,
AGNX_MODE_80211G_MIMO,
};
/* Driver lifecycle state, kept in agnx_priv.init_status. */
enum {
AGNX_UNINIT,
AGNX_START,
AGNX_STOP,
};
/* Per-device private state, allocated inside struct ieee80211_hw. */
struct agnx_priv {
struct pci_dev *pdev;
struct ieee80211_hw *hw;
spinlock_t lock;
struct mutex mutex;
unsigned int init_status;
void __iomem *ctl; /* pointer to base ram address */
void __iomem *data; /* pointer to mem region #2 */
struct agnx_ring rx;
struct agnx_ring txm;
struct agnx_ring txd;
/* Need volatile? */
u32 irq_status;
struct delayed_work periodic_work; /* Periodic tasks like recalibrate*/
struct ieee80211_low_level_stats stats;
// unsigned int phymode;
int mode;
int channel;
u8 bssid[ETH_ALEN];
u8 mac_addr[ETH_ALEN];
u8 revid;
struct ieee80211_supported_band band;
};
#define AGNX_CHAINS_MAX 6
#define AGNX_PERIODIC_DELAY 60000 /* unit: ms */
#define LOCAL_STAID 0 /* the station entry for the card itself */
#define BSSID_STAID 1 /* the station entry for the bsssid AP */
/* Fixed settle delays for the chip's serial/EEPROM interfaces. */
#define spi_delay() udelay(40)
#define eeprom_delay() udelay(40)
#define routing_table_delay() udelay(50)
/* PDU pool MEM region #2 */
#define AGNX_PDUPOOL 0x40000 /* PDU pool */
#define AGNX_PDUPOOL_SIZE 0x8000 /* PDU pool size*/
#define AGNX_PDU_TX_WQ 0x41000 /* PDU list TX workqueue */
#define AGNX_PDU_FREE 0x41800 /* Free Pool */
#define PDU_SIZE 0x80 /* Free Pool node size */
#define PDU_FREE_CNT 0xd0 /* Free pool node count */
/* RF stuffs */
extern void rf_chips_init(struct agnx_priv *priv);
extern void spi_rc_write(void __iomem *mem_region, u32 chip_ids, u32 sw);
extern void calibrate_oscillator(struct agnx_priv *priv);
extern void do_calibration(struct agnx_priv *priv);
extern void antenna_calibrate(struct agnx_priv *priv);
extern void __antenna_calibrate(struct agnx_priv *priv);
extern void print_offsets(struct agnx_priv *priv);
extern int agnx_set_channel(struct agnx_priv *priv, unsigned int channel);
#endif /* AGNX_H_ */

View File

@ -0,0 +1,418 @@
#ifndef AGNX_DEBUG_H_
#define AGNX_DEBUG_H_
#include "agnx.h"
#include "phy.h"
#include "sta.h"
#include "xmit.h"
/* Trace the current function and line; used at driver entry points. */
#define AGNX_TRACE printk(KERN_ERR PFX "function:%s line:%d\n", __func__, __LINE__)
/* Dump helpers: each prints "prefix: name value" after the appropriate
 * endian conversion of the field. */
#define PRINTK_LE16(prefix, var) printk(KERN_DEBUG PFX #prefix ": " #var " 0x%.4x\n", le16_to_cpu(var))
#define PRINTK_LE32(prefix, var) printk(KERN_DEBUG PFX #prefix ": " #var " 0x%.8x\n", le32_to_cpu(var))
#define PRINTK_U8(prefix, var) printk(KERN_DEBUG PFX #prefix ": " #var " 0x%.2x\n", var)
#define PRINTK_BE16(prefix, var) printk(KERN_DEBUG PFX #prefix ": " #var " 0x%.4x\n", be16_to_cpu(var))
#define PRINTK_BE32(prefix, var) printk(KERN_DEBUG PFX #prefix ": " #var " 0x%.8x\n", be32_to_cpu(var))
/* Extract and print bit-field 'field' from a local variable named 'reg';
 * relies on a matching field##_SHIFT constant being in scope. */
#define PRINTK_BITS(prefix, field) printk(KERN_DEBUG PFX #prefix ": " #field ": 0x%x\n", (reg & field) >> field##_SHIFT)
/* Log 'reason' and halt the kernel: unrecoverable driver state. */
static inline void agnx_bug(char *reason)
{
printk(KERN_ERR PFX "%s\n", reason);
BUG();
}
/* Dump every bit-field of a DMA fragment descriptor; first and
 * subsequent fragments carry different field layouts. */
static inline void agnx_print_desc(struct agnx_desc *desc)
{
u32 reg = be32_to_cpu(desc->frag);
PRINTK_BITS(DESC, PACKET_LEN);
if (reg & FIRST_FRAG) {
PRINTK_BITS(DESC, FIRST_PACKET_MASK);
PRINTK_BITS(DESC, FIRST_RESERV2);
PRINTK_BITS(DESC, FIRST_TKIP_ERROR);
PRINTK_BITS(DESC, FIRST_TKIP_PACKET);
PRINTK_BITS(DESC, FIRST_RESERV1);
PRINTK_BITS(DESC, FIRST_FRAG_LEN);
} else {
PRINTK_BITS(DESC, SUB_RESERV2);
PRINTK_BITS(DESC, SUB_TKIP_ERROR);
PRINTK_BITS(DESC, SUB_TKIP_PACKET);
PRINTK_BITS(DESC, SUB_RESERV1);
PRINTK_BITS(DESC, SUB_FRAG_LEN);
}
PRINTK_BITS(DESC, FIRST_FRAG);
PRINTK_BITS(DESC, LAST_FRAG);
PRINTK_BITS(DESC, OWNER);
}
/* TODO: decode and dump the 802.11b PHY header words (currently a stub). */
static inline void dump_ieee80211b_phy_hdr(__be32 _11b0, __be32 _11b1)
{
}
/* Dump every register word and derived field of a hardware TX/RX header.
 * Shared by the RX- and TX-specific printers below. */
static inline void agnx_print_hdr(struct agnx_hdr *hdr)
{
u32 reg;
int i;
reg = be32_to_cpu(hdr->reg0);
PRINTK_BITS(HDR, RTS);
PRINTK_BITS(HDR, MULTICAST);
PRINTK_BITS(HDR, ACK);
PRINTK_BITS(HDR, TM);
PRINTK_BITS(HDR, RELAY);
PRINTK_BITS(HDR, REVISED_FCS);
PRINTK_BITS(HDR, NEXT_BUFFER_ADDR);
reg = be32_to_cpu(hdr->reg1);
PRINTK_BITS(HDR, MAC_HDR_LEN);
PRINTK_BITS(HDR, DURATION_OVERIDE);
PRINTK_BITS(HDR, PHY_HDR_OVERIDE);
PRINTK_BITS(HDR, CRC_FAIL);
PRINTK_BITS(HDR, SEQUENCE_NUMBER);
PRINTK_BITS(HDR, BUFF_HEAD_ADDR);
reg = be32_to_cpu(hdr->reg2);
PRINTK_BITS(HDR, PDU_COUNT);
PRINTK_BITS(HDR, WEP_KEY);
PRINTK_BITS(HDR, USES_WEP_KEY);
PRINTK_BITS(HDR, KEEP_ALIVE);
PRINTK_BITS(HDR, BUFF_TAIL_ADDR);
reg = be32_to_cpu(hdr->reg3);
PRINTK_BITS(HDR, CTS_11G);
PRINTK_BITS(HDR, RTS_11G);
PRINTK_BITS(HDR, FRAG_SIZE);
PRINTK_BITS(HDR, PAYLOAD_LEN);
PRINTK_BITS(HDR, FRAG_NUM);
reg = be32_to_cpu(hdr->reg4);
PRINTK_BITS(HDR, RELAY_STAID);
PRINTK_BITS(HDR, STATION_ID);
PRINTK_BITS(HDR, WORKQUEUE_ID);
reg = be32_to_cpu(hdr->reg5);
/* print the routing flags */
PRINTK_BITS(HDR, ROUTE_HOST);
PRINTK_BITS(HDR, ROUTE_CARD_CPU);
PRINTK_BITS(HDR, ROUTE_ENCRYPTION);
PRINTK_BITS(HDR, ROUTE_TX);
PRINTK_BITS(HDR, ROUTE_RX1);
PRINTK_BITS(HDR, ROUTE_RX2);
PRINTK_BITS(HDR, ROUTE_COMPRESSION);
PRINTK_BE32(HDR, hdr->_11g0);
PRINTK_BE32(HDR, hdr->_11g1);
PRINTK_BE32(HDR, hdr->_11b0);
PRINTK_BE32(HDR, hdr->_11b1);
dump_ieee80211b_phy_hdr(hdr->_11b0, hdr->_11b1);
/* Hex-dump the embedded 802.11 MAC header on a single output line. */
for (i = 0; i < ARRAY_SIZE(hdr->mac_hdr); i++) {
if (i == 0)
printk(KERN_DEBUG PFX "IEEE80211 HDR: ");
printk("%.2x ", hdr->mac_hdr[i]);
if (i + 1 == ARRAY_SIZE(hdr->mac_hdr))
printk("\n");
}
PRINTK_BE16(HDR, hdr->rts_duration);
PRINTK_BE16(HDR, hdr->last_duration);
PRINTK_BE16(HDR, hdr->sec_last_duration);
PRINTK_BE16(HDR, hdr->other_duration);
PRINTK_BE16(HDR, hdr->tx_other_duration);
PRINTK_BE16(HDR, hdr->last_11g_len);
PRINTK_BE16(HDR, hdr->other_11g_len);
PRINTK_BE16(HDR, hdr->last_11b_len);
PRINTK_BE16(HDR, hdr->other_11b_len);
/* NOTE(review): reg6 is 16-bit here while the other words are 32-bit —
 * presumably intentional; confirm against the hardware layout. */
reg = be16_to_cpu(hdr->reg6);
PRINTK_BITS(HDR, MBF);
PRINTK_BITS(HDR, RSVD4);
PRINTK_BE16(HDR, hdr->rx_frag_stat);
PRINTK_BE32(HDR, hdr->time_stamp);
PRINTK_BE32(HDR, hdr->phy_stats_hi);
PRINTK_BE32(HDR, hdr->phy_stats_lo);
PRINTK_BE32(HDR, hdr->mic_key0);
PRINTK_BE32(HDR, hdr->mic_key1);
} /* agnx_print_hdr */
/* Dump a hardware header plus the RX-only union fields. */
static inline void agnx_print_rx_hdr(struct agnx_hdr *hdr)
{
agnx_print_hdr(hdr);
PRINTK_BE16(HDR, hdr->rx.rx_packet_duration);
PRINTK_BE16(HDR, hdr->rx.replay_cnt);
PRINTK_U8(HDR, hdr->rx_channel);
}
/* Dump a hardware header plus the TX-only retry-limit/count fields. */
static inline void agnx_print_tx_hdr(struct agnx_hdr *hdr)
{
agnx_print_hdr(hdr);
PRINTK_U8(HDR, hdr->tx.long_retry_limit);
PRINTK_U8(HDR, hdr->tx.short_retry_limit);
PRINTK_U8(HDR, hdr->tx.long_retry_cnt);
PRINTK_U8(HDR, hdr->tx.short_retry_cnt);
PRINTK_U8(HDR, hdr->rx_channel);
}
/* Fetch and dump the per-station power-template register for sta_idx. */
static inline void
agnx_print_sta_power(struct agnx_priv *priv, unsigned int sta_idx)
{
struct agnx_sta_power power;
u32 reg;
get_sta_power(priv, &power, sta_idx);
reg = le32_to_cpu(power.reg);
PRINTK_BITS(STA_POWER, SIGNAL);
PRINTK_BITS(STA_POWER, RATE);
PRINTK_BITS(STA_POWER, TIFS);
PRINTK_BITS(STA_POWER, EDCF);
PRINTK_BITS(STA_POWER, CHANNEL_BOND);
PRINTK_BITS(STA_POWER, PHY_MODE);
PRINTK_BITS(STA_POWER, POWER_LEVEL);
PRINTK_BITS(STA_POWER, NUM_TRANSMITTERS);
}
/* Fetch and dump one TX workqueue descriptor of station sta_idx. */
static inline void
agnx_print_sta_tx_wq(struct agnx_priv *priv, unsigned int sta_idx, unsigned int wq_idx)
{
struct agnx_sta_tx_wq tx_wq;
u32 reg;
get_sta_tx_wq(priv, &tx_wq, sta_idx, wq_idx);
reg = le32_to_cpu(tx_wq.reg0);
PRINTK_BITS(STA_TX_WQ, TAIL_POINTER);
PRINTK_BITS(STA_TX_WQ, HEAD_POINTER_LOW);
/* NOTE(review): reg3 is decoded before reg1/reg2 — presumably the
 * high pointer bits live there; confirm against the register map. */
reg = le32_to_cpu(tx_wq.reg3);
PRINTK_BITS(STA_TX_WQ, HEAD_POINTER_HIGH);
PRINTK_BITS(STA_TX_WQ, ACK_POINTER_LOW);
reg = le32_to_cpu(tx_wq.reg1);
PRINTK_BITS(STA_TX_WQ, ACK_POINTER_HIGH);
PRINTK_BITS(STA_TX_WQ, HEAD_TIMOUT_TAIL_PACK_CNT);
PRINTK_BITS(STA_TX_WQ, ACK_TIMOUT_TAIL_PACK_CNT);
reg = le32_to_cpu(tx_wq.reg2);
PRINTK_BITS(STA_TX_WQ, HEAD_TIMOUT_WIN_LIM_BYTE_CNT);
PRINTK_BITS(STA_TX_WQ, HEAD_TIMOUT_WIN_LIM_FRAG_CNT);
PRINTK_BITS(STA_TX_WQ, WORK_QUEUE_ACK_TYPE);
PRINTK_BITS(STA_TX_WQ, WORK_QUEUE_VALID);
}
/* Dump one per-station traffic-class record (registers and replay counters). */
static inline void agnx_print_sta_traffic(struct agnx_sta_traffic *traffic)
{
u32 reg;
reg = le32_to_cpu(traffic->reg0);
PRINTK_BITS(STA_TRAFFIC, ACK_TIMOUT_CNT);
PRINTK_BITS(STA_TRAFFIC, TRAFFIC_ACK_TYPE);
PRINTK_BITS(STA_TRAFFIC, NEW_PACKET);
PRINTK_BITS(STA_TRAFFIC, TRAFFIC_VALID);
PRINTK_BITS(STA_TRAFFIC, RX_HDR_DESC_POINTER);
reg = le32_to_cpu(traffic->reg1);
PRINTK_BITS(STA_TRAFFIC, RX_PACKET_TIMESTAMP);
PRINTK_BITS(STA_TRAFFIC, TRAFFIC_RESERVED);
PRINTK_BITS(STA_TRAFFIC, SV);
PRINTK_BITS(STA_TRAFFIC, RX_SEQUENCE_NUM);
PRINTK_LE32(STA_TRAFFIC, traffic->tx_replay_cnt_low);
PRINTK_LE16(STA_TRAFFIC, traffic->tx_replay_cnt_high);
PRINTK_LE16(STA_TRAFFIC, traffic->rx_replay_cnt_high);
PRINTK_LE32(STA_TRAFFIC, traffic->rx_replay_cnt_low);
}
/* Fetch the full station entry for sta_idx and dump its keys, control
 * word, counters and (currently only the first) traffic class. */
static inline void agnx_print_sta(struct agnx_priv *priv, unsigned int sta_idx)
{
struct agnx_sta station;
struct agnx_sta *sta = &station;
u32 reg;
unsigned int i;
get_sta(priv, sta, sta_idx);
for (i = 0; i < 4; i++)
PRINTK_LE32(STA, sta->tx_session_keys[i]);
for (i = 0; i < 4; i++)
PRINTK_LE32(STA, sta->rx_session_keys[i]);
reg = le32_to_cpu(sta->reg);
PRINTK_BITS(STA, ID_1);
PRINTK_BITS(STA, ID_0);
PRINTK_BITS(STA, ENABLE_CONCATENATION);
PRINTK_BITS(STA, ENABLE_DECOMPRESSION);
PRINTK_BITS(STA, STA_RESERVED);
PRINTK_BITS(STA, EAP);
PRINTK_BITS(STA, ED_NULL);
PRINTK_BITS(STA, ENCRYPTION_POLICY);
PRINTK_BITS(STA, DEFINED_KEY_ID);
PRINTK_BITS(STA, FIXED_KEY);
PRINTK_BITS(STA, KEY_VALID);
PRINTK_BITS(STA, STATION_VALID);
PRINTK_LE32(STA, sta->tx_aes_blks_unicast);
PRINTK_LE32(STA, sta->rx_aes_blks_unicast);
PRINTK_LE16(STA, sta->aes_format_err_unicast_cnt);
PRINTK_LE16(STA, sta->aes_replay_unicast);
PRINTK_LE16(STA, sta->aes_decrypt_err_unicast);
PRINTK_LE16(STA, sta->aes_decrypt_err_default);
PRINTK_LE16(STA, sta->single_retry_packets);
PRINTK_LE16(STA, sta->failed_tx_packets);
PRINTK_LE16(STA, sta->muti_retry_packets);
PRINTK_LE16(STA, sta->ack_timeouts);
PRINTK_LE16(STA, sta->frag_tx_cnt);
PRINTK_LE16(STA, sta->rts_brq_sent);
PRINTK_LE16(STA, sta->tx_packets);
PRINTK_LE16(STA, sta->cts_back_timeout);
PRINTK_LE32(STA, sta->phy_stats_high);
PRINTK_LE32(STA, sta->phy_stats_low);
/* Only traffic class 0 is dumped; the loop over all 8 is disabled. */
// for (i = 0; i < 8; i++)
agnx_print_sta_traffic(sta->traffic + 0);
PRINTK_LE16(STA, sta->traffic_class0_frag_success);
PRINTK_LE16(STA, sta->traffic_class1_frag_success);
PRINTK_LE16(STA, sta->traffic_class2_frag_success);
PRINTK_LE16(STA, sta->traffic_class3_frag_success);
PRINTK_LE16(STA, sta->traffic_class4_frag_success);
PRINTK_LE16(STA, sta->traffic_class5_frag_success);
PRINTK_LE16(STA, sta->traffic_class6_frag_success);
PRINTK_LE16(STA, sta->traffic_class7_frag_success);
PRINTK_LE16(STA, sta->num_frag_non_prime_rates);
PRINTK_LE16(STA, sta->ack_timeout_non_prime_rates);
}
/*
 * Pretty-print an IEEE 802.11 frame header for debugging: frame type,
 * management subtype, the frame-control and duration words, and as many
 * address fields as the header length implies. 'tag' prefixes the output
 * so callers can mark where the dump came from.
 *
 * Fix: the fallback strings were misspelled "Unknow"; now "Unknown".
 */
static inline void dump_ieee80211_hdr(struct ieee80211_hdr *hdr, char *tag)
{
	u16 fctl;
	int hdrlen;
	DECLARE_MAC_BUF(mac);

	fctl = le16_to_cpu(hdr->frame_control);
	switch (fctl & IEEE80211_FCTL_FTYPE) {
	case IEEE80211_FTYPE_DATA:
		printk(PFX "%s DATA ", tag);
		break;
	case IEEE80211_FTYPE_CTL:
		printk(PFX "%s CTL ", tag);
		break;
	case IEEE80211_FTYPE_MGMT:
		printk(PFX "%s MGMT ", tag);
		switch (fctl & IEEE80211_FCTL_STYPE) {
		case IEEE80211_STYPE_ASSOC_REQ:
			printk("SubType: ASSOC_REQ ");
			break;
		case IEEE80211_STYPE_ASSOC_RESP:
			printk("SubType: ASSOC_RESP ");
			break;
		case IEEE80211_STYPE_REASSOC_REQ:
			printk("SubType: REASSOC_REQ ");
			break;
		case IEEE80211_STYPE_REASSOC_RESP:
			printk("SubType: REASSOC_RESP ");
			break;
		case IEEE80211_STYPE_PROBE_REQ:
			printk("SubType: PROBE_REQ ");
			break;
		case IEEE80211_STYPE_PROBE_RESP:
			printk("SubType: PROBE_RESP ");
			break;
		case IEEE80211_STYPE_BEACON:
			printk("SubType: BEACON ");
			break;
		case IEEE80211_STYPE_ATIM:
			printk("SubType: ATIM ");
			break;
		case IEEE80211_STYPE_DISASSOC:
			printk("SubType: DISASSOC ");
			break;
		case IEEE80211_STYPE_AUTH:
			printk("SubType: AUTH ");
			break;
		case IEEE80211_STYPE_DEAUTH:
			printk("SubType: DEAUTH ");
			break;
		case IEEE80211_STYPE_ACTION:
			printk("SubType: ACTION ");
			break;
		default:
			printk("SubType: Unknown\n");
		}
		break;
	default:
		printk(PFX "%s Packet type: Unknown\n", tag);
	}
	/* Print only the fields actually present for this header length. */
	hdrlen = ieee80211_hdrlen(fctl);
	if (hdrlen >= 4)
		printk("FC=0x%04x DUR=0x%04x",
		       fctl, le16_to_cpu(hdr->duration_id));
	if (hdrlen >= 10)
		printk(" A1=%s", print_mac(mac, hdr->addr1));
	if (hdrlen >= 16)
		printk(" A2=%s", print_mac(mac, hdr->addr2));
	if (hdrlen >= 24)
		printk(" A3=%s", print_mac(mac, hdr->addr3));
	if (hdrlen >= 30)
		printk(" A4=%s", print_mac(mac, hdr->addr4));
	printk("\n");
}
/* Dump the TXM register bank, offsets 0x0 through 0x1e8 inclusive. */
static inline void dump_txm_registers(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	int offset;

	for (offset = 0; offset <= 0x1e8; offset += 4)
		printk(KERN_DEBUG PFX "TXM: %x---> 0x%.8x\n", offset,
		       ioread32(ctl + offset));
}
/* Dump the RXM register bank (base 0x2000), offsets 0x0-0x108 inclusive. */
static inline void dump_rxm_registers(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	int offset;

	for (offset = 0; offset <= 0x108; offset += 4)
		printk(KERN_DEBUG PFX "RXM: %x---> 0x%.8x\n", offset,
		       ioread32(ctl + 0x2000 + offset));
}
/* Dump the BM register bank (base 0x2c00), offsets 0x0-0x90 inclusive. */
static inline void dump_bm_registers(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	int offset;

	for (offset = 0; offset <= 0x90; offset += 4)
		printk(KERN_DEBUG PFX "BM: %x---> 0x%.8x\n", offset,
		       ioread32(ctl + 0x2c00 + offset));
}
/* Dump the CIR register bank (base 0x3000), offsets 0x0-0xb8 inclusive. */
static inline void dump_cir_registers(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	int offset;

	for (offset = 0; offset <= 0xb8; offset += 4)
		printk(KERN_DEBUG PFX "CIR: %x---> 0x%.8x\n", offset,
		       ioread32(ctl + 0x3000 + offset));
}
#endif /* AGNX_DEBUG_H_ */

644
drivers/staging/agnx/pci.c Normal file
View File

@ -0,0 +1,644 @@
/**
* Airgo MIMO wireless driver
*
* Copyright (c) 2007 Li YanBo <dreamfly281@gmail.com>
* Thanks for Jeff Williams <angelbane@gmail.com> do reverse engineer
* works and published the SPECS at http://airgo.wdwconsulting.net/mymoin
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include "agnx.h"
#include "debug.h"
#include "xmit.h"
#include "phy.h"
MODULE_AUTHOR("Li YanBo <dreamfly281@gmail.com>");
MODULE_DESCRIPTION("Airgo MIMO PCI wireless driver");
MODULE_LICENSE("GPL");
/* PCI device IDs of supported Airgo-based cards. */
static struct pci_device_id agnx_pci_id_tbl[] __devinitdata = {
{ PCI_DEVICE(0x17cb, 0x0001) }, /* Belkin F5D8010, Netgear WGM511 etc */
{ PCI_DEVICE(0x17cb, 0x0002) }, /* Netgear WPNT511 */
{ 0 }
};
MODULE_DEVICE_TABLE(pci, agnx_pci_id_tbl);
/*
 * Acknowledge the interrupt sources reported in *reason at the hardware
 * level, and expand the summary TX bit into the specific TXD/TXM flags
 * so the caller can dispatch the matching handlers.
 */
static inline void agnx_interrupt_ack(struct agnx_priv *priv, u32 *reason)
{
void __iomem *ctl = priv->ctl;
u32 reg;
if ( *reason & AGNX_STAT_RX ) {
/* Mark complete RX */
reg = ioread32(ctl + AGNX_CIR_RXCTL);
reg |= 0x4;
iowrite32(reg, ctl + AGNX_CIR_RXCTL);
/* disable Rx interrupt */
}
if ( *reason & AGNX_STAT_TX ) {
/* Writing the status back acknowledges the TXD/TXM queues; bit 0x4
 * indicates the queue actually raised the interrupt. */
reg = ioread32(ctl + AGNX_CIR_TXDCTL);
if (reg & 0x4) {
iowrite32(reg, ctl + AGNX_CIR_TXDCTL);
*reason |= AGNX_STAT_TXD;
}
reg = ioread32(ctl + AGNX_CIR_TXMCTL);
if (reg & 0x4) {
iowrite32(reg, ctl + AGNX_CIR_TXMCTL);
*reason |= AGNX_STAT_TXM;
}
}
/* "Other" interrupts: acknowledgement is currently disabled. */
if ( *reason & AGNX_STAT_X ) {
/* reg = ioread32(ctl + AGNX_INT_STAT); */
/* iowrite32(reg, ctl + AGNX_INT_STAT); */
/* /\* FIXME reinit interrupt mask *\/ */
/* reg = 0xc390bf9 & ~IRQ_TX_BEACON; */
/* reg &= ~IRQ_TX_DISABLE; */
/* iowrite32(reg, ctl + AGNX_INT_MASK); */
/* iowrite32(0x800, ctl + AGNX_CIR_BLKCTL); */
}
} /* agnx_interrupt_ack */
/*
 * Shared IRQ handler: reads the block-control register to find which
 * engines (RX/TXD/TXM/other) raised the interrupt, acknowledges them and
 * dispatches the per-source handlers under priv->lock.
 */
static irqreturn_t agnx_interrupt_handler(int irq, void *dev_id)
{
struct ieee80211_hw *dev = dev_id;
struct agnx_priv *priv = dev->priv;
void __iomem *ctl = priv->ctl;
irqreturn_t ret = IRQ_NONE;
u32 irq_reason;
spin_lock(&priv->lock);
// printk(KERN_ERR PFX "Get a interrupt %s\n", __func__);
/* Ignore spurious interrupts before start / after stop. */
if (priv->init_status != AGNX_START)
goto out;
/* FIXME: is reading BLKCTL without further locking racy? */
irq_reason = ioread32(ctl + AGNX_CIR_BLKCTL);
if (!(irq_reason & 0x7))
goto out;
ret = IRQ_HANDLED;
priv->irq_status = ioread32(ctl + AGNX_INT_STAT);
// printk(PFX "Interrupt reason is 0x%x\n", irq_reason);
/* Make sure the txm and txd flags don't conflict with other unknown
interrupt flag, maybe is not necessary */
irq_reason &= 0xF;
disable_rx_interrupt(priv);
/* TODO Make sure the card finished initialized */
agnx_interrupt_ack(priv, &irq_reason);
if ( irq_reason & AGNX_STAT_RX )
handle_rx_irq(priv);
if ( irq_reason & AGNX_STAT_TXD )
handle_txd_irq(priv);
if ( irq_reason & AGNX_STAT_TXM )
handle_txm_irq(priv);
if ( irq_reason & AGNX_STAT_X )
handle_other_irq(priv);
enable_rx_interrupt(priv);
out:
spin_unlock(&priv->lock);
return ret;
} /* agnx_interrupt_handler */
/* mac80211 .tx hook: hand the frame to the driver's internal TX path. */
static int agnx_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
	struct agnx_priv *priv = dev->priv;

	AGNX_TRACE;
	return _agnx_tx(priv, skb);
}
/*
 * Read the card's MAC address out of device registers, falling back to a
 * random address if the stored one is invalid. Always returns 0.
 * NOTE(review): the 0x3544/0x3548/0x354c offsets and udelay values come
 * from reverse engineering — confirm against the published SPECS.
 */
static int agnx_get_mac_address(struct agnx_priv *priv)
{
void __iomem *ctl = priv->ctl;
u32 reg;
AGNX_TRACE;
/* Attention! directly read the MAC or other date from EEPROM will
lead to cardbus(WGM511) lock up when write to PM PLL register */
reg = agnx_read32(ctl, 0x3544);
udelay(40);
reg = agnx_read32(ctl, 0x354c);
udelay(50);
/* Get the mac address */
reg = agnx_read32(ctl, 0x3544);
udelay(40);
/* HACK: pick bytes 2-3 of the little-endian register word as the
 * first two MAC octets. */
reg = cpu_to_le32(reg);
priv->mac_addr[0] = ((u8 *)&reg)[2];
priv->mac_addr[1] = ((u8 *)&reg)[3];
reg = agnx_read32(ctl, 0x3548);
udelay(50);
/* NOTE(review): unaligned 32-bit store into mac_addr+2 — works on x86,
 * verify on strict-alignment architectures. */
*((u32 *)(priv->mac_addr + 2)) = cpu_to_le32(reg);
if (!is_valid_ether_addr(priv->mac_addr)) {
DECLARE_MAC_BUF(mbuf);
printk(KERN_WARNING PFX "read mac %s\n", print_mac(mbuf, priv->mac_addr));
printk(KERN_WARNING PFX "Invalid hwaddr! Using random hwaddr\n");
random_ether_addr(priv->mac_addr);
}
return 0;
} /* agnx_get_mac_address */
/*
 * Allocate the bookkeeping array and the DMA-coherent descriptor block
 * for the RX, TXM and TXD rings. Both allocations are made as one
 * contiguous chunk and then partitioned between the three rings.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int agnx_alloc_rings(struct agnx_priv *priv)
{
unsigned int len;
AGNX_TRACE;
/* Allocate RX/TXM/TXD rings info */
priv->rx.size = AGNX_RX_RING_SIZE;
priv->txm.size = AGNX_TXM_RING_SIZE;
priv->txd.size = AGNX_TXD_RING_SIZE;
len = priv->rx.size + priv->txm.size + priv->txd.size;
/* NOTE(review): GFP_ATOMIC chosen over GFP_KERNEL — verify this path
 * actually runs in atomic context; .start normally may sleep. */
// priv->rx.info = kzalloc(sizeof(struct agnx_info) * len, GFP_KERNEL);
priv->rx.info = kzalloc(sizeof(struct agnx_info) * len, GFP_ATOMIC);
if (!priv->rx.info)
return -ENOMEM;
priv->txm.info = priv->rx.info + priv->rx.size;
priv->txd.info = priv->txm.info + priv->txm.size;
/* Allocate RX/TXM/TXD descriptors */
priv->rx.desc = pci_alloc_consistent(priv->pdev, sizeof(struct agnx_desc) * len,
&priv->rx.dma);
if (!priv->rx.desc) {
kfree(priv->rx.info);
return -ENOMEM;
}
/* Carve the TXM and TXD sub-rings out of the single DMA block. */
priv->txm.desc = priv->rx.desc + priv->rx.size;
priv->txm.dma = priv->rx.dma + sizeof(struct agnx_desc) * priv->rx.size;
priv->txd.desc = priv->txm.desc + priv->txm.size;
priv->txd.dma = priv->txm.dma + sizeof(struct agnx_desc) * priv->txm.size;
return 0;
} /* agnx_alloc_rings */
/* Free the ring bookkeeping array and the shared DMA descriptor block
 * allocated by agnx_alloc_rings(), under priv->lock. */
static void rings_free(struct agnx_priv *priv)
{
unsigned int len = priv->rx.size + priv->txm.size + priv->txd.size;
unsigned long flags;
AGNX_TRACE;
spin_lock_irqsave(&priv->lock, flags);
kfree(priv->rx.info);
pci_free_consistent(priv->pdev, sizeof(struct agnx_desc) * len,
priv->rx.desc, priv->rx.dma);
spin_unlock_irqrestore(&priv->lock, flags);
}
/* Disabled periodic maintenance worker (recalibration etc.); kept for
 * future use — see the commented-out scheduling in agnx_start(). */
#if 0
static void agnx_periodic_work_handler(struct work_struct *work)
{
struct agnx_priv *priv = container_of(work, struct agnx_priv,
periodic_work.work);
// unsigned long flags;
unsigned long delay;
/* fixme: using mutex?? */
// spin_lock_irqsave(&priv->lock, flags);
/* TODO Recalibrate*/
// calibrate_oscillator(priv);
// antenna_calibrate(priv);
// agnx_send_packet(priv, 997);
/* FIXME */
/* if (debug == 3) */
/* delay = msecs_to_jiffies(AGNX_PERIODIC_DELAY); */
/* else */
delay = msecs_to_jiffies(AGNX_PERIODIC_DELAY);
// delay = round_jiffies(HZ * 15);
queue_delayed_work(priv->hw->workqueue, &priv->periodic_work, delay);
// spin_unlock_irqrestore(&priv->lock, flags);
}
#endif
/*
 * mac80211 .start hook: allocate the rings, install the shared IRQ
 * handler and bring the hardware up. Returns 0 or a negative errno;
 * on failure all partially-acquired resources are released.
 */
static int agnx_start(struct ieee80211_hw *dev)
{
struct agnx_priv *priv = dev->priv;
/* unsigned long delay; */
int err = 0;
AGNX_TRACE;
err = agnx_alloc_rings(priv);
if (err) {
printk(KERN_ERR PFX "Can't alloc RX/TXM/TXD rings\n");
goto out;
}
err = request_irq(priv->pdev->irq, &agnx_interrupt_handler,
IRQF_SHARED, "agnx_pci", dev);
if (err) {
printk(KERN_ERR PFX "Failed to register IRQ handler\n");
rings_free(priv);
goto out;
}
// mdelay(500);
might_sleep();
agnx_hw_init(priv);
// mdelay(500);
might_sleep();
/* From here on the IRQ handler will process interrupts. */
priv->init_status = AGNX_START;
/* INIT_DELAYED_WORK(&priv->periodic_work, agnx_periodic_work_handler); */
/* delay = msecs_to_jiffies(AGNX_PERIODIC_DELAY); */
/* queue_delayed_work(priv->hw->workqueue, &priv->periodic_work, delay); */
out:
return err;
} /* agnx_start */
/* mac80211 .stop hook: quiesce the hardware, release the IRQ and free
 * the rings. init_status is cleared first so the IRQ handler bails out. */
static void agnx_stop(struct ieee80211_hw *dev)
{
struct agnx_priv *priv = dev->priv;
AGNX_TRACE;
priv->init_status = AGNX_STOP;
/* make sure hardware will not generate irq */
agnx_hw_reset(priv);
free_irq(priv->pdev->irq, dev);
flush_workqueue(priv->hw->workqueue);
// cancel_delayed_work_sync(&priv->periodic_work);
unfill_rings(priv);
rings_free(priv);
}
/* mac80211 .config hook: currently only retunes the radio when the
 * requested channel differs from the one in use. Always returns 0. */
static int agnx_config(struct ieee80211_hw *dev,
struct ieee80211_conf *conf)
{
struct agnx_priv *priv = dev->priv;
int channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
AGNX_TRACE;
spin_lock(&priv->lock);
/* FIXME need priv lock? */
if (channel != priv->channel) {
priv->channel = channel;
agnx_set_channel(priv, priv->channel);
}
spin_unlock(&priv->lock);
return 0;
}
/*
 * mac80211 .config_interface hook: when the BSSID changes, program it
 * into the hardware, (re)initialize the BSSID station entry and enable
 * the relevant TX workqueues. Always returns 0.
 */
static int agnx_config_interface(struct ieee80211_hw *dev,
struct ieee80211_vif *vif,
struct ieee80211_if_conf *conf)
{
struct agnx_priv *priv = dev->priv;
void __iomem *ctl = priv->ctl;
AGNX_TRACE;
spin_lock(&priv->lock);
if (memcmp(conf->bssid, priv->bssid, ETH_ALEN)) {
// u32 reghi, reglo;
agnx_set_bssid(priv, conf->bssid);
memcpy(priv->bssid, conf->bssid, ETH_ALEN);
hash_write(priv, conf->bssid, BSSID_STAID);
sta_init(priv, BSSID_STAID);
/* FIXME needed? */
sta_power_init(priv, BSSID_STAID);
/* Enable all TX workqueue mask bits except bit 0. */
agnx_write32(ctl, AGNX_BM_MTSM, 0xff & ~0x1);
}
spin_unlock(&priv->lock);
return 0;
} /* agnx_config_interface */
/* mac80211 .configure_filter hook: no hardware filtering is implemented
 * yet, so report back that no filter flags are honoured. */
static void agnx_configure_filter(struct ieee80211_hw *dev,
				  unsigned int changed_flags,
				  unsigned int *total_flags,
				  int mc_count, struct dev_mc_list *mclist)
{
	*total_flags = 0;
}
/*
 * mac80211 .add_interface hook: accept a single STATION interface while
 * the driver is in its idle MONITOR state; reject anything else.
 *
 * Fix: the original returned -EOPNOTSUPP with priv->lock still held
 * (both from the mode guard and from the switch default), leaking the
 * spinlock. All exits now go through a single unlock path.
 */
static int agnx_add_interface(struct ieee80211_hw *dev,
			      struct ieee80211_if_init_conf *conf)
{
	struct agnx_priv *priv = dev->priv;
	int err = 0;
	AGNX_TRACE;

	spin_lock(&priv->lock);
	/* FIXME: only one interface at a time is supported. */
	if (priv->mode != NL80211_IFTYPE_MONITOR) {
		err = -EOPNOTSUPP;
		goto out_unlock;
	}
	switch (conf->type) {
	case NL80211_IFTYPE_STATION:
		priv->mode = conf->type;
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
out_unlock:
	spin_unlock(&priv->lock);
	return err;
}
/* mac80211 .remove_interface hook: drop back to the idle MONITOR state.
 * No other teardown is performed yet. */
static void agnx_remove_interface(struct ieee80211_hw *dev,
				  struct ieee80211_if_init_conf *conf)
{
	struct agnx_priv *priv = dev->priv;

	AGNX_TRACE;
	priv->mode = NL80211_IFTYPE_MONITOR;
}
/* mac80211 .get_stats hook: copy the cached low-level statistics out
 * under priv->lock. Always returns 0. */
static int agnx_get_stats(struct ieee80211_hw *dev,
struct ieee80211_low_level_stats *stats)
{
struct agnx_priv *priv = dev->priv;
AGNX_TRACE;
spin_lock(&priv->lock);
/* TODO !! */
memcpy(stats, &priv->stats, sizeof(*stats));
spin_unlock(&priv->lock);
return 0;
}
/*
 * mac80211 .get_tsf hook: assemble the 64-bit TSF timer from the TXM
 * timestamp register pair.
 * NOTE(review): low word is read before high — a carry between the two
 * reads could produce a torn value; confirm the hardware latches them.
 */
static u64 agnx_get_tsft(struct ieee80211_hw *dev)
{
void __iomem *ctl = ((struct agnx_priv *)dev->priv)->ctl;
u32 tsftl;
u64 tsft;
AGNX_TRACE;
/* FIXME */
tsftl = ioread32(ctl + AGNX_TXM_TIMESTAMPLO);
tsft = ioread32(ctl + AGNX_TXM_TIMESTAMPHI);
tsft <<= 32;
tsft |= tsftl;
return tsft;
}
/* mac80211 .get_tx_stats hook: report queue occupancy for queue 0 only,
 * derived from the TXD ring indices (two descriptors per frame). */
static int agnx_get_tx_stats(struct ieee80211_hw *dev,
struct ieee80211_tx_queue_stats *stats)
{
struct agnx_priv *priv = dev->priv;
AGNX_TRACE;
/* FIXME now we just using txd queue, but should using txm queue too */
stats[0].len = (priv->txd.idx - priv->txd.idx_sent) / 2;
stats[0].limit = priv->txd.size - 2;
stats[0].count = priv->txd.idx / 2;
return 0;
}
/* mac80211 callback table registered via ieee80211_alloc_hw(). */
static struct ieee80211_ops agnx_ops = {
.tx = agnx_tx,
.start = agnx_start,
.stop = agnx_stop,
.add_interface = agnx_add_interface,
.remove_interface = agnx_remove_interface,
.config = agnx_config,
.config_interface = agnx_config_interface,
.configure_filter = agnx_configure_filter,
.get_stats = agnx_get_stats,
.get_tx_stats = agnx_get_tx_stats,
.get_tsf = agnx_get_tsft
};
/*
 * PCI .remove hook: unregister from mac80211 and release all PCI
 * resources acquired in probe.
 *
 * Fix: the original dereferenced dev->priv BEFORE the "if (!dev)" NULL
 * check, so the guard could never help — it would already have crashed.
 * The check now precedes any dereference.
 */
static void __devexit agnx_pci_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *dev = pci_get_drvdata(pdev);
	struct agnx_priv *priv;
	AGNX_TRACE;

	if (!dev)
		return;
	priv = dev->priv;
	ieee80211_unregister_hw(dev);
	pci_iounmap(pdev, priv->ctl);
	pci_iounmap(pdev, priv->data);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	ieee80211_free_hw(dev);
}
/*
 * PCI probe callback: enable the device, map its two BARs, allocate and
 * initialise the mac80211 hardware structure, and register with
 * mac80211.
 *
 * Bug fixes vs. the original:
 *  - the DMA-mask and pci_iomap() failure paths jumped to the error
 *    labels with err still 0 (set by the last successful call), so the
 *    probe reported success after failing — err is now set explicitly;
 *  - a pci_request_regions() failure returned without disabling the
 *    device it had just enabled — it now unwinds through err_disable.
 */
static int __devinit agnx_pci_probe(struct pci_dev *pdev,
				    const struct pci_device_id *id)
{
	struct ieee80211_hw *dev;
	struct agnx_priv *priv;
	u32 mem_addr0, mem_len0;
	u32 mem_addr1, mem_len1;
	int err;
	DECLARE_MAC_BUF(mac);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Can't enable new PCI device\n");
		return err;
	}

	/* BAR0: control registers, BAR1: packet memory. */
	mem_addr0 = pci_resource_start(pdev, 0);
	mem_len0 = pci_resource_len(pdev, 0);
	mem_addr1 = pci_resource_start(pdev, 1);
	mem_len1 = pci_resource_len(pdev, 1);
	printk(KERN_DEBUG PFX "Memaddr0 is %x, length is %x\n", mem_addr0, mem_len0);
	printk(KERN_DEBUG PFX "Memaddr1 is %x, length is %x\n", mem_addr1, mem_len1);

	err = pci_request_regions(pdev, "agnx-pci");
	if (err) {
		printk(KERN_ERR PFX "Can't obtain PCI resource\n");
		goto err_disable;
	}

	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) ||
	    pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
		printk(KERN_ERR PFX "No suitable DMA available\n");
		err = -ENODEV;
		goto err_free_reg;
	}
	pci_set_master(pdev);
	printk(KERN_DEBUG PFX "pdev->irq is %d\n", pdev->irq);

	dev = ieee80211_alloc_hw(sizeof(*priv), &agnx_ops);
	if (!dev) {
		printk(KERN_ERR PFX "ieee80211 alloc failed\n");
		err = -ENOMEM;
		goto err_free_reg;
	}

	/* init priv */
	priv = dev->priv;
	memset(priv, 0, sizeof(*priv));
	priv->mode = NL80211_IFTYPE_MONITOR;
	priv->pdev = pdev;
	priv->hw = dev;
	spin_lock_init(&priv->lock);
	priv->init_status = AGNX_UNINIT;

	/* Map mem #1 and #2 */
	priv->ctl = pci_iomap(pdev, 0, mem_len0);
/* 	printk(KERN_DEBUG PFX"MEM1 mapped address is 0x%p\n", priv->ctl); */
	if (!priv->ctl) {
		printk(KERN_ERR PFX "Can't map device memory\n");
		err = -EIO;
		goto err_free_dev;
	}
	priv->data = pci_iomap(pdev, 1, mem_len1);
	printk(KERN_DEBUG PFX "MEM2 mapped address is 0x%p\n", priv->data);
	if (!priv->data) {
		printk(KERN_ERR PFX "Can't map device memory\n");
		err = -EIO;
		goto err_iounmap2;
	}

	pci_read_config_byte(pdev, PCI_REVISION_ID, &priv->revid);

	priv->band.channels   = (struct ieee80211_channel *)agnx_channels;
	priv->band.n_channels = ARRAY_SIZE(agnx_channels);
	priv->band.bitrates   = (struct ieee80211_rate *)agnx_rates_80211g;
	priv->band.n_bitrates = ARRAY_SIZE(agnx_rates_80211g);

	/* Init ieee802.11 dev  */
	SET_IEEE80211_DEV(dev, &pdev->dev);
	pci_set_drvdata(pdev, dev);
	dev->extra_tx_headroom = sizeof(struct agnx_hdr);

	/* FIXME It only include FCS in promious mode but not manage mode */
/*      dev->flags =  IEEE80211_HW_RX_INCLUDES_FCS; */
	dev->channel_change_time = 5000;
	dev->max_signal = 100;
	/* FIXME */
	dev->queues = 1;

	agnx_get_mac_address(priv);

	SET_IEEE80211_PERM_ADDR(dev, priv->mac_addr);

/* 	/\* FIXME *\/ */
/* 	for (i = 1; i < NUM_DRIVE_MODES; i++) { */
/* 		err = ieee80211_register_hwmode(dev, &priv->modes[i]); */
/* 		if (err) { */
/* 			printk(KERN_ERR PFX "Can't register hwmode\n"); */
/* 			goto  err_iounmap; */
/* 		} */
/* 	} */

	priv->channel = 1;
	dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;

	err = ieee80211_register_hw(dev);
	if (err) {
		printk(KERN_ERR PFX "Can't register hardware\n");
		goto err_iounmap;
	}

	agnx_hw_reset(priv);

	printk(PFX "%s: hwaddr %s, Rev 0x%02x\n", wiphy_name(dev->wiphy),
	       print_mac(mac, dev->wiphy->perm_addr), priv->revid);
	return 0;

 err_iounmap:
	pci_iounmap(pdev, priv->data);

 err_iounmap2:
	pci_iounmap(pdev, priv->ctl);

 err_free_dev:
	pci_set_drvdata(pdev, NULL);
	ieee80211_free_hw(dev);

 err_free_reg:
	pci_release_regions(pdev);

 err_disable:
	pci_disable_device(pdev);
	return err;
} /* agnx_pci_probe*/
#ifdef CONFIG_PM
/*
 * PCI suspend hook: quiesce mac80211 queues, stop the hardware, then
 * save PCI config space and drop into the requested power state.
 */
static int agnx_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct ieee80211_hw *dev = pci_get_drvdata(pdev);
	AGNX_TRACE;

	ieee80211_stop_queues(dev);
	agnx_stop(dev);

	pci_save_state(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
/*
 * PCI resume hook: restore power/config state, restart the hardware,
 * and let mac80211 resume transmitting.
 *
 * NOTE(review): agnx_start()'s return value is ignored here — a failed
 * restart is not reported to the PM core; confirm whether that is
 * intentional.
 */
static int agnx_pci_resume(struct pci_dev *pdev)
{
	struct ieee80211_hw *dev = pci_get_drvdata(pdev);
	AGNX_TRACE;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	agnx_start(dev);
	ieee80211_wake_queues(dev);
	return 0;
}
#else
#define agnx_pci_suspend NULL
#define agnx_pci_resume NULL
#endif /* CONFIG_PM */
/* PCI driver glue; suspend/resume are NULL when CONFIG_PM is off. */
static struct pci_driver agnx_pci_driver = {
	.name		= "agnx-pci",
	.id_table	= agnx_pci_id_tbl,
	.probe		= agnx_pci_probe,
	.remove		= __devexit_p(agnx_pci_remove),
	.suspend	= agnx_pci_suspend,
	.resume		= agnx_pci_resume,
};
/* Module entry point: register the PCI driver. */
static int __init agnx_pci_init(void)
{
	AGNX_TRACE;
	return pci_register_driver(&agnx_pci_driver);
}
/* Module exit point: unregister the PCI driver. */
static void __exit agnx_pci_exit(void)
{
	AGNX_TRACE;
	pci_unregister_driver(&agnx_pci_driver);
}

module_init(agnx_pci_init);
module_exit(agnx_pci_exit);

960
drivers/staging/agnx/phy.c Normal file
View File

@ -0,0 +1,960 @@
/**
* Airgo MIMO wireless driver
*
* Copyright (c) 2007 Li YanBo <dreamfly281@gmail.com>
 * Thanks to Jeff Williams <angelbane@gmail.com> for the reverse
 * engineering work and for publishing the specs at http://airgo.wdwconsulting.net/mymoin
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include "agnx.h"
#include "debug.h"
#include "phy.h"
#include "table.h"
#include "sta.h"
#include "xmit.h"
/*
 * Read one byte from the EEPROM via the serial interface register.
 *
 * The command word is a struct agnx_eeprom punned to a __le32 for the
 * register write, and the readback is punned the other way to extract
 * the data byte.
 *
 * NOTE(review): the comments below describe polling the status bit, but
 * the code only waits a fixed eeprom_delay() and never actually checks
 * the bit — confirm the delay is always sufficient.
 */
u8 read_from_eeprom(struct agnx_priv *priv, u16 address)
{
	void __iomem *ctl = priv->ctl;
	struct agnx_eeprom cmd;
	u32 reg;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = EEPROM_CMD_READ << AGNX_EEPROM_COMMAND_SHIFT;
	cmd.address = address;
	/* Verify that the Status bit is clear */
	/* Read Command and Address are written to the Serial Interface */
	iowrite32(*(__le32 *)&cmd, ctl + AGNX_CIR_SERIALITF);
	/* Wait for the Status bit to clear again */
	eeprom_delay();
	/* Read from Data */
	reg = ioread32(ctl + AGNX_CIR_SERIALITF);

	cmd = *(struct agnx_eeprom *)&reg;

	return cmd.data;
}
/*
 * Perform a full chip reset by writing 0x80 to the block control
 * register.  The reads before and after the write appear to act as
 * register flushes/readbacks (their values are discarded).
 * Always returns 0.
 */
static int card_full_reset(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;
	AGNX_TRACE;

	reg = agnx_read32(ctl, AGNX_CIR_BLKCTL);
	agnx_write32(ctl, AGNX_CIR_BLKCTL, 0x80);
	reg = agnx_read32(ctl, AGNX_CIR_BLKCTL);
	return 0;
}
/*
 * Clear bit 3 of the power-management control register.
 *
 * NOTE(review): the bit sense looks inverted relative to the function
 * name (enable clears the bit, disable sets it) — this matches how the
 * rest of this file toggles PMCTL bit 3 around PHY init, but confirm
 * against the hardware spec.
 */
inline void enable_power_saving(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;

	reg = agnx_read32(ctl, AGNX_PM_PMCTL);
	reg &= ~0x8;
	agnx_write32(ctl, AGNX_PM_PMCTL, reg);
}
/*
 * Set bit 3 of the power-management control register (see the inverted
 * bit-sense note on enable_power_saving()).
 */
inline void disable_power_saving(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;

	reg = agnx_read32(ctl, AGNX_PM_PMCTL);
	reg |= 0x8;
	agnx_write32(ctl, AGNX_PM_PMCTL, reg);
}
/*
 * Disable the receiver: clear the gain-control discovery mode, then
 * pulse the gain-control reset bit (set, then clear).
 */
void disable_receiver(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	AGNX_TRACE;

	/* FIXME Disable the receiver */
	agnx_write32(ctl, AGNX_GCR_DISCOVMOD, 0x0);
	/* Set gain control reset */
	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x1);
	/* Reset gain control reset */
	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x0);
}
/*
 * Enable the receiver: set adaptive gain-control discovery mode, then
 * pulse the gain-control reset bit.
 *
 * FIXME (original author's note): the enable/disable naming of this
 * pair may be swapped — verify which mode value actually enables RX.
 */
void enable_receiver(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	AGNX_TRACE;

	/* Set adaptive gain control discovery mode */
	agnx_write32(ctl, AGNX_GCR_DISCOVMOD, 0x3);
	/* Set gain control reset */
	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x1);
	/* Clear gain control reset */
	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x0);
}
/*
 * Program the station MAC address into the receive-match registers:
 * the first four octets go into MACHI, the remaining two into MACLO.
 */
static void mac_address_set(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	const u8 *addr = priv->mac_addr;
	u32 hi, lo;

	/* FIXME */
	hi = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
	lo = (addr[4] << 8) | addr[5];

	iowrite32(hi, ctl + AGNX_RXM_MACHI);
	iowrite32(lo, ctl + AGNX_RXM_MACLO);
}
/*
 * Program the receiver's BSSID match registers while the receiver is
 * held disabled, then clear the TBTT registers.
 *
 * NOTE(review): the receiver is enabled mid-function and disabled again
 * at the end, so this routine leaves RX disabled — callers re-enable it
 * separately.  Confirm that enable/disable dance is required by the
 * hardware and not accidental.
 */
static void receiver_bssid_set(struct agnx_priv *priv, u8 *bssid)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;

	disable_receiver(priv);
	/* FIXME */
	reg = bssid[0] << 24 | (bssid[1] << 16) | (bssid[2] << 8) | bssid[3];
	iowrite32(reg, ctl + AGNX_RXM_BSSIDHI);
	reg = (bssid[4] << 8) | bssid[5];
	iowrite32(reg, ctl + AGNX_RXM_BSSIDLO);

	/* Enable the receiver */
	enable_receiver(priv);

	/* Clear the TSF */
/* 	agnx_write32(ctl, AGNX_TXM_TSFLO, 0x0); */
/* 	agnx_write32(ctl, AGNX_TXM_TSFHI, 0x0); */
	/* Clear the TBTT */
	agnx_write32(ctl, AGNX_TXM_TBTTLO, 0x0);
	agnx_write32(ctl, AGNX_TXM_TBTTHI, 0x0);
	disable_receiver(priv);
} /* receiver_bssid_set */
/*
 * Initialise the Band Management unit: clear the on-card PDU pool,
 * program the work-queue counters, build the free-pool linked list in
 * card memory, start all work queues, and finally enable the unit.
 * The register write order follows the reverse-engineered init
 * sequence and should not be reordered.
 */
static void band_management_init(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	void __iomem *data = priv->data;
	u32 reg;
	int i;
	AGNX_TRACE;

	agnx_write32(ctl, AGNX_BM_TXWADDR, AGNX_PDU_TX_WQ);
	agnx_write32(ctl, AGNX_CIR_ADDRWIN, 0x0);
	memset_io(data + AGNX_PDUPOOL, 0x0, AGNX_PDUPOOL_SIZE);
	agnx_write32(ctl, AGNX_BM_BMCTL, 0x200);

	agnx_write32(ctl, AGNX_BM_CIPDUWCNT, 0x40);
	agnx_write32(ctl, AGNX_BM_SPPDUWCNT, 0x2);
	agnx_write32(ctl, AGNX_BM_RFPPDUWCNT, 0x0);
	agnx_write32(ctl, AGNX_BM_RHPPDUWCNT, 0x22);

	/* FIXME Initialize the Free Pool Linked List */
	/*    1. Write the Address of the Next Node ((0x41800 + node*size)/size)
	      to the first word of each node.  */
	for (i = 0; i < PDU_FREE_CNT; i++) {
		iowrite32((AGNX_PDU_FREE + (i+1)*PDU_SIZE)/PDU_SIZE,
			  data + AGNX_PDU_FREE + (PDU_SIZE * i));
		/* The last node should be set to 0x0 */
		if ((i + 1) == PDU_FREE_CNT)
			memset_io(data + AGNX_PDU_FREE + (PDU_SIZE * i),
				  0x0, PDU_SIZE);
	}

	/* Head is First Pool address (0x41800) / size (0x80) */
	agnx_write32(ctl, AGNX_BM_FPLHP, AGNX_PDU_FREE/PDU_SIZE);
	/* Tail is Last Pool Address (0x47f80) / size (0x80) */
	agnx_write32(ctl, AGNX_BM_FPLTP, 0x47f80/PDU_SIZE);
	/* Count is Number of Nodes in the Pool (0xd0) */
	agnx_write32(ctl, AGNX_BM_FPCNT, PDU_FREE_CNT);

	/* Start all workqueue */
	agnx_write32(ctl, AGNX_BM_CIWQCTL, 0x80000);
	agnx_write32(ctl, AGNX_BM_CPULWCTL, 0x80000);
	agnx_write32(ctl, AGNX_BM_CPUHWCTL, 0x80000);
	agnx_write32(ctl, AGNX_BM_CPUTXWCTL, 0x80000);
	agnx_write32(ctl, AGNX_BM_CPURXWCTL, 0x80000);
	agnx_write32(ctl, AGNX_BM_SPRXWCTL, 0x80000);
	agnx_write32(ctl, AGNX_BM_SPTXWCTL, 0x80000);
	agnx_write32(ctl, AGNX_BM_RFPWCTL, 0x80000);

	/* Enable the Band Management */
	reg = agnx_read32(ctl, AGNX_BM_BMCTL);
	reg |= 0x1;
	agnx_write32(ctl, AGNX_BM_BMCTL, reg);
} /* band_managment_init */
/*
 * Initialise the system interface block (GPIOs, test PHY register, and
 * the serial interface).  Revision-0 silicon needs two extra mode bits
 * set in SYSMODE.
 */
static void system_itf_init(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;
	AGNX_TRACE;

	agnx_write32(ctl, AGNX_SYSITF_GPIOUT, 0x0);
	agnx_write32(ctl, AGNX_PM_TESTPHY, 0x11e143a);

	if (priv->revid == 0) {
		reg = agnx_read32(ctl, AGNX_SYSITF_SYSMODE);
		reg |= 0x11;
		agnx_write32(ctl, AGNX_SYSITF_SYSMODE, reg);
	}
	/* ??? Meaning unclear; this value probably needs to differ for
	   different card types. */
	agnx_write32(ctl, AGNX_CIR_SERIALITF, 0xfff81006);

	agnx_write32(ctl, AGNX_SYSITF_GPIOIN, 0x1f0000);
	agnx_write32(ctl, AGNX_SYSITF_GPIOUT, 0x5);
	reg = agnx_read32(ctl, AGNX_SYSITF_GPIOIN);
}
/*
 * Initialise the encryption engine: clear all four WEP key registers
 * and set the CCM receive control register to its default.
 */
static void encryption_init(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	AGNX_TRACE;

	agnx_write32(ctl, AGNX_ENCRY_WEPKEY0, 0x0);
	agnx_write32(ctl, AGNX_ENCRY_WEPKEY1, 0x0);
	agnx_write32(ctl, AGNX_ENCRY_WEPKEY2, 0x0);
	agnx_write32(ctl, AGNX_ENCRY_WEPKEY3, 0x0);
	agnx_write32(ctl, AGNX_ENCRY_CCMRECTL, 0x8);
}
/*
 * Initialise the Transmission Management unit: load the computational
 * engine lookup table, program IFS/timeout/contention-window defaults,
 * and start the TX engine.  The sequence is a long series of
 * read-modify-write operations on magic register values taken from the
 * reverse-engineered specs; the order is significant and must not be
 * changed.
 */
static void tx_management_init(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	void __iomem *data = priv->data;
	u32 reg;
	AGNX_TRACE;

	/* Fill out the ComputationalEngineLookupTable
	 * starting at memory #2 offset 0x800
	 */
	tx_engine_lookup_tbl_init(priv);
	memset_io(data + 0x1000, 0, 0xfe0);
	/* Enable Transmission Management Functions */
	agnx_write32(ctl, AGNX_TXM_ETMF, 0x3ff);
	/* Write 0x3f to Transmission Template */
	agnx_write32(ctl, AGNX_TXM_TXTEMP, 0x3f);

	if (priv->revid >= 2)
		agnx_write32(ctl, AGNX_TXM_SIFSPIFS, 0x1e140a0b);
	else
		agnx_write32(ctl, AGNX_TXM_SIFSPIFS, 0x1e190a0b);

	reg = agnx_read32(ctl, AGNX_TXM_TIFSEIFS);
	reg &= 0xff00;
	reg |= 0xb;
	agnx_write32(ctl, AGNX_TXM_TIFSEIFS, reg);
	reg = agnx_read32(ctl, AGNX_TXM_TIFSEIFS);
	reg &= 0xffff00ff;
	reg |= 0xa00;
	agnx_write32(ctl, AGNX_TXM_TIFSEIFS, reg);
	/* Enable TIFS */
	agnx_write32(ctl, AGNX_TXM_CTL, 0x40000);

	reg = agnx_read32(ctl, AGNX_TXM_TIFSEIFS);
	reg &= 0xff00ffff;
	reg |= 0x510000;
	agnx_write32(ctl, AGNX_TXM_TIFSEIFS, reg);
	reg = agnx_read32(ctl, AGNX_TXM_PROBDELAY);
	reg &= 0xff00ffff;
	agnx_write32(ctl, AGNX_TXM_PROBDELAY, reg);
	reg = agnx_read32(ctl, AGNX_TXM_TIFSEIFS);
	reg &= 0x00ffffff;
	reg |= 0x1c000000;
	agnx_write32(ctl, AGNX_TXM_TIFSEIFS, reg);
	reg = agnx_read32(ctl, AGNX_TXM_PROBDELAY);
	reg &= 0x00ffffff;
	reg |= 0x01000000;
	agnx_write32(ctl, AGNX_TXM_PROBDELAY, reg);

	/* # Set DIF 0-1,2-3,4-5,6-7 to defaults */
	agnx_write32(ctl, AGNX_TXM_DIF01, 0x321d321d);
	agnx_write32(ctl, AGNX_TXM_DIF23, 0x321d321d);
	agnx_write32(ctl, AGNX_TXM_DIF45, 0x321d321d);
	agnx_write32(ctl, AGNX_TXM_DIF67, 0x321d321d);

	/* Max Ack timeout limit */
	agnx_write32(ctl, AGNX_TXM_MAXACKTIM, 0x1e19);
	/* Max RX Data Timeout count, */
	reg = agnx_read32(ctl, AGNX_TXM_MAXRXTIME);
	reg &= 0xffff0000;
	reg |= 0xff;
	agnx_write32(ctl, AGNX_TXM_MAXRXTIME, reg);
	/* CF poll RX Timeout count */
	reg = agnx_read32(ctl, AGNX_TXM_CFPOLLRXTIM);
	reg &= 0xffff;
	reg |= 0xff0000;
	agnx_write32(ctl, AGNX_TXM_CFPOLLRXTIM, reg);
	/* Max Timeout Exceeded count, */
	reg = agnx_read32(ctl, AGNX_TXM_MAXTIMOUT);
	reg &= 0xff00ffff;
	reg |= 0x190000;
	agnx_write32(ctl, AGNX_TXM_MAXTIMOUT, reg);
	/* CF ack timeout limit for 11b */
	reg = agnx_read32(ctl, AGNX_TXM_CFACKT11B);
	reg &= 0xff00;
	reg |= 0x1e;
	agnx_write32(ctl, AGNX_TXM_CFACKT11B, reg);
	/* Max CF Poll Timeout Count */
	reg = agnx_read32(ctl, AGNX_TXM_CFPOLLRXTIM);
	reg &= 0xffff0000;
	reg |= 0x19;
	agnx_write32(ctl, AGNX_TXM_CFPOLLRXTIM, reg);
	/* CF Poll RX Timeout Count */
	reg = agnx_read32(ctl, AGNX_TXM_CFPOLLRXTIM);
	reg &= 0xffff0000;
	reg |= 0x1e;
	agnx_write32(ctl, AGNX_TXM_CFPOLLRXTIM, reg);

	/* # write default to */
	/*  1. Schedule Empty Count */
	agnx_write32(ctl, AGNX_TXM_SCHEMPCNT, 0x5);
	/* 2. CFP Period Count */
	agnx_write32(ctl, AGNX_TXM_CFPERCNT, 0x1);
	/* 3. CFP MDV  */
	agnx_write32(ctl, AGNX_TXM_CFPMDV, 0x10000);

	/* Probe Delay */
	reg = agnx_read32(ctl, AGNX_TXM_PROBDELAY);
	reg &= 0xffff0000;
	reg |= 0x400;
	agnx_write32(ctl, AGNX_TXM_PROBDELAY, reg);
	/* Max CCA count Slot */
	reg = agnx_read32(ctl, AGNX_TXM_MAXCCACNTSLOT);
	reg &= 0xffff00ff;
	reg |= 0x900;
	agnx_write32(ctl, AGNX_TXM_MAXCCACNTSLOT, reg);
	/* Slot limit/1 msec Limit */
	reg = agnx_read32(ctl, AGNX_TXM_SLOTLIMIT);
	reg &= 0xff00ffff;
	reg |= 0x140077;
	agnx_write32(ctl, AGNX_TXM_SLOTLIMIT, reg);

	/* # Set CW #(0-7) to default */
	agnx_write32(ctl, AGNX_TXM_CW0, 0xff0007);
	agnx_write32(ctl, AGNX_TXM_CW1, 0xff0007);
	agnx_write32(ctl, AGNX_TXM_CW2, 0xff0007);
	agnx_write32(ctl, AGNX_TXM_CW3, 0xff0007);
	agnx_write32(ctl, AGNX_TXM_CW4, 0xff0007);
	agnx_write32(ctl, AGNX_TXM_CW5, 0xff0007);
	agnx_write32(ctl, AGNX_TXM_CW6, 0xff0007);
	agnx_write32(ctl, AGNX_TXM_CW7, 0xff0007);

	/* # Set Short/Long limit #(0-7) to default */
	agnx_write32(ctl, AGNX_TXM_SLBEALIM0, 0xa000a);
	agnx_write32(ctl, AGNX_TXM_SLBEALIM1, 0xa000a);
	agnx_write32(ctl, AGNX_TXM_SLBEALIM2, 0xa000a);
	agnx_write32(ctl, AGNX_TXM_SLBEALIM3, 0xa000a);
	agnx_write32(ctl, AGNX_TXM_SLBEALIM4, 0xa000a);
	agnx_write32(ctl, AGNX_TXM_SLBEALIM5, 0xa000a);
	agnx_write32(ctl, AGNX_TXM_SLBEALIM6, 0xa000a);
	agnx_write32(ctl, AGNX_TXM_SLBEALIM7, 0xa000a);

	reg = agnx_read32(ctl, AGNX_TXM_CTL);
	reg |= 0x1400;
	agnx_write32(ctl, AGNX_TXM_CTL, reg);
	/* Wait for bit 0 in Control Reg to clear  */
	udelay(80);
	reg = agnx_read32(ctl, AGNX_TXM_CTL);
	/* Or 0x18000 to Control reg */
	reg = agnx_read32(ctl, AGNX_TXM_CTL);
	reg |= 0x18000;
	agnx_write32(ctl, AGNX_TXM_CTL, reg);
	/* Wait for bit 0 in Control Reg to clear */
	udelay(80);
	reg = agnx_read32(ctl, AGNX_TXM_CTL);

	/* Set Listen Interval Count to default */
	agnx_write32(ctl, AGNX_TXM_LISINTERCNT, 0x1);
	/* Set DTIM period count to default */
	agnx_write32(ctl, AGNX_TXM_DTIMPERICNT, 0x2000);
} /* tx_management_init */
/*
 * Initialise the Receive Management unit: load the routing table and
 * program revision-dependent delay registers, then set the default
 * request rate.
 */
static void rx_management_init(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	AGNX_TRACE;

	/* Initialize the Routing Table */
	routing_table_init(priv);

	if (priv->revid >= 3) {
		agnx_write32(ctl, 0x2074, 0x1f171710);
		agnx_write32(ctl, 0x2078, 0x10100d0d);
		agnx_write32(ctl, 0x207c, 0x11111010);
	}
	else
		agnx_write32(ctl, AGNX_RXM_DELAY11, 0x0);
	agnx_write32(ctl, AGNX_RXM_REQRATE, 0x8195e00);
}
/*
 * Initialise the hardware timers.  Timer 1 is left disabled (zeroed);
 * the commented-out values show the configuration the original author
 * experimented with.  The beacon control register is set to all-ones.
 */
static void agnx_timer_init(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	AGNX_TRACE;

/* 	/\* Write 0x249f00 (tick duration?) to Timer 1 *\/ */
/* 	agnx_write32(ctl, AGNX_TIMCTL_TIMER1, 0x249f00); */
/* 	/\* Write 0xe2 to Timer 1 Control *\/ */
/* 	agnx_write32(ctl, AGNX_TIMCTL_TIM1CTL, 0xe2); */

	/* Write 0x249f00 (tick duration?) to Timer 1 */
	agnx_write32(ctl, AGNX_TIMCTL_TIMER1, 0x0);
	/* Write 0xe2 to Timer 1 Control */
	agnx_write32(ctl, AGNX_TIMCTL_TIM1CTL, 0x0);

	iowrite32(0xFFFFFFFF, priv->ctl + AGNX_TXM_BEACON_CTL);
}
/*
 * Initialise the power management unit; revision 3 and later silicon
 * needs two extra soft-reset bits set.
 */
static void power_manage_init(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;
	AGNX_TRACE;

	agnx_write32(ctl, AGNX_PM_MACMSW, 0x1f);
	agnx_write32(ctl, AGNX_PM_RFCTL, 0x1f);

	reg = agnx_read32(ctl, AGNX_PM_PMCTL);
	reg &= 0xf00f;
	reg |= 0xa0;
	agnx_write32(ctl, AGNX_PM_PMCTL, reg);

	if (priv->revid >= 3) {
		reg = agnx_read32(ctl, AGNX_PM_SOFTRST);
		reg |= 0x18;
		agnx_write32(ctl, AGNX_PM_SOFTRST, reg);
	}
} /* power_manage_init */
/*
 * Initialise the gain-control counters, pulse PMCTL bit 3, write the
 * initial station descriptors for the local station and the BSSID
 * pseudo-station, and allow both stations to transmit.
 */
static void gain_ctlcnt_init(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;
	AGNX_TRACE;

	agnx_write32(ctl, AGNX_GCR_TRACNT5, 0x119);
	agnx_write32(ctl, AGNX_GCR_TRACNT6, 0x118);
	agnx_write32(ctl, AGNX_GCR_TRACNT7, 0x117);

	reg = agnx_read32(ctl, AGNX_PM_PMCTL);
	reg |= 0x8;
	agnx_write32(ctl, AGNX_PM_PMCTL, reg);

	reg = agnx_read32(ctl, AGNX_PM_PMCTL);
	reg &= ~0x8;
	agnx_write32(ctl, AGNX_PM_PMCTL, reg);

	agnx_write32(ctl, AGNX_CIR_ADDRWIN, 0x0);

	/* FIXME Write the initial Station Descriptor for the card */
	sta_init(priv, LOCAL_STAID);
	sta_init(priv, BSSID_STAID);

	/* Enable staion 0 and 1 can do TX */
	/* It seemed if we set other bit to 1 the bit 0 will
	   be auto change to 0 */
	agnx_write32(ctl, AGNX_BM_TXTOPEER, 0x2 | 0x1);
/*	agnx_write32(ctl, AGNX_BM_TXTOPEER, 0x1); */
} /* gain_ctlcnt_init */
/*
 * Initialise the PHY: load the gain table, clear several card-memory
 * regions, and program a large revision-dependent set of gain-control
 * and ACI registers (magic values from the reverse-engineered specs).
 * Finally loads the TX FIR coefficient table and enables the PLL.
 *
 * NOTE(review): the `else if (priv->revid > 1)` branch at the end of
 * the chain is UNREACHABLE dead code — any revid > 1 is already caught
 * by the preceding `revid > 0` branch.  It was probably meant to be an
 * additional `if` executed after the chain for revid > 1, but the
 * correct intent cannot be confirmed without hardware documentation,
 * so the code is left untouched and only flagged here.
 */
static void phy_init(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	void __iomem *data = priv->data;
	u32 reg;
	AGNX_TRACE;

	/* Load InitialGainTable */
	gain_table_init(priv);

	agnx_write32(ctl, AGNX_CIR_ADDRWIN, 0x2000000);

	/* Clear the following offsets in Memory Range #2: */
	memset_io(data + 0x5040, 0, 0xa * 4);
	memset_io(data + 0x5080, 0, 0xa * 4);
	memset_io(data + 0x50c0, 0, 0xa * 4);
	memset_io(data + 0x5400, 0, 0x80 * 4);
	memset_io(data + 0x6000, 0, 0x280 * 4);
	memset_io(data + 0x7000, 0, 0x280 * 4);
	memset_io(data + 0x8000, 0, 0x280 * 4);

	/* Initialize the Following Registers According to PCI Revision ID */
	if (priv->revid == 0) {
		/* fixme the part hasn't been update but below has been update
		   based on WGM511 */
		agnx_write32(ctl, AGNX_ACI_LEN, 0xf);
		agnx_write32(ctl, AGNX_ACI_TIMER1, 0x1d);
		agnx_write32(ctl, AGNX_ACI_TIMER2, 0x3);
		agnx_write32(ctl, AGNX_ACI_AICCHA0OVE, 0x11);
		agnx_write32(ctl, AGNX_ACI_AICCHA1OVE, 0x0);
		agnx_write32(ctl, AGNX_GCR_THD0A, 0x64);
		agnx_write32(ctl, AGNX_GCR_THD0AL, 0x4b);
		agnx_write32(ctl, AGNX_GCR_THD0B, 0x4b);
		agnx_write32(ctl, AGNX_GCR_DUNSAT, 0x14);
		agnx_write32(ctl, AGNX_GCR_DSAT, 0x24);
		agnx_write32(ctl, AGNX_GCR_DFIRCAL, 0x8);
		agnx_write32(ctl, AGNX_GCR_DGCTL11A, 0x1a);
		agnx_write32(ctl, AGNX_GCR_DGCTL11B, 0x3);
		agnx_write32(ctl, AGNX_GCR_GAININIT, 0xd);
		agnx_write32(ctl, AGNX_GCR_THNOSIG, 0x1);
		agnx_write32(ctl, AGNX_GCR_COARSTEP, 0x7);
		agnx_write32(ctl, AGNX_GCR_SIFST11A, 0x28);
		agnx_write32(ctl, AGNX_GCR_SIFST11B, 0x28);
		reg = agnx_read32(ctl, AGNX_GCR_CWDETEC);
		reg |= 0x1;
		agnx_write32(ctl, AGNX_GCR_CWDETEC, reg);
		agnx_write32(ctl, AGNX_GCR_0X38, 0x1e);
		agnx_write32(ctl, AGNX_GCR_BOACT, 0x26);
		agnx_write32(ctl, AGNX_GCR_DISCOVMOD, 0x3);
		agnx_write32(ctl, AGNX_GCR_NLISTANT, 0x3);
		agnx_write32(ctl, AGNX_GCR_NACTIANT, 0x3);
		agnx_write32(ctl, AGNX_GCR_NMEASANT, 0x3);
		agnx_write32(ctl, AGNX_GCR_NCAPTANT, 0x3);
		agnx_write32(ctl, AGNX_GCR_THCAP11A, 0x0);
		agnx_write32(ctl, AGNX_GCR_THCAP11B, 0x0);
		agnx_write32(ctl, AGNX_GCR_THCAPRX11A, 0x0);
		agnx_write32(ctl, AGNX_GCR_THCAPRX11B, 0x0);
		agnx_write32(ctl, AGNX_GCR_THLEVDRO, 0x10);
		agnx_write32(ctl, AGNX_GCR_MAXRXTIME11A, 0x1);
		agnx_write32(ctl, AGNX_GCR_MAXRXTIME11B, 0x1);
		agnx_write32(ctl, AGNX_GCR_CORRTIME, 0x190);
		agnx_write32(ctl, AGNX_GCR_SIGHTH, 0x78);
		agnx_write32(ctl, AGNX_GCR_SIGLTH, 0x1c);
		agnx_write32(ctl, AGNX_GCR_CORRDROP, 0x0);
		agnx_write32(ctl, AGNX_GCR_THCD, 0x0);
		agnx_write32(ctl, AGNX_GCR_MAXPOWDIFF, 0x1);
		agnx_write32(ctl, AGNX_GCR_TESTBUS, 0x0);
		agnx_write32(ctl, AGNX_GCR_ANTCFG, 0x1f);
		agnx_write32(ctl, AGNX_GCR_THJUMP, 0x14);
		agnx_write32(ctl, AGNX_GCR_THPOWER, 0x0);
		agnx_write32(ctl, AGNX_GCR_THPOWCLIP, 0x30);
		agnx_write32(ctl, AGNX_GCR_THD0BTFEST, 0x32);
		agnx_write32(ctl, AGNX_GCR_THRX11BPOWMIN, 0x19);
		agnx_write32(ctl, AGNX_GCR_0X14c, 0x0);
		agnx_write32(ctl, AGNX_GCR_0X150, 0x0);
		agnx_write32(ctl, 0x9400, 0x0);
		agnx_write32(ctl, 0x940c, 0x6ff);
		agnx_write32(ctl, 0x9428, 0xa0);
		agnx_write32(ctl, 0x9434, 0x0);
		agnx_write32(ctl, 0x9c04, 0x15);
		agnx_write32(ctl, 0x9c0c, 0x7f);
		agnx_write32(ctl, 0x9c34, 0x0);
		agnx_write32(ctl, 0xc000, 0x38d);
		agnx_write32(ctl, 0x14018, 0x0);
		agnx_write32(ctl, 0x16000, 0x1);
		agnx_write32(ctl, 0x11004, 0x0);
		agnx_write32(ctl, 0xec54, 0xa);
		agnx_write32(ctl, 0xec1c, 0x5);
	} else if (priv->revid > 0) {
		agnx_write32(ctl, AGNX_ACI_LEN, 0xf);
		agnx_write32(ctl, AGNX_ACI_TIMER1, 0x21);
		agnx_write32(ctl, AGNX_ACI_TIMER2, 0x27);
		agnx_write32(ctl, AGNX_ACI_AICCHA0OVE, 0x11);
		agnx_write32(ctl, AGNX_ACI_AICCHA1OVE, 0x0);
		agnx_write32(ctl, AGNX_GCR_DUNSAT, 0x14);
		agnx_write32(ctl, AGNX_GCR_DSAT, 0x24);
		agnx_write32(ctl, AGNX_GCR_DFIRCAL, 0x8);
		agnx_write32(ctl, AGNX_GCR_DGCTL11A, 0x1a);
		agnx_write32(ctl, AGNX_GCR_DGCTL11B, 0x3);
		agnx_write32(ctl, AGNX_GCR_GAININIT, 0xd);
		agnx_write32(ctl, AGNX_GCR_THNOSIG, 0x1);
		agnx_write32(ctl, AGNX_GCR_COARSTEP, 0x7);
		agnx_write32(ctl, AGNX_GCR_SIFST11A, 0x28);
		agnx_write32(ctl, AGNX_GCR_SIFST11B, 0x28);
		agnx_write32(ctl, AGNX_GCR_CWDETEC, 0x0);
		agnx_write32(ctl, AGNX_GCR_0X38, 0x1e);
/* 		agnx_write32(ctl, AGNX_GCR_BOACT, 0x26); */
		agnx_write32(ctl, AGNX_GCR_DISCOVMOD, 0x3);
		agnx_write32(ctl, AGNX_GCR_THCAP11A, 0x32);
		agnx_write32(ctl, AGNX_GCR_THCAP11B, 0x32);
		agnx_write32(ctl, AGNX_GCR_THCAPRX11A, 0x32);
		agnx_write32(ctl, AGNX_GCR_THCAPRX11B, 0x32);
		agnx_write32(ctl, AGNX_GCR_THLEVDRO, 0x10);
		agnx_write32(ctl, AGNX_GCR_MAXRXTIME11A, 0x1ad);
		agnx_write32(ctl, AGNX_GCR_MAXRXTIME11B, 0xa10);
		agnx_write32(ctl, AGNX_GCR_CORRTIME, 0x190);
		agnx_write32(ctl, AGNX_GCR_CORRDROP, 0x0);
		agnx_write32(ctl, AGNX_GCR_THCD, 0x0);
		agnx_write32(ctl, AGNX_GCR_THCS, 0x0);
		agnx_write32(ctl, AGNX_GCR_MAXPOWDIFF, 0x4);
		agnx_write32(ctl, AGNX_GCR_TESTBUS, 0x0);
		agnx_write32(ctl, AGNX_GCR_THJUMP, 0x1e);
		agnx_write32(ctl, AGNX_GCR_THPOWER, 0x0);
		agnx_write32(ctl, AGNX_GCR_THPOWCLIP, 0x2a);
		agnx_write32(ctl, AGNX_GCR_THD0BTFEST, 0x3c);
		agnx_write32(ctl, AGNX_GCR_THRX11BPOWMIN, 0x19);
		agnx_write32(ctl, AGNX_GCR_0X14c, 0x0);
		agnx_write32(ctl, AGNX_GCR_0X150, 0x0);
		agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0x0);
		agnx_write32(ctl, AGNX_GCR_WATCHDOG, 0x37);
		agnx_write32(ctl, 0x9400, 0x0);
		agnx_write32(ctl, 0x940c, 0x6ff);
		agnx_write32(ctl, 0x9428, 0xa0);
		agnx_write32(ctl, 0x9434, 0x0);
		agnx_write32(ctl, 0x9c04, 0x15);
		agnx_write32(ctl, 0x9c0c, 0x7f);
		agnx_write32(ctl, 0x9c34, 0x0);
		agnx_write32(ctl, 0xc000, 0x38d);
		agnx_write32(ctl, 0x14014, 0x1000);
		agnx_write32(ctl, 0x14018, 0x0);
		agnx_write32(ctl, 0x16000, 0x1);
		agnx_write32(ctl, 0x11004, 0x0);
		agnx_write32(ctl, 0xec54, 0xa);
		agnx_write32(ctl, 0xec1c, 0x50);
	} else if (priv->revid > 1) {
		/* NOTE(review): unreachable — see function header comment. */
		reg = agnx_read32(ctl, 0xec18);
		reg |= 0x8;
		agnx_write32(ctl, 0xec18, reg);
	}

	/* Write the TX Fir Coefficient Table */
	tx_fir_table_init(priv);

	reg = agnx_read32(ctl, AGNX_PM_PMCTL);
	reg &= ~0x8;
	agnx_write32(ctl, AGNX_PM_PMCTL, reg);
	reg = agnx_read32(ctl, AGNX_PM_PLLCTL);
	reg |= 0x1;
	agnx_write32(ctl, AGNX_PM_PLLCTL, reg);

/* 	reg = agnx_read32(ctl, 0x1a030); */
/* 	reg &= ~0x4; */
/* 	agnx_write32(ctl, 0x1a030, reg); */

	agnx_write32(ctl, AGNX_GCR_TRACNT4, 0x113);
} /* phy_init */
/*
 * Full chip initialisation (Polaris): bring up band management and the
 * RF chips, then the PHY, encryption, TX/RX management, power
 * management, and timers; finally program the interrupt mask and
 * enable the block-control/multicast registers.
 */
static void chip_init(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;
	AGNX_TRACE;

	band_management_init(priv);

	rf_chips_init(priv);

	reg = agnx_read32(ctl, AGNX_PM_PMCTL);
	reg |= 0x8;
	agnx_write32(ctl, AGNX_PM_PMCTL, reg);

	/* Initialize the PHY */
	phy_init(priv);

	encryption_init(priv);

	tx_management_init(priv);

	rx_management_init(priv);

	power_manage_init(priv);

	/* Initialize the Timers */
	agnx_timer_init(priv);

	/* Write 0xc390bf9 to Interrupt Mask (Disable TX) */
	reg = 0xc390bf9 & ~IRQ_TX_BEACON;
	reg &= ~IRQ_TX_DISABLE;
	agnx_write32(ctl, AGNX_INT_MASK, reg);

	reg = agnx_read32(ctl, AGNX_CIR_BLKCTL);
	reg |= 0x800;
	agnx_write32(ctl, AGNX_CIR_BLKCTL, reg);

	/* set it when need get multicast enable? */
	agnx_write32(ctl, AGNX_BM_MTSM, 0xff);
} /* chip_init */
/*
 * Set the system mode to promiscuous (0x10) plus managed (0x2).
 *
 * NOTE(review): the register is written twice with the same value —
 * possibly a deliberate double-write for this hardware, possibly a
 * copy/paste slip.  Left as-is; confirm before removing.
 */
static inline void set_promis_and_managed(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	agnx_write32(ctl, AGNX_SYSITF_SYSMODE, 0x10 | 0x2);
	agnx_write32(ctl, AGNX_SYSITF_SYSMODE, 0x10 | 0x2);
}
/* Put the system interface into learn mode (SYSMODE = 0x8). */
static inline void set_learn_mode(struct agnx_priv *priv)
{
	agnx_write32(priv->ctl, AGNX_SYSITF_SYSMODE, 0x8);
}
/* Put the system interface into scan mode (SYSMODE = 0x20). */
static inline void set_scan_mode(struct agnx_priv *priv)
{
	agnx_write32(priv->ctl, AGNX_SYSITF_SYSMODE, 0x20);
}
/* Put the system interface into promiscuous mode (SYSMODE = 0x10). */
static inline void set_promiscuous_mode(struct agnx_priv *priv)
{
	/* agnx_write32(ctl, AGNX_SYSITF_SYSMODE, 0x210);*/
	agnx_write32(priv->ctl, AGNX_SYSITF_SYSMODE, 0x10);
}
/* Put the system interface into managed (station) mode (SYSMODE = 0x2). */
static inline void set_managed_mode(struct agnx_priv *priv)
{
	agnx_write32(priv->ctl, AGNX_SYSITF_SYSMODE, 0x2);
}
/* Put the system interface into ad-hoc mode (SYSMODE = 0x0). */
static inline void set_adhoc_mode(struct agnx_priv *priv)
{
	agnx_write32(priv->ctl, AGNX_SYSITF_SYSMODE, 0x0);
}
#if 0
/*
 * Disabled experimental code: writes a table of magic values to an
 * undocumented register block.  Kept (under #if 0) as reverse-
 * engineering reference; purpose of the registers is unknown.
 */
static void unknow_register_write(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;

	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x0, 0x3e);
	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x4, 0xb2);
	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x8, 0x140);
	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0xc, 0x1C0);
	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x10, 0x1FF);
	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x14, 0x1DD);
	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x18, 0x15F);
	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x1c, 0xA1);
	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x20, 0x3E7);
	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x24, 0x36B);
	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x28, 0x348);
	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x2c, 0x37D);
	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x30, 0x3DE);
	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x34, 0x36);
	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x38, 0x64);
	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x3c, 0x57);
	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x40, 0x23);
	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x44, 0x3ED);
	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x48, 0x3C9);
	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x4c, 0x3CA);
	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x50, 0x3E7);
	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x54, 0x8);
	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x58, 0x1F);
	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x5c, 0x1a);
}
#endif
/*
 * Bring the whole card up: full reset, endianness check, EEPROM
 * configuration and dump, system interface and chip init, antenna
 * calibration, station/ring setup, receiver programming, interrupt
 * configuration, and finally tuning to channel 1.  May sleep.
 *
 * NOTE(review): the EEPROM dump uses a 4 KiB (0x100000/0x100) buffer on
 * the kernel stack — large for kernel context; consider kmalloc if this
 * is ever kept beyond debugging.
 */
static void card_interface_init(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u8 bssid[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	u32 reg;
	unsigned int i;
	AGNX_TRACE;

	might_sleep();
	/* Clear RX Control and Enable RX queues */
	agnx_write32(ctl, AGNX_CIR_RXCTL, 0x8);

	might_sleep();
	/* Do a full reset of the card */
	card_full_reset(priv);
	might_sleep();

	/* Check and set Card Endianness */
	reg = ioread32(priv->ctl + AGNX_CIR_ENDIAN);
	/* TODO If not 0xB3B2B1B0 set to 0xB3B2B1B0 */
	printk(KERN_INFO PFX "CIR_ENDIAN is %x\n", reg);

	/* Config the eeprom */
	agnx_write32(ctl, AGNX_CIR_SERIALITF, 0x7000086);
	udelay(10);
	reg = agnx_read32(ctl, AGNX_CIR_SERIALITF);

	agnx_write32(ctl, AGNX_PM_SOFTRST, 0x80000033);
	reg = agnx_read32(ctl, 0xec50);
	reg |= 0xf;
	agnx_write32(ctl, 0xec50, reg);
	agnx_write32(ctl, AGNX_PM_SOFTRST, 0x0);

	reg = agnx_read32(ctl, AGNX_SYSITF_GPIOIN);
	udelay(10);
	reg = agnx_read32(ctl, AGNX_CIR_SERIALITF);

	/* Dump the eeprom */
	do {
		/* NOTE(review): 4 KiB on the stack — see function header. */
		char eeprom[0x100000/0x100];

		for (i = 0; i < 0x100000; i += 0x100) {
			agnx_write32(ctl, AGNX_CIR_SERIALITF, 0x3000000 + i);
			udelay(13);
			reg = agnx_read32(ctl, AGNX_CIR_SERIALITF);
			udelay(70);
			reg = agnx_read32(ctl, AGNX_CIR_SERIALITF);
			eeprom[i/0x100] = reg & 0xFF;
			udelay(10);
		}
		print_hex_dump_bytes(PFX "EEPROM: ", DUMP_PREFIX_NONE, eeprom,
				     ARRAY_SIZE(eeprom));
	} while(0);

	spi_rc_write(ctl, RF_CHIP0, 0x26);
	reg = agnx_read32(ctl, AGNX_SPI_RLSW);

	/* Initialize the system interface */
	system_itf_init(priv);

	might_sleep();
	/* Chip Initialization (Polaris) */
	chip_init(priv);
	might_sleep();

	/* Calibrate the antennae */
	antenna_calibrate(priv);

	reg = agnx_read32(ctl, 0xec50);
	reg &= ~0x40;
	agnx_write32(ctl, 0xec50, reg);
	agnx_write32(ctl, AGNX_PM_SOFTRST, 0x0);
	agnx_write32(ctl, AGNX_PM_PLLCTL, 0x1);

	reg = agnx_read32(ctl, AGNX_BM_BMCTL);
	reg |= 0x8000;
	agnx_write32(ctl, AGNX_BM_BMCTL, reg);
	enable_receiver(priv);
	reg = agnx_read32(ctl, AGNX_SYSITF_SYSMODE);
	reg |= 0x200;
	agnx_write32(ctl, AGNX_SYSITF_SYSMODE, reg);
	enable_receiver(priv);

	might_sleep();
	/* Initialize Gain Control Counts */
	gain_ctlcnt_init(priv);

	/* Write Initial Station Power Template for this station(#0) */
	sta_power_init(priv, LOCAL_STAID);

	might_sleep();
	/* Initialize the rx,td,tm rings, for each node in the ring */
	fill_rings(priv);

	might_sleep();
	agnx_write32(ctl, AGNX_PM_SOFTRST, 0x80000033);
	agnx_write32(ctl, 0xec50, 0xc);
	agnx_write32(ctl, AGNX_PM_SOFTRST, 0x0);

	/* FIXME Initialize the transmit control register */
	agnx_write32(ctl, AGNX_TXM_CTL, 0x194c1);

	enable_receiver(priv);

	might_sleep();
	/* FIXME Set the Receive Control Mac Address to card address */
	mac_address_set(priv);
	enable_receiver(priv);
	might_sleep();

	/* Set the recieve request rate */
	/* FIXME Enable the request */
	/* Check packet length */
	/* Set maximum packet length */
/* 	agnx_write32(ctl, AGNX_RXM_REQRATE, 0x88195e00); */
/* 	enable_receiver(priv); */

	/* Set the Receiver BSSID */
	receiver_bssid_set(priv, bssid);

	/* FIXME Set to managed mode */
	set_managed_mode(priv);
/*	set_promiscuous_mode(priv); */
	/* set_scan_mode(priv); */
	/* 	set_learn_mode(priv); */
/* 	set_promis_and_managed(priv); */
/* 	set_adhoc_mode(priv); */

	/* Set the recieve request rate */
	/* Check packet length */
	agnx_write32(ctl, AGNX_RXM_REQRATE, 0x08000000);
	reg = agnx_read32(ctl, AGNX_RXM_REQRATE);
	/* Set maximum packet length */
	reg |= 0x00195e00;
	agnx_write32(ctl, AGNX_RXM_REQRATE, reg);

	/* Configure the RX and TX interrupt */
	reg = ENABLE_RX_INTERRUPT | RX_CACHE_LINE | FRAG_LEN_2048 | FRAG_BE;
	agnx_write32(ctl, AGNX_CIR_RXCFG, reg);
	/* FIXME */
	reg = ENABLE_TX_INTERRUPT | TX_CACHE_LINE | FRAG_LEN_2048 | FRAG_BE;
	agnx_write32(ctl, AGNX_CIR_TXCFG, reg);

	/* Enable RX TX Interrupts */
	agnx_write32(ctl, AGNX_CIR_RXCTL, 0x80);
	agnx_write32(ctl, AGNX_CIR_TXMCTL, 0x80);
	agnx_write32(ctl, AGNX_CIR_TXDCTL, 0x80);

	/* FIXME Set the master control interrupt in block control */
	agnx_write32(ctl, AGNX_CIR_BLKCTL, 0x800);

	/* Enable RX and TX queues */
	reg = agnx_read32(ctl, AGNX_CIR_RXCTL);
	reg |= 0x8;
	agnx_write32(ctl, AGNX_CIR_RXCTL, reg);
	reg = agnx_read32(ctl, AGNX_CIR_TXMCTL);
	reg |= 0x8;
	agnx_write32(ctl, AGNX_CIR_TXMCTL, reg);
	reg = agnx_read32(ctl, AGNX_CIR_TXDCTL);
	reg |= 0x8;
	agnx_write32(ctl, AGNX_CIR_TXDCTL, reg);

	agnx_write32(ctl, AGNX_SYSITF_GPIOUT, 0x5);
	/* FIXME */
	/* unknow_register_write(priv); */
	/* Update local card hash entry */
	hash_write(priv, priv->mac_addr, LOCAL_STAID);

	might_sleep();

	/* FIXME */
	agnx_set_channel(priv, 1);
	might_sleep();
} /* agnx_card_interface_init */
/* Public entry point for full hardware initialisation.  May sleep. */
void agnx_hw_init(struct agnx_priv *priv)
{
	AGNX_TRACE;
	might_sleep();
	card_interface_init(priv);
}
/* Reset the hardware; a full chip reset is all that is required. */
int agnx_hw_reset(struct agnx_priv *priv)
{
	return card_full_reset(priv);
}
/*
 * Stub: the SSID is not yet pushed to the hardware; ssid/ssid_len are
 * accepted and ignored.  Returns 0.
 */
int agnx_set_ssid(struct agnx_priv *priv, u8 *ssid, size_t ssid_len)
{
	AGNX_TRACE;
	return 0;
}
/* Program the receiver's BSSID match registers. */
void agnx_set_bssid(struct agnx_priv *priv, u8 *bssid)
{
	receiver_bssid_set(priv, bssid);
}

409
drivers/staging/agnx/phy.h Normal file
View File

@ -0,0 +1,409 @@
#ifndef AGNX_PHY_H_
#define AGNX_PHY_H_
#include "agnx.h"
/* Transmission Management Registers */
#define AGNX_TXM_BASE 0x0000
#define AGNX_TXM_CTL 0x0000 /* control register */
#define AGNX_TXM_ETMF 0x0004 /* enable transmission management functions */
#define AGNX_TXM_TXTEMP 0x0008 /* transmission template */
#define AGNX_TXM_RETRYSTAID 0x000c /* Retry Station ID */
#define AGNX_TXM_TIMESTAMPLO 0x0010 /* Timestamp Lo */
#define AGNX_TXM_TIMESTAMPHI 0x0014 /* Timestamp Hi */
#define AGNX_TXM_TXDELAY 0x0018 /* tx delay */
#define AGNX_TXM_TBTTLO 0x0020 /* tbtt Lo */
#define AGNX_TXM_TBTTHI 0x0024 /* tbtt Hi */
#define AGNX_TXM_BEAINTER 0x0028 /* Beacon Interval */
#define AGNX_TXM_NAV 0x0030 /* NAV */
#define AGNX_TXM_CFPMDV 0x0034 /* CFP MDV */
#define AGNX_TXM_CFPERCNT 0x0038 /* CFP period count */
#define AGNX_TXM_PROBDELAY 0x003c /* probe delay */
#define AGNX_TXM_LISINTERCNT 0x0040 /* listen interval count */
#define AGNX_TXM_DTIMPERICNT 0x004c /* DTIM period count */
#define AGNX_TXM_BEACON_CTL 0x005c /* beacon control */
#define AGNX_TXM_SCHEMPCNT 0x007c /* schedule empty count */
#define AGNX_TXM_MAXTIMOUT 0x0084 /* max timeout exceed count */
#define AGNX_TXM_MAXCFPTIM 0x0088 /* max CF poll timeout count */
#define AGNX_TXM_MAXRXTIME 0x008c /* max RX timeout count */
#define AGNX_TXM_MAXACKTIM 0x0090 /* max ACK timeout count */
#define AGNX_TXM_DIF01 0x00a0 /* DIF 0-1 */
#define AGNX_TXM_DIF23 0x00a4 /* DIF 2-3 */
#define AGNX_TXM_DIF45 0x00a8 /* DIF 4-5 */
#define AGNX_TXM_DIF67 0x00ac /* DIF 6-7 */
#define AGNX_TXM_SIFSPIFS 0x00b0 /* SIFS/PIFS */
#define AGNX_TXM_TIFSEIFS 0x00b4 /* TIFS/EIFS */
#define AGNX_TXM_MAXCCACNTSLOT 0x00b8 /* max CCA count slot */
#define AGNX_TXM_SLOTLIMIT 0x00bc /* slot limit/1 msec limit */
#define AGNX_TXM_CFPOLLRXTIM 0x00f0 /* CF poll RX timeout count */
#define AGNX_TXM_CFACKT11B 0x00f4 /* CF ack timeout limit for 11b */
#define AGNX_TXM_CW0 0x0100 /* CW 0 */
#define AGNX_TXM_SLBEALIM0 0x0108 /* short/long beacon limit 0 */
#define AGNX_TXM_CW1 0x0120 /* CW 1 */
#define AGNX_TXM_SLBEALIM1 0x0128 /* short/long beacon limit 1 */
#define AGNX_TXM_CW2 0x0140 /* CW 2 */
#define AGNX_TXM_SLBEALIM2 0x0148 /* short/long beacon limit 2 */
#define AGNX_TXM_CW3 0x0160 /* CW 3 */
#define AGNX_TXM_SLBEALIM3 0x0168 /* short/long beacon limit 3 */
#define AGNX_TXM_CW4 0x0180 /* CW 4 */
#define AGNX_TXM_SLBEALIM4 0x0188 /* short/long beacon limit 4 */
#define AGNX_TXM_CW5 0x01a0 /* CW 5 */
#define AGNX_TXM_SLBEALIM5 0x01a8 /* short/long beacon limit 5 */
#define AGNX_TXM_CW6 0x01c0 /* CW 6 */
#define AGNX_TXM_SLBEALIM6 0x01c8 /* short/long beacon limit 6 */
#define AGNX_TXM_CW7 0x01e0 /* CW 7 */
#define AGNX_TXM_SLBEALIM7 0x01e8 /* short/long beacon limit 7 */
#define AGNX_TXM_BEACONTEMP 0x1000 /* beacon template */
#define AGNX_TXM_STAPOWTEMP 0x1a00 /* Station Power Template */
/* Receive Management Control Registers */
#define AGNX_RXM_BASE 0x2000
#define AGNX_RXM_REQRATE 0x2000 /* requested rate */
#define AGNX_RXM_MACHI 0x2004 /* first 4 bytes of mac address */
#define AGNX_RXM_MACLO 0x2008 /* last 2 bytes of mac address */
#define AGNX_RXM_BSSIDHI 0x200c /* bssid hi */
#define AGNX_RXM_BSSIDLO 0x2010 /* bssid lo */
#define AGNX_RXM_HASH_CMD_FLAG 0x2014 /* Flags for the RX Hash Command Default:0 */
#define AGNX_RXM_HASH_CMD_HIGH 0x2018 /* The High half of the Hash Command */
#define AGNX_RXM_HASH_CMD_LOW 0x201c /* The Low half of the Hash Command */
#define AGNX_RXM_ROUTAB 0x2020 /* routing table */
#define ROUTAB_SUBTYPE_SHIFT 24
#define ROUTAB_TYPE_SHIFT 28
#define ROUTAB_STATUS_SHIFT 30
#define ROUTAB_RW_SHIFT 31
#define ROUTAB_ROUTE_DROP 0xf00000 /* Drop */
#define ROUTAB_ROUTE_CPU 0x400000 /* CPU */
#define ROUTAB_ROUTE_ENCRY 0x500800 /* Encryption */
#define ROUTAB_ROUTE_RFP 0x800000 /* RFP */
#define ROUTAB_TYPE_MANAG 0x0 /* Management */
#define ROUTAB_TYPE_CTL 0x1 /* Control */
#define ROUTAB_TYPE_DATA 0x2 /* Data */
#define ROUTAB_SUBTYPE_DATA 0x0
#define ROUTAB_SUBTYPE_DATAACK 0x1
#define ROUTAB_SUBTYPE_DATAPOLL 0x2
#define ROUTAB_SUBTYPE_DATAPOLLACK 0x3
#define ROUTAB_SUBTYPE_NULL 0x4 /* NULL */
#define ROUTAB_SUBTYPE_NULLACK 0x5
#define ROUTAB_SUBTYPE_NULLPOLL 0x6
#define ROUTAB_SUBTYPE_NULLPOLLACK 0x7
#define ROUTAB_SUBTYPE_QOSDATA 0x8 /* QOS DATA */
#define ROUTAB_SUBTYPE_QOSDATAACK 0x9
#define ROUTAB_SUBTYPE_QOSDATAPOLL 0xa
#define ROUTAB_SUBTYPE_QOSDATAACKPOLL 0xb
#define ROUTAB_SUBTYPE_QOSNULL 0xc
#define ROUTAB_SUBTYPE_QOSNULLACK 0xd
#define ROUTAB_SUBTYPE_QOSNULLPOLL 0xe
#define ROUTAB_SUBTYPE_QOSNULLPOLLACK 0xf
#define AGNX_RXM_DELAY11 0x2024 /* delay 11(AB) */
#define AGNX_RXM_SOF_CNT 0x2028 /* SOF Count */
#define AGNX_RXM_FRAG_CNT 0x202c /* Fragment Count*/
#define AGNX_RXM_FCS_CNT 0x2030 /* FCS Count */
#define AGNX_RXM_BSSID_MISS_CNT 0x2034 /* BSSID Miss Count */
#define AGNX_RXM_PDU_ERR_CNT 0x2038 /* PDU Error Count */
#define AGNX_RXM_DEST_MISS_CNT 0x203C /* Destination Miss Count */
#define AGNX_RXM_DROP_CNT 0x2040 /* Drop Count */
#define AGNX_RXM_ABORT_CNT 0x2044 /* Abort Count */
#define AGNX_RXM_RELAY_CNT 0x2048 /* Relay Count */
#define AGNX_RXM_HASH_MISS_CNT 0x204c /* Hash Miss Count */
#define AGNX_RXM_SA_HI 0x2050 /* Address of received packet Hi */
#define AGNX_RXM_SA_LO 0x2054 /* Address of received packet Lo */
#define AGNX_RXM_HASH_DUMP_LST 0x2100 /* Contains Hash Data */
#define AGNX_RXM_HASH_DUMP_MST 0x2104 /* Contains Hash Data */
#define AGNX_RXM_HASH_DUMP_DATA 0x2108 /* The Station ID to dump */
/* Encryption Management */
#define AGNX_ENCRY_BASE 0x2400
#define AGNX_ENCRY_WEPKEY0 0x2440 /* wep key #0 */
#define AGNX_ENCRY_WEPKEY1 0x2444 /* wep key #1 */
#define AGNX_ENCRY_WEPKEY2 0x2448 /* wep key #2 */
#define AGNX_ENCRY_WEPKEY3 0x244c /* wep key #3 */
#define AGNX_ENCRY_CCMRECTL 0x2460 /* ccm replay control */
/* Band Management Registers */
#define AGNX_BM_BASE 0x2c00
#define AGNX_BM_BMCTL 0x2c00 /* band management control */
#define AGNX_BM_TXWADDR 0x2c18 /* tx workqueue address start */
#define AGNX_BM_TXTOPEER 0x2c24 /* transmit to peers */
#define AGNX_BM_FPLHP 0x2c2c /* free pool list head pointer */
#define AGNX_BM_FPLTP 0x2c30 /* free pool list tail pointer */
#define AGNX_BM_FPCNT 0x2c34 /* free pool count */
#define AGNX_BM_CIPDUWCNT 0x2c38 /* card interface pdu workqueue count */
#define AGNX_BM_SPPDUWCNT 0x2c3c /* sp pdu workqueue count */
#define AGNX_BM_RFPPDUWCNT 0x2c40 /* rfp pdu workqueue count */
#define AGNX_BM_RHPPDUWCNT 0x2c44 /* rhp pdu workqueue count */
#define AGNX_BM_CIWQCTL 0x2c48 /* Card Interface WorkQueue Control */
#define AGNX_BM_CPUTXWCTL 0x2c50 /* cpu tx workqueue control */
#define AGNX_BM_CPURXWCTL 0x2c58 /* cpu rx workqueue control */
#define AGNX_BM_CPULWCTL 0x2c60 /* cpu low workqueue control */
#define AGNX_BM_CPUHWCTL 0x2c68 /* cpu high workqueue control */
#define AGNX_BM_SPTXWCTL 0x2c70 /* sp tx workqueue control */
#define AGNX_BM_SPRXWCTL 0x2c78 /* sp rx workqueue control */
#define AGNX_BM_RFPWCTL 0x2c80 /* RFP workqueue control */
#define AGNX_BM_MTSM 0x2c90 /* Multicast Transmit Station Mask */
/* Card Interface Registers (32bits) */
#define AGNX_CIR_BASE 0x3000
#define AGNX_CIR_BLKCTL 0x3000 /* block control*/
#define AGNX_STAT_TX 0x1
#define AGNX_STAT_RX 0x2
#define AGNX_STAT_X 0x4
/* Below two interrupt flags will be set by our but not CPU or the card */
#define AGNX_STAT_TXD 0x10
#define AGNX_STAT_TXM 0x20
#define AGNX_CIR_ADDRWIN 0x3004 /* Addressable Windows*/
#define AGNX_CIR_ENDIAN 0x3008 /* card endianness */
#define AGNX_CIR_SERIALITF 0x3020 /* serial interface */
#define AGNX_CIR_RXCFG 0x3040 /* receive config */
#define ENABLE_RX_INTERRUPT 0x20
#define RX_CACHE_LINE 0x8
/* the RX fragment length */
#define FRAG_LEN_256 0x0 /* 256B */
#define FRAG_LEN_512 0x1
#define FRAG_LEN_1024 0x2
#define FRAG_LEN_2048 0x3
#define FRAG_BE 0x10
#define AGNX_CIR_RXCTL 0x3050 /* receive control */
/* memory address, chipside */
#define AGNX_CIR_RXCMSTART 0x3054 /* receive client memory start */
#define AGNX_CIR_RXCMEND 0x3058 /* receive client memory end */
/* memory address, pci */
#define AGNX_CIR_RXHOSTADDR 0x3060 /* receive hostside address */
/* memory address, chipside */
#define AGNX_CIR_RXCLIADDR 0x3064 /* receive clientside address */
#define AGNX_CIR_RXDMACTL 0x3068 /* receive dma control */
#define AGNX_CIR_TXCFG 0x3080 /* transmit config */
#define AGNX_CIR_TXMCTL 0x3090 /* Transmit Management Control */
#define ENABLE_TX_INTERRUPT 0x20
#define TX_CACHE_LINE 0x8
#define AGNX_CIR_TXMSTART 0x3094 /* Transmit Management Start */
#define AGNX_CIR_TXMEND 0x3098 /* Transmit Management End */
#define AGNX_CIR_TXDCTL 0x30a0 /* transmit data control */
/* memory address, chipside */
#define AGNX_CIR_TXDSTART 0x30a4 /* transmit data start */
#define AGNX_CIR_TXDEND 0x30a8 /* transmit data end */
#define AGNX_CIR_TXMHADDR 0x30b0 /* Transmit Management Hostside Address */
#define AGNX_CIR_TXMCADDR 0x30b4 /* Transmit Management Clientside Address */
#define AGNX_CIR_TXDMACTL 0x30b8 /* transmit dma control */
/* Power Management Unit */
#define AGNX_PM_BASE 0x3c00
#define AGNX_PM_PMCTL 0x3c00 /* PM Control*/
#define AGNX_PM_MACMSW 0x3c08 /* MAC Manual Slow Work Enable */
#define AGNX_PM_RFCTL 0x3c0c /* RF Control */
#define AGNX_PM_PHYMW 0x3c14 /* Phy Mannal Work */
#define AGNX_PM_SOFTRST 0x3c18 /* PMU Soft Reset */
#define AGNX_PM_PLLCTL 0x3c1c /* PMU PLL control*/
#define AGNX_PM_TESTPHY 0x3c24 /* PMU Test Phy */
/* Interrupt Control interface */
#define AGNX_INT_BASE 0x4000
#define AGNX_INT_STAT 0x4000 /* interrupt status */
#define AGNX_INT_MASK 0x400c /* interrupt mask */
/* FIXME */
#define IRQ_TX_BEACON 0x1 /* TX Beacon */
#define IRQ_TX_RETRY 0x8 /* TX Retry Interrupt */
#define IRQ_TX_ACTIVITY 0x10 /* TX Activity */
#define IRQ_RX_ACTIVITY 0x20 /* RX Activity */
/* FIXME: guess — raised when RX sees a packet from a non-existent
   station, or from a station that has not been initialized yet */
#define IRQ_RX_X 0x40
#define IRQ_RX_Y 0x80 /* RX ? */
#define IRQ_RX_HASHHIT 0x100 /* RX Hash Hit */
#define IRQ_RX_FRAME 0x200 /* RX Frame */
#define IRQ_ERR_INT 0x400 /* Error Interrupt */
#define IRQ_TX_QUE_FULL 0x800 /* TX Workqueue Full */
#define IRQ_BANDMAN_ERR 0x10000 /* Bandwidth Management Error */
#define IRQ_TX_DISABLE 0x20000 /* TX Disable */
#define IRQ_RX_IVASESKEY 0x80000 /* RX Invalid Session Key */
#define IRQ_RX_KEYIDMIS 0x100000 /* RX key ID Mismatch */
#define IRQ_REP_THHIT 0x200000 /* Replay Threshold Hit */
#define IRQ_TIMER1 0x4000000 /* Timer1 */
#define IRQ_TIMER_CNT 0x10000000 /* Timer Count */
#define IRQ_PHY_FASTINT 0x20000000 /* Phy Fast Interrupt */
#define IRQ_PHY_SLOWINT 0x40000000 /* Phy Slow Interrupt */
#define IRQ_OTHER 0x80000000 /* Unknow interrupt */
#define AGNX_IRQ_ALL 0xffffffff
/* System Interface */
#define AGNX_SYSITF_BASE 0x4400
#define AGNX_SYSITF_SYSMODE 0x4400 /* system mode */
#define AGNX_SYSITF_GPIOIN 0x4410 /* GPIO In */
/* PIN lines for leds? */
#define AGNX_SYSITF_GPIOUT 0x4414 /* GPIO Out */
/* Timer Control */
#define AGNX_TIMCTL_TIMER1 0x4800 /* Timer 1 */
#define AGNX_TIMCTL_TIM1CTL 0x4808 /* Timer 1 Control */
/* Antenna Calibration Interface */
#define AGNX_ACI_BASE 0x5000
#define AGNX_ACI_MODE 0x5000 /* Mode */
#define AGNX_ACI_MEASURE 0x5004 /* Measure */
#define AGNX_ACI_SELCHAIN 0x5008 /* Select Chain */
#define AGNX_ACI_LEN 0x500c /* Length */
#define AGNX_ACI_TIMER1 0x5018 /* Timer 1 */
#define AGNX_ACI_TIMER2 0x501c /* Timer 2 */
#define AGNX_ACI_OFFSET 0x5020 /* Offset */
#define AGNX_ACI_STATUS 0x5030 /* Status */
#define CALI_IDLE 0x0
#define CALI_DONE 0x1
#define CALI_BUSY 0x2
#define CALI_ERR 0x3
#define AGNX_ACI_AICCHA0OVE 0x5034 /* AIC Channel 0 Override */
#define AGNX_ACI_AICCHA1OVE 0x5038 /* AIC Channel 1 Override */
/* Gain Control Registers */
#define AGNX_GCR_BASE 0x9000
/* threshold of primary antenna */
#define AGNX_GCR_THD0A 0x9000 /* threshold? D0 A */
/* low threshold of primary antenna */
#define AGNX_GCR_THD0AL 0x9004 /* threshold? D0 A low */
/* threshold of secondary antenna */
#define AGNX_GCR_THD0B 0x9008 /* threshold? D0_B */
#define AGNX_GCR_DUNSAT 0x900c /* d unsaturated */
#define AGNX_GCR_DSAT 0x9010 /* d saturated */
#define AGNX_GCR_DFIRCAL 0x9014 /* D Fir/Cal */
#define AGNX_GCR_DGCTL11A 0x9018 /* d gain control 11a */
#define AGNX_GCR_DGCTL11B 0x901c /* d gain control 11b */
/* strength of gain */
#define AGNX_GCR_GAININIT 0x9020 /* gain initialization */
#define AGNX_GCR_THNOSIG 0x9024 /* threhold no signal */
#define AGNX_GCR_COARSTEP 0x9028 /* coarse stepping */
#define AGNX_GCR_SIFST11A 0x902c /* sifx time 11a */
#define AGNX_GCR_SIFST11B 0x9030 /* sifx time 11b */
#define AGNX_GCR_CWDETEC 0x9034 /* cw detection */
#define AGNX_GCR_0X38 0x9038 /* ???? */
#define AGNX_GCR_BOACT 0x903c /* BO Active */
#define AGNX_GCR_BOINACT 0x9040 /* BO Inactive */
#define AGNX_GCR_BODYNA 0x9044 /* BO dynamic */
/* 802.11 mode(a,b,g) */
#define AGNX_GCR_DISCOVMOD 0x9048 /* discovery mode */
#define AGNX_GCR_NLISTANT 0x904c /* number of listening antenna */
#define AGNX_GCR_NACTIANT 0x9050 /* number of active antenna */
#define AGNX_GCR_NMEASANT 0x9054 /* number of measuring antenna */
#define AGNX_GCR_NCAPTANT 0x9058 /* number of capture antenna */
#define AGNX_GCR_THCAP11A 0x905c /* threshold capture 11a */
#define AGNX_GCR_THCAP11B 0x9060 /* threshold capture 11b */
#define AGNX_GCR_THCAPRX11A 0x9064 /* threshold capture rx 11a */
#define AGNX_GCR_THCAPRX11B 0x9068 /* threshold capture rx 11b */
#define AGNX_GCR_THLEVDRO 0x906c /* threshold level drop */
#define AGNX_GCR_GAINSET0 0x9070 /* Gainset 0 */
#define AGNX_GCR_GAINSET1 0x9074 /* Gainset 1 */
#define AGNX_GCR_GAINSET2 0x9078 /* Gainset 2 */
#define AGNX_GCR_MAXRXTIME11A 0x907c /* maximum rx time 11a */
#define AGNX_GCR_MAXRXTIME11B 0x9080 /* maximum rx time 11b */
#define AGNX_GCR_CORRTIME 0x9084 /* correction time */
/* reset the subsystem, 0 = disable, 1 = enable */
#define AGNX_GCR_RSTGCTL 0x9088 /* reset gain control */
/* channel receiving */
#define AGNX_GCR_RXCHANEL 0x908c /* receive channel */
#define AGNX_GCR_NOISE0 0x9090 /* Noise 0 */
#define AGNX_GCR_NOISE1 0x9094 /* Noise 1 */
#define AGNX_GCR_NOISE2 0x9098 /* Noise 2 */
#define AGNX_GCR_SIGHTH 0x909c /* Signal High Threshold */
#define AGNX_GCR_SIGLTH 0x90a0 /* Signal Low Threshold */
#define AGNX_GCR_CORRDROP 0x90a4 /* correction drop */
/* threshold of tertiay antenna */
#define AGNX_GCR_THCD 0x90a8 /* threshold? CD */
#define AGNX_GCR_THCS 0x90ac /* threshold? CS */
#define AGNX_GCR_MAXPOWDIFF 0x90b8 /* maximum power difference */
#define AGNX_GCR_TRACNT4 0x90ec /* Transition Count 4 */
#define AGNX_GCR_TRACNT5 0x90f0 /* transition count 5 */
#define AGNX_GCR_TRACNT6 0x90f4 /* transition count 6 */
#define AGNX_GCR_TRACNT7 0x90f8 /* transition coutn 7 */
#define AGNX_GCR_TESTBUS 0x911c /* test bus */
#define AGNX_GCR_CHAINNUM 0x9120 /* Number of Chains */
#define AGNX_GCR_ANTCFG 0x9124 /* Antenna Config */
#define AGNX_GCR_THJUMP 0x912c /* threhold jump */
#define AGNX_GCR_THPOWER 0x9130 /* threshold power */
#define AGNX_GCR_THPOWCLIP 0x9134 /* threshold power clip*/
#define AGNX_GCR_FORCECTLCLK 0x9138 /* Force Gain Control Clock */
#define AGNX_GCR_GAINSETWRITE 0x913c /* Gainset Write */
#define AGNX_GCR_THD0BTFEST 0x9140 /* threshold d0 b tf estimate */
#define AGNX_GCR_THRX11BPOWMIN 0x9144 /* threshold rx 11b power minimum */
#define AGNX_GCR_0X14c 0x914c /* ?? */
#define AGNX_GCR_0X150 0x9150 /* ?? */
#define AGNX_GCR_RXOVERIDE 0x9194 /* recieve override */
#define AGNX_GCR_WATCHDOG 0x91b0 /* watchdog timeout */
/* Spi Interface */
#define AGNX_SPI_BASE 0xdc00
#define AGNX_SPI_CFG 0xdc00 /* spi configuration */
/* Only accept 16 bits */
#define AGNX_SPI_WMSW 0xdc04 /* write most significant word */
/* Only accept 16 bits */
#define AGNX_SPI_WLSW 0xdc08 /* write least significant word */
#define AGNX_SPI_CTL 0xdc0c /* spi control */
#define AGNX_SPI_RMSW 0xdc10 /* read most significant word */
#define AGNX_SPI_RLSW 0xdc14 /* read least significant word */
/* SPI Control Mask */
#define SPI_READ_CTL 0x4000 /* read control */
#define SPI_BUSY_CTL 0x8000 /* busy control */
/* RF and synth chips in spi */
#define RF_CHIP0 0x400
#define RF_CHIP1 0x800
#define RF_CHIP2 0x1000
#define SYNTH_CHIP 0x2000
/* Unknown register */
#define AGNX_UNKNOWN_BASE 0x7800
/* FIXME MonitorGain */
#define AGNX_MONGCR_BASE 0x12000
/* Gain Table */
#define AGNX_GAIN_TABLE 0x12400
/* The initial FIR coefficient table */
#define AGNX_FIR_BASE 0x19804
#define AGNX_ENGINE_LOOKUP_TBL 0x800
/* eeprom commands */
#define EEPROM_CMD_NULL 0x0 /* NULL */
#define EEPROM_CMD_WRITE 0x2 /* write */
#define EEPROM_CMD_READ 0x3 /* read */
#define EEPROM_CMD_STATUSREAD 0x5 /* status register read */
#define EEPROM_CMD_WRITEENABLE 0x6 /* write enable */
#define EEPROM_CMD_CONFIGURE 0x7 /* configure */
#define EEPROM_DATAFORCOFIGURE 0x6 /* ??? */
/* eeprom address */
#define EEPROM_ADDR_SUBVID 0x0 /* Sub Vendor ID */
#define EEPROM_ADDR_SUBSID 0x2 /* Sub System ID */
#define EEPROM_ADDR_MACADDR 0x146 /* MAC Address */
#define EEPROM_ADDR_LOTYPE 0x14f /* LO type */
/* Layout of a single EEPROM access command sent over the serial
 * interface (see read_from_eeprom()). */
struct agnx_eeprom {
	u8 data;	/* data byte read or written */
	u16 address;	/* address in EEPROM */
	u8 cmd;		/* command, unknown, status */
} __attribute__((__packed__));
#define AGNX_EEPROM_COMMAND_SHIFT 5
#define AGNX_EEPROM_COMMAND_STAT 0x01
void disable_receiver(struct agnx_priv *priv);
void enable_receiver(struct agnx_priv *priv);
u8 read_from_eeprom(struct agnx_priv *priv, u16 address);
void agnx_hw_init(struct agnx_priv *priv);
int agnx_hw_reset(struct agnx_priv *priv);
int agnx_set_ssid(struct agnx_priv *priv, u8 *ssid, size_t ssid_len);
void agnx_set_bssid(struct agnx_priv *priv, u8 *bssid);
void enable_power_saving(struct agnx_priv *priv);
void disable_power_saving(struct agnx_priv *priv);
void calibrate_antenna_period(unsigned long data);
#endif /* AGNX_PHY_H_ */

894
drivers/staging/agnx/rf.c Normal file
View File

@ -0,0 +1,894 @@
/**
 * Airgo MIMO wireless driver
 *
 * Copyright (c) 2007 Li YanBo <dreamfly281@gmail.com>
 *
 * Thanks to Jeff Williams <angelbane@gmail.com> for doing the reverse
 * engineering work and publishing the SPECS at
 * http://airgo.wdwconsulting.net/mymoin
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation;
 */
#include <linux/pci.h>
#include <linux/delay.h>
#include "agnx.h"
#include "debug.h"
#include "phy.h"
#include "table.h"
/* FIXME! */
static inline void spi_write(void __iomem *region, u32 chip_ids, u32 sw,
			u16 size, u32 control)
{
	u32 word_lo = sw & 0xffff;	/* least significant 16 bits */
	u32 word_hi = sw >> 16;		/* most significant 16 bits */

	/* The 32-bit payload is split across the two SPI write
	 * registers: least significant word first, then most
	 * significant word. */
	iowrite32(word_lo, region + AGNX_SPI_WLSW);
	iowrite32(word_hi, region + AGNX_SPI_WMSW);

	/* Start the transfer: chip select(s), transfer size and the
	 * busy/control flags all live in the control register. */
	iowrite32(chip_ids | size | control, region + AGNX_SPI_CTL);

	/* Wait for the busy control to clear. */
	spi_delay();
}
/*
 * Write a word to an SPI synth-chip register.
 */
static inline void spi_sy_write(void __iomem *region, u32 chip_ids, u32 sw)
{
	/* FIXME the size 0x15 is a magic value*/
	spi_write(region, chip_ids, sw, 0x15, SPI_BUSY_CTL);
}
/*
 * Write a word to an SPI RF-chip register.
 */
static inline void spi_rf_write(void __iomem *region, u32 chip_ids, u32 sw)
{
	/* FIXME the size 0xd is a magic value*/
	spi_write(region, chip_ids, sw, 0xd, SPI_BUSY_CTL);
} /* spi_rf_write */
/*
 * Issue an SPI transfer with the Read Control bit set; the reply is
 * fetched afterwards from AGNX_SPI_RLSW/AGNX_SPI_RMSW by the caller.
 */
inline void spi_rc_write(void __iomem *region, u32 chip_ids, u32 sw)
{
	/* FIXME the size 0xe5 is a magic value */
	spi_write(region, chip_ids, sw, 0xe5, SPI_BUSY_CTL|SPI_READ_CTL);
}
/* Count how many RF chains respond to the probe command. */
static int get_active_chains(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	static const u32 chips[] = { RF_CHIP0, RF_CHIP1, RF_CHIP2 };
	unsigned int i;
	int count = 0;
	u32 reg;
	AGNX_TRACE;

	/* Probe each RF chip with command 0x21; a readback of 1 means
	 * the corresponding chain is present and active. */
	for (i = 0; i < 3; i++) {
		spi_rc_write(ctl, chips[i], 0x21);
		reg = agnx_read32(ctl, AGNX_SPI_RLSW);
		if (reg == 1)
			count++;
	}

	/* Sanity check: command 0x26 on chip 0 is expected to echo 0x33. */
	spi_rc_write(ctl, RF_CHIP0, 0x26);
	reg = agnx_read32(ctl, AGNX_SPI_RLSW);
	if (0x33 != reg)
		printk(KERN_WARNING PFX "Unmatched rf chips result\n");

	return count;
} /* get_active_chains */
/*
 * Initialize the RF chips: probe for active chains and program the
 * initial RF configuration.  The SPI clock is deliberately stepped
 * 200ns -> 50ns -> fastest; the write order below matters.
 */
void rf_chips_init(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;
	int num;
	AGNX_TRACE;

	/* Rev-1 boards need an extra GPIO line raised first
	 * (presumably band/RF enable — TODO confirm against spec). */
	if (priv->revid == 1) {
		reg = agnx_read32(ctl, AGNX_SYSITF_GPIOUT);
		reg |= 0x8;
		agnx_write32(ctl, AGNX_SYSITF_GPIOUT, reg);
	}
	/* Set SPI clock speed to 200NS */
	reg = agnx_read32(ctl, AGNX_SPI_CFG);
	reg &= ~0xF;
	reg |= 0x3;
	agnx_write32(ctl, AGNX_SPI_CFG, reg);

	/* Set SPI clock speed to 50NS */
	reg = agnx_read32(ctl, AGNX_SPI_CFG);
	reg &= ~0xF;
	reg |= 0x1;
	agnx_write32(ctl, AGNX_SPI_CFG, reg);

	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1101);

	num = get_active_chains(priv);
	printk(KERN_INFO PFX "Active chains are %d\n", num);

	/* Clear the SPI clock bits entirely before the final command. */
	reg = agnx_read32(ctl, AGNX_SPI_CFG);
	reg &= ~0xF;
	agnx_write32(ctl, AGNX_SPI_CFG, reg);

	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1908);
} /* rf_chips_init */
/*
 * Per-channel synth chip settings, indexed as [channel][reg_num] by
 * channel_tbl_write().  Column 0 holds the channel number itself; the
 * remaining columns are raw values for synth registers 1-8 (registers
 * 1, 3, 5, 6 and 8 are the ones actually programmed — see
 * synth_freq_set()).  Row 0 is a placeholder so the table can be
 * indexed directly by channel number.
 */
static u32 channel_tbl[15][9] = {
	{0,  0x00, 0x00, 0x00,  0x00, 0x00,  0x00, 0x00, 0x00},
	{1,  0x00, 0x00, 0x624, 0x00, 0x1a4, 0x28, 0x00, 0x1e},
	{2,  0x00, 0x00, 0x615, 0x00, 0x1ae, 0x28, 0x00, 0x1e},
	{3,  0x00, 0x00, 0x61a, 0x00, 0x1ae, 0x28, 0x00, 0x1e},
	{4,  0x00, 0x00, 0x61f, 0x00, 0x1ae, 0x28, 0x00, 0x1e},
	{5,  0x00, 0x00, 0x624, 0x00, 0x1ae, 0x28, 0x00, 0x1e},
	{6,  0x00, 0x00, 0x61f, 0x00, 0x1b3, 0x28, 0x00, 0x1e},
	{7,  0x00, 0x00, 0x624, 0x00, 0x1b3, 0x28, 0x00, 0x1e},
	{8,  0x00, 0x00, 0x629, 0x00, 0x1b3, 0x28, 0x00, 0x1e},
	{9,  0x00, 0x00, 0x624, 0x00, 0x1b8, 0x28, 0x00, 0x1e},
	{10, 0x00, 0x00, 0x629, 0x00, 0x1b8, 0x28, 0x00, 0x1e},
	{11, 0x00, 0x00, 0x62e, 0x00, 0x1b8, 0x28, 0x00, 0x1e},
	{12, 0x00, 0x00, 0x633, 0x00, 0x1b8, 0x28, 0x00, 0x1e},
	{13, 0x00, 0x00, 0x628, 0x00, 0x1b8, 0x28, 0x00, 0x1e},
	{14, 0x00, 0x00, 0x644, 0x00, 0x1b8, 0x28, 0x00, 0x1e},
};
/* Program one synth-chip register with the table value for @channel. */
static inline void
channel_tbl_write(struct agnx_priv *priv, unsigned int channel, unsigned int reg_num)
{
	void __iomem *ctl = priv->ctl;
	u32 cmd;

	/* Synth command format: register value in the upper bits,
	 * target register number in the low nibble. */
	cmd = (channel_tbl[channel][reg_num] << 4) | reg_num;
	spi_sy_write(ctl, SYNTH_CHIP, cmd);
}
/* Program the synth chip with the settings for @channel. */
static void synth_freq_set(struct agnx_priv *priv, unsigned int channel)
{
	void __iomem *ctl = priv->ctl;
	/* Synth registers must be written in exactly this order. */
	static const unsigned int synth_regs[] = { 1, 3, 6, 5, 8 };
	unsigned int i;
	u32 reg;
	AGNX_TRACE;

	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, 0x1201);

	/* Switch the SPI clock to 50ns. */
	reg = agnx_read32(ctl, AGNX_SPI_CFG);
	reg &= ~0xF;
	reg |= 0x1;
	agnx_write32(ctl, AGNX_SPI_CFG, reg);

	/* Write 0x00c0 to LSW and 0x3 to MSW of the synth chip. */
	spi_sy_write(ctl, SYNTH_CHIP, 0x300c0);
	spi_sy_write(ctl, SYNTH_CHIP, 0x32);

	/* Program the channel-dependent synth registers. */
	for (i = 0; i < 5; i++)
		channel_tbl_write(priv, channel, synth_regs[i]);

	/* FIXME Clear the SPI clock bits again. */
	reg = agnx_read32(ctl, AGNX_SPI_CFG);
	reg &= ~0xf;
	agnx_write32(ctl, AGNX_SPI_CFG, reg);
} /* synth_chip_init */
/*
 * Program the gain-control block for the given number of antennae.
 *
 * Each supported configuration (1, 2 or 3 antennae) loads its own set
 * of listen/measure/active/capture counts, backoff values and signal
 * thresholds.  An unsupported count only logs a warning and leaves the
 * registers untouched.
 */
static void antenna_init(struct agnx_priv *priv, int num_antenna)
{
	void __iomem *ctl = priv->ctl;

	switch (num_antenna) {
	case 1:
		agnx_write32(ctl, AGNX_GCR_NLISTANT, 1);
		agnx_write32(ctl, AGNX_GCR_NMEASANT, 1);
		agnx_write32(ctl, AGNX_GCR_NACTIANT, 1);
		agnx_write32(ctl, AGNX_GCR_NCAPTANT, 1);

		agnx_write32(ctl, AGNX_GCR_ANTCFG, 7);
		agnx_write32(ctl, AGNX_GCR_BOACT, 34);
		agnx_write32(ctl, AGNX_GCR_BOINACT, 34);
		agnx_write32(ctl, AGNX_GCR_BODYNA, 30);

		agnx_write32(ctl, AGNX_GCR_THD0A, 125);
		agnx_write32(ctl, AGNX_GCR_THD0AL, 100);
		agnx_write32(ctl, AGNX_GCR_THD0B, 90);
		agnx_write32(ctl, AGNX_GCR_THD0BTFEST, 80);

		agnx_write32(ctl, AGNX_GCR_SIGHTH, 100);
		agnx_write32(ctl, AGNX_GCR_SIGLTH, 16);
		break;
	case 2:
		agnx_write32(ctl, AGNX_GCR_NLISTANT, 2);
		agnx_write32(ctl, AGNX_GCR_NMEASANT, 2);
		agnx_write32(ctl, AGNX_GCR_NACTIANT, 2);
		agnx_write32(ctl, AGNX_GCR_NCAPTANT, 2);

		agnx_write32(ctl, AGNX_GCR_ANTCFG, 15);
		agnx_write32(ctl, AGNX_GCR_BOACT, 36);
		agnx_write32(ctl, AGNX_GCR_BOINACT, 36);
		agnx_write32(ctl, AGNX_GCR_BODYNA, 32);

		agnx_write32(ctl, AGNX_GCR_THD0A, 120);
		agnx_write32(ctl, AGNX_GCR_THD0AL, 100);
		agnx_write32(ctl, AGNX_GCR_THD0B, 80);
		agnx_write32(ctl, AGNX_GCR_THD0BTFEST, 70);

		agnx_write32(ctl, AGNX_GCR_SIGHTH, 100);
		agnx_write32(ctl, AGNX_GCR_SIGLTH, 32);
		break;
	case 3:
		agnx_write32(ctl, AGNX_GCR_NLISTANT, 3);
		agnx_write32(ctl, AGNX_GCR_NMEASANT, 3);
		agnx_write32(ctl, AGNX_GCR_NACTIANT, 3);
		agnx_write32(ctl, AGNX_GCR_NCAPTANT, 3);

		agnx_write32(ctl, AGNX_GCR_ANTCFG, 31);
		agnx_write32(ctl, AGNX_GCR_BOACT, 36);
		agnx_write32(ctl, AGNX_GCR_BOINACT, 36);
		agnx_write32(ctl, AGNX_GCR_BODYNA, 32);

		agnx_write32(ctl, AGNX_GCR_THD0A, 100);
		agnx_write32(ctl, AGNX_GCR_THD0AL, 100);
		agnx_write32(ctl, AGNX_GCR_THD0B, 70);
		agnx_write32(ctl, AGNX_GCR_THD0BTFEST, 70);

		agnx_write32(ctl, AGNX_GCR_SIGHTH, 100);
		agnx_write32(ctl, AGNX_GCR_SIGLTH, 48);
		break;
	default:
		/* Fixed typo in the warning message ("Unknow"). */
		printk(KERN_WARNING PFX "Unknown antenna number\n");
		break;
	}
} /* antenna_init */
/*
 * Reconfigure the RF chips for the requested chain count.
 *
 * The current RF state is read back twice (commands 0x20 and 0x22 on
 * chip 0) and, based on each answer together with @chain, a specific
 * combination of per-chip commands is issued.  The exact command
 * values come from the reverse-engineered SPECS; the branch order is
 * significant and must not be rearranged.
 */
static void chain_update(struct agnx_priv *priv, u32 chain)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;
	AGNX_TRACE;

	/* First readback: command 0x20 on chip 0. */
	spi_rc_write(ctl, RF_CHIP0, 0x20);
	reg = agnx_read32(ctl, AGNX_SPI_RLSW);

	if (reg == 0x4)
		spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, reg|0x1000);
	else if (reg != 0x0)
		spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, reg|0x1000);
	else {
		/* reg == 0: the chip/override setup depends on the
		 * desired chain configuration. */
		if (chain == 3 || chain == 6) {
			spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, reg|0x1000);
			agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0x0);
		} else if (chain == 2 || chain == 4) {
			spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, reg|0x1000);
			spi_rf_write(ctl, RF_CHIP2, 0x1005);
			agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0x824);
		} else if (chain == 1) {
			spi_rf_write(ctl, RF_CHIP0, reg|0x1000);
			spi_rf_write(ctl, RF_CHIP1|RF_CHIP2, 0x1004);
			agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0xc36);
		}
	}

	/* Second readback: command 0x22 on chip 0. */
	spi_rc_write(ctl, RF_CHIP0, 0x22);
	reg = agnx_read32(ctl, AGNX_SPI_RLSW);

	switch (reg) {
	case 0:
		spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1005);
		break;
	case 1:
		spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, 0x1201);
		break;
	case 2:
		if (chain == 6 || chain == 4) {
			spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, 0x1202);
			spi_rf_write(ctl, RF_CHIP2, 0x1005);
		} else if (chain < 3) {
			spi_rf_write(ctl, RF_CHIP0, 0x1202);
			spi_rf_write(ctl, RF_CHIP1|RF_CHIP2, 0x1005);
		}
		break;
	default:
		if (chain == 3) {
			spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, 0x1203);
			spi_rf_write(ctl, RF_CHIP2, 0x1201);
		} else if (chain == 2) {
			spi_rf_write(ctl, RF_CHIP0, 0x1203);
			spi_rf_write(ctl, RF_CHIP2, 0x1200);
			spi_rf_write(ctl, RF_CHIP1, 0x1201);
		} else if (chain == 1) {
			spi_rf_write(ctl, RF_CHIP0, 0x1203);
			spi_rf_write(ctl, RF_CHIP1|RF_CHIP2, 0x1200);
		} else if (chain == 4) {
			spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, 0x1203);
			spi_rf_write(ctl, RF_CHIP2, 0x1201);
		} else {
			spi_rf_write(ctl, RF_CHIP0, 0x1203);
			spi_rf_write(ctl, RF_CHIP1|RF_CHIP2, 0x1201);
		}
	}
} /* chain_update */
/*
 * Configure the antennae: disable TX management, load the gain-control
 * defaults and update the RF chain configuration.
 */
static void antenna_config(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;
	AGNX_TRACE;

	/* Write 0x0 to the TX Management Control Register Enable bit */
	reg = agnx_read32(ctl, AGNX_TXM_CTL);
	reg &= ~0x1;
	agnx_write32(ctl, AGNX_TXM_CTL, reg);

	/* FIXME */
	/* Set initial value based on number of Antennae */
	antenna_init(priv, 3);

	/* FIXME Update Power Templates for current valid Stations */
	/* sta_power_init(priv, 0);*/

	/* FIXME the number of chains should get from eeprom*/
	chain_update(priv, AGNX_CHAINS_MAX);
} /* antenna_config */
/*
 * Calibrate the TX local oscillator.
 *
 * Runs the residual-DC and TX-LO calibration modes of the antenna
 * calibration interface, then toggles a pair of undocumented registers
 * (0x11000/0xec50) around a power-saving cycle.  The write sequence is
 * order-sensitive; do not rearrange.
 */
void calibrate_oscillator(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;
	AGNX_TRACE;

	spi_rc_write(ctl, RF_CHIP0|RF_CHIP1, 0x1201);
	reg = agnx_read32(ctl, AGNX_GCR_GAINSET1);
	reg |= 0x10;
	agnx_write32(ctl, AGNX_GCR_GAINSET1, reg);

	agnx_write32(ctl, AGNX_GCR_GAINSETWRITE, 1);
	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 1);

	agnx_write32(ctl, AGNX_ACI_LEN, 0x3ff);
	agnx_write32(ctl, AGNX_ACI_TIMER1, 0x27);
	agnx_write32(ctl, AGNX_ACI_TIMER2, 0x27);

	/* (Residual DC Calibration) to Calibration Mode */
	agnx_write32(ctl, AGNX_ACI_MODE, 0x2);

	spi_rc_write(ctl, RF_CHIP0|RF_CHIP1, 0x1004);
	agnx_write32(ctl, AGNX_ACI_LEN, 0x3ff);
	/* (TX LO Calibration) to Calibration Mode */
	agnx_write32(ctl, AGNX_ACI_MODE, 0x4);

	/* do { } while (0) is used purely as a scope for the saved
	 * register values; it executes exactly once. */
	do {
		u32 reg1, reg2, reg3;
		/* Enable Power Saving Control */
		enable_power_saving(priv);
		/* Save the following registers to restore */
		reg1 = ioread32(ctl + 0x11000);
		reg2 = ioread32(ctl + 0xec50);
		reg3 = ioread32(ctl + 0xec54);
		wmb();
		agnx_write32(ctl, 0x11000, 0xcfdf);
		agnx_write32(ctl, 0xec50, 0x70);
		/* Restore the registers */
		agnx_write32(ctl, 0x11000, reg1);
		agnx_write32(ctl, 0xec50, reg2);
		agnx_write32(ctl, 0xec54, reg3);
		/* Disable Power Saving Control */
		disable_power_saving(priv);
	} while (0);

	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0);
} /* calibrate_oscillator */
/*
 * Tune the radio to @channel (1-based index into the registered band).
 *
 * This performs the full channel-change bring-up: SPI clock setup,
 * band selection, synth programming, PLL/soft-reset dance, gain and
 * FIR table loads, oscillator calibration, antenna configuration and
 * finally radio/LED enable.  Heavily order-dependent — derived from
 * the reverse-engineered SPECS; do not reorder writes.
 */
static void radio_channel_set(struct agnx_priv *priv, unsigned int channel)
{
	void __iomem *ctl = priv->ctl;
	unsigned int freq = priv->band.channels[channel - 1].center_freq;
	u32 reg;
	AGNX_TRACE;

	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, 0x1201);
	/* Set SPI Clock to 50 Ns */
	reg = agnx_read32(ctl, AGNX_SPI_CFG);
	reg &= ~0xF;
	reg |= 0x1;
	agnx_write32(ctl, AGNX_SPI_CFG, reg);

	/* Clear the Disable Tx interrupt bit in Interrupt Mask */
	/* reg = agnx_read32(ctl, AGNX_INT_MASK); */
	/* reg &= ~IRQ_TX_DISABLE; */
	/* agnx_write32(ctl, AGNX_INT_MASK, reg); */

	/* Band Selection */
	reg = agnx_read32(ctl, AGNX_SYSITF_GPIOUT);
	reg |= 0x8;
	agnx_write32(ctl, AGNX_SYSITF_GPIOUT, reg);

	/* FIXME Set the SiLabs Chip Frequency */
	synth_freq_set(priv, channel);

	reg = agnx_read32(ctl, AGNX_PM_SOFTRST);
	reg |= 0x80100030;
	agnx_write32(ctl, AGNX_PM_SOFTRST, reg);
	reg = agnx_read32(ctl, AGNX_PM_PLLCTL);
	reg |= 0x20009;
	agnx_write32(ctl, AGNX_PM_PLLCTL, reg);

	agnx_write32(ctl, AGNX_SYSITF_GPIOUT, 0x5);

	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1100);

	/* Load the MonitorGain Table */
	monitor_gain_table_init(priv);

	/* Load the TX Fir table */
	tx_fir_table_init(priv);

	reg = agnx_read32(ctl, AGNX_PM_PMCTL);
	reg |= 0x8;
	agnx_write32(ctl, AGNX_PM_PMCTL, reg);

	/* Read back chip state; result is fetched but unused
	 * (presumably the read itself has a side effect — TODO confirm). */
	spi_rc_write(ctl, RF_CHIP0|RF_CHIP1, 0x22);
	udelay(80);
	reg = agnx_read32(ctl, AGNX_SPI_RLSW);

	agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0xff);
	agnx_write32(ctl, AGNX_GCR_DISCOVMOD, 0x3);

	/* Undocumented registers (0xec50, 0x11008-0x11014). */
	reg = agnx_read32(ctl, 0xec50);
	reg |= 0x4f;
	agnx_write32(ctl, 0xec50, reg);
	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, 0x1201);
	agnx_write32(ctl, 0x11008, 0x1);
	agnx_write32(ctl, 0x1100c, 0x0);
	agnx_write32(ctl, 0x11008, 0x0);
	agnx_write32(ctl, 0xec50, 0xc);

	agnx_write32(ctl, AGNX_GCR_DISCOVMOD, 0x3);
	agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0x0);
	agnx_write32(ctl, 0x11010, 0x6e);
	agnx_write32(ctl, 0x11014, 0x6c);
	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, 0x1201);

	/* Calibrate the Antenna */
	/* antenna_calibrate(priv); */
	/* Calibrate the TxLocalOscillator */
	calibrate_oscillator(priv);

	reg = agnx_read32(ctl, AGNX_PM_PMCTL);
	reg &= ~0x8;
	agnx_write32(ctl, AGNX_PM_PMCTL, reg);
	agnx_write32(ctl, AGNX_GCR_GAININIT, 0xa);
	agnx_write32(ctl, AGNX_GCR_THCD, 0x0);

	agnx_write32(ctl, 0x11018, 0xb);
	agnx_write32(ctl, AGNX_GCR_DISCOVMOD, 0x0);

	/* Write Frequency to Gain Control Channel */
	agnx_write32(ctl, AGNX_GCR_RXCHANEL, freq);
	/* Write 0x140000/Freq to 0x9c08 */
	reg = 0x140000/freq;
	agnx_write32(ctl, 0x9c08, reg);

	/* Release the reset/PLL bits set earlier. */
	reg = agnx_read32(ctl, AGNX_PM_SOFTRST);
	reg &= ~0x80100030;
	agnx_write32(ctl, AGNX_PM_SOFTRST, reg);

	reg = agnx_read32(ctl, AGNX_PM_PLLCTL);
	reg &= ~0x20009;
	reg |= 0x1;
	agnx_write32(ctl, AGNX_PM_PLLCTL, reg);

	agnx_write32(ctl, AGNX_ACI_MODE, 0x0);

	/* FIXME According to Number of Chains: */
	/* 1. 1: */
	/*  1. Write 0x1203 to RF Chip 0 */
	/*  2. Write 0x1200 to RF Chips 1 +2 */
	/* 2. 2: */
	/*  1. Write 0x1203 to RF Chip 0 */
	/*  2. Write 0x1200 to RF Chip 2 */
	/*  3. Write 0x1201 to RF Chip 1 */
	/* 3. 3: */
	/*  1. Write 0x1203 to RF Chip 0 */
	/*  2. Write 0x1201 to RF Chip 1 + 2 */
	/* 4. 4: */
	/*  1. Write 0x1203 to RF Chip 0 + 1 */
	/*  2. Write 0x1200 to RF Chip 2 */
	/* 5. 6: */
	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, 0x1203);
	spi_rf_write(ctl, RF_CHIP2, 0x1201);

	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1000);
	agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0x0);

	/* FIXME Set the Disable Tx interrupt bit in Interrupt Mask
	   (Or 0x20000 to Interrupt Mask) */
	/* reg = agnx_read32(ctl, AGNX_INT_MASK); */
	/* reg |= IRQ_TX_DISABLE; */
	/* agnx_write32(ctl, AGNX_INT_MASK, reg); */

	/* Pulse the gain-control reset. */
	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x1);
	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x0);

	/* Configure the Antenna */
	antenna_config(priv);

	/* Write 0x0 to Discovery Mode Enable detect G, B, A packet? */
	agnx_write32(ctl, AGNX_GCR_DISCOVMOD, 0);

	reg = agnx_read32(ctl, AGNX_RXM_REQRATE);
	reg |= 0x80000000;
	agnx_write32(ctl, AGNX_RXM_REQRATE, reg);

	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x1);
	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x0);

	/* enable radio on and the power LED */
	reg = agnx_read32(ctl, AGNX_SYSITF_GPIOUT);
	reg &= ~0x1;
	reg |= 0x2;
	agnx_write32(ctl, AGNX_SYSITF_GPIOUT, reg);

	reg = agnx_read32(ctl, AGNX_TXM_CTL);
	reg |= 0x1;
	agnx_write32(ctl, AGNX_TXM_CTL, reg);
} /* radio_channel_set */
/*
 * Calibrate the RF chips' baseband filter.
 *
 * Sends the filter-calibration command words (0x1700 then 0x1001) to all
 * three RF chips over SPI, then gates the GCR force-control clock off
 * while writing 0x27 to each chip's register-control port, and re-enables
 * the clock afterwards.  NOTE(review): the write order presumably follows
 * the reverse-engineered vendor sequence -- do not reorder.
 */
static void base_band_filter_calibrate(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;

	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1700);
	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1001);
	/* stop the control clock while programming the RC registers */
	agnx_write32(ctl, AGNX_GCR_FORCECTLCLK, 0x0);
	spi_rc_write(ctl, RF_CHIP0, 0x27);
	spi_rc_write(ctl, RF_CHIP1, 0x27);
	spi_rc_write(ctl, RF_CHIP2, 0x27);
	/* restart the control clock */
	agnx_write32(ctl, AGNX_GCR_FORCECTLCLK, 0x1);
}
/* Select calibration chain @chain and print its measured ACI offset. */
static void print_offset(struct agnx_priv *priv, u32 chain)
{
	void __iomem *ctl = priv->ctl;
	u32 offset;

	iowrite32((chain), ctl + AGNX_ACI_SELCHAIN);
	udelay(10);	/* let the chain selection settle before reading */
	offset = (ioread32(ctl + AGNX_ACI_OFFSET));
	printk(PFX "Chain is 0x%x, Offset is 0x%x\n", chain, offset);
}
void print_offsets(struct agnx_priv *priv)
{
print_offset(priv, 0);
print_offset(priv, 4);
print_offset(priv, 1);
print_offset(priv, 5);
print_offset(priv, 2);
print_offset(priv, 6);
}
/* Per-chain state tracked by the do_calibration() loop. */
struct chains {
	u32 cali;	/* calibrate value currently pushed to the RF chip */
#define NEED_CALIBRATE		0
#define SUCCESS_CALIBRATE	1
	int status;	/* NEED_CALIBRATE until the measured offset < 0xf */
};
/*
 * Push the pending calibration value of chain @num to its RF chip.
 * Chains 0-2 use command base 0x1400, chains 4-6 use 0x1500; chains
 * 0/4 map to RF chip 0, 1/5 to chip 1, 2/6 to chip 2.  There is no
 * chain 3; any other chain number is a driver bug.
 */
static void chain_calibrate(struct agnx_priv *priv, struct chains *chains,
			    unsigned int num)
{
	static const u32 chip_for[] = { RF_CHIP0, RF_CHIP1, RF_CHIP2 };
	void __iomem *ctl = priv->ctl;
	u32 word = chains[num].cali;

	word |= (num < 3) ? 0x1400 : 0x1500;

	if (num == 3 || num > 6)
		BUG();

	spi_rf_write(ctl, chip_for[num % 4], word);
} /* chain_calibrate */
/*
 * Read back the ACI offset for chain @num and update its calibration
 * state.  An offset below 0xf marks the chain calibrated; otherwise the
 * calibration value is stepped -- downwards for chains 0-2 (wrapping
 * 0 -> 0xff) and upwards for chains 4-6 -- and the chain is flagged for
 * another calibration round.
 *
 * Fix: specifier order was "static void inline", which gcc warns about;
 * the canonical order is "static inline void".  (Function name typo
 * "calibrete" kept -- callers use it.)
 */
static inline void get_calibrete_value(struct agnx_priv *priv, struct chains *chains,
				       unsigned int num)
{
	void __iomem *ctl = priv->ctl;
	u32 offset;

	iowrite32((num), ctl + AGNX_ACI_SELCHAIN);
	/* FIXME magic settle delay */
	udelay(10);
	offset = (ioread32(ctl + AGNX_ACI_OFFSET));

	if (offset < 0xf) {
		chains[num].status = SUCCESS_CALIBRATE;
		return;
	}

	if (num == 0 || num == 1 || num == 2) {
		if (chains[num].cali == 0)
			chains[num].cali = 0xff;	/* wrap around */
		else
			chains[num].cali--;
	} else
		chains[num].cali++;

	chains[num].status = NEED_CALIBRATE;
}
/*
 * Poll the ACI status register until the calibration-measure engine
 * reports done (0x4000), giving up after 100 polls of 10us each.
 *
 * Bug fix: the original tested "if (!i)" after "while (i--)".  On a
 * timeout i wraps to UINT_MAX so the failure was never reported, and a
 * success on the final iteration (i == 0 after the decrement) was
 * wrongly reported as a failure.  A counted for-loop with an early
 * return reports the timeout correctly.
 */
static inline void calibra_delay(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;
	unsigned int i;

	wmb();		/* order prior config writes before polling */
	for (i = 0; i < 100; i++) {
		reg = (ioread32(ctl + AGNX_ACI_STATUS));
		if (reg == 0x4000)
			return;
		udelay(10);
	}
	printk(PFX "calibration failed\n");
}
/*
 * Run the per-chain offset calibration loop.
 *
 * The six existing chains (0-2 and 4-6; there is no chain 3) all start
 * from calibration value 0x7f.  Each pass writes the pending values to
 * the RF chips, triggers one calibration measurement, then reads the
 * offsets back; passes repeat until every chain reports success or
 * 0x300 attempts elapse.
 *
 * Fix: corrected log-message typo "Clibrate" -> "Calibrate".
 */
void do_calibration(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	struct chains chains[7];
	unsigned int i, j;
	AGNX_TRACE;

	for (i = 0; i < 7; i++) {
		if (i == 3)
			continue;

		chains[i].cali = 0x7f;
		chains[i].status = NEED_CALIBRATE;
	}

	/* FIXME 0x300 is a magic number */
	for (j = 0; j < 0x300; j++) {
		if (chains[0].status == SUCCESS_CALIBRATE &&
		    chains[1].status == SUCCESS_CALIBRATE &&
		    chains[2].status == SUCCESS_CALIBRATE &&
		    chains[4].status == SUCCESS_CALIBRATE &&
		    chains[5].status == SUCCESS_CALIBRATE &&
		    chains[6].status == SUCCESS_CALIBRATE)
			break;

		/* Attention, there is no chain 3 */
		for (i = 0; i < 7; i++) {
			if (i == 3)
				continue;
			if (chains[i].status == NEED_CALIBRATE)
				chain_calibrate(priv, chains, i);
		}
		/* Write 0x1 to Calibration Measure */
		iowrite32((0x1), ctl + AGNX_ACI_MEASURE);
		calibra_delay(priv);

		for (i = 0; i < 7; i++) {
			if (i == 3)
				continue;
			get_calibrete_value(priv, chains, i);
		}
	}

	printk(PFX "Calibrate times is %d\n", j);
	print_offsets(priv);
} /* do_calibration */
/*
 * Full antenna/receiver calibration sequence.
 *
 * Programs the antenna configuration and back-off/threshold registers,
 * verifies the three RF chips answer consistently over SPI, runs the
 * baseband-filter calibration and then the per-chain offset calibration
 * (do_calibration), and finally trims the gain-set registers and
 * disables the receiver.  NOTE(review): the register values and write
 * order come from the reverse-engineered vendor init sequence -- treat
 * as order-critical and do not reorder.
 */
void antenna_calibrate(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;
	AGNX_TRACE;

	/* antenna listen/measure/active/capture counts and configuration */
	agnx_write32(ctl, AGNX_GCR_NLISTANT, 0x3);
	agnx_write32(ctl, AGNX_GCR_NMEASANT, 0x3);
	agnx_write32(ctl, AGNX_GCR_NACTIANT, 0x3);
	agnx_write32(ctl, AGNX_GCR_NCAPTANT, 0x3);

	agnx_write32(ctl, AGNX_GCR_ANTCFG, 0x1f);

	/* back-off and threshold setup */
	agnx_write32(ctl, AGNX_GCR_BOACT, 0x24);
	agnx_write32(ctl, AGNX_GCR_BOINACT, 0x24);
	agnx_write32(ctl, AGNX_GCR_BODYNA, 0x20);

	agnx_write32(ctl, AGNX_GCR_THD0A, 0x64);
	agnx_write32(ctl, AGNX_GCR_THD0AL, 0x64);
	agnx_write32(ctl, AGNX_GCR_THD0B, 0x46);
	agnx_write32(ctl, AGNX_GCR_THD0BTFEST, 0x3c);

	agnx_write32(ctl, AGNX_GCR_SIGHTH, 0x64);
	agnx_write32(ctl, AGNX_GCR_SIGLTH, 0x30);

	/* sanity check: all RF chips should return identical results */
	spi_rc_write(ctl, RF_CHIP0, 0x20);
	/* Fixme */
	udelay(80);
	/* 1. Should read 0x0 */
	reg = agnx_read32(ctl, AGNX_SPI_RLSW);
	if (0x0 != reg)
		printk(KERN_WARNING PFX "Unmatched rf chips result\n");
	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1000);

	agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0x0);

	spi_rc_write(ctl, RF_CHIP0, 0x22);
	udelay(80);
	reg = agnx_read32(ctl, AGNX_SPI_RLSW);
	if (0x0 != reg)
		printk(KERN_WARNING PFX "Unmatched rf chips result\n");
	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1005);

	/* pulse the GCR reset */
	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x1);
	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x0);

	reg = agnx_read32(ctl, AGNX_PM_SOFTRST);
	reg |= 0x1c000032;
	agnx_write32(ctl, AGNX_PM_SOFTRST, reg);
	reg = agnx_read32(ctl, AGNX_PM_PLLCTL);
	reg |= 0x0003f07;
	agnx_write32(ctl, AGNX_PM_PLLCTL, reg);

	/* NOTE(review): 0xec50 is an undocumented register offset */
	reg = agnx_read32(ctl, 0xec50);
	reg |= 0x40;
	agnx_write32(ctl, 0xec50, reg);

	agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0xff8);
	agnx_write32(ctl, AGNX_GCR_DISCOVMOD, 0x3);

	agnx_write32(ctl, AGNX_GCR_CHAINNUM, 0x6);
	agnx_write32(ctl, 0x19874, 0x0);
	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1700);

	/* Calibrate the BaseBandFilter */
	base_band_filter_calibrate(priv);

	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1002);

	/* gain-set and ACI measurement setup for the calibration run */
	agnx_write32(ctl, AGNX_GCR_GAINSET0, 0x1d);
	agnx_write32(ctl, AGNX_GCR_GAINSET1, 0x1d);
	agnx_write32(ctl, AGNX_GCR_GAINSET2, 0x1d);
	agnx_write32(ctl, AGNX_GCR_GAINSETWRITE, 0x1);

	agnx_write32(ctl, AGNX_ACI_MODE, 0x1);
	agnx_write32(ctl, AGNX_ACI_LEN, 0x3ff);

	agnx_write32(ctl, AGNX_ACI_TIMER1, 0x27);
	agnx_write32(ctl, AGNX_ACI_TIMER2, 0x27);

	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1400);
	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1500);

	/* Measure Calibration */
	agnx_write32(ctl, AGNX_ACI_MEASURE, 0x1);
	calibra_delay(priv);

	/* do calibration */
	do_calibration(priv);

	agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0x0);
	agnx_write32(ctl, AGNX_ACI_TIMER1, 0x21);
	agnx_write32(ctl, AGNX_ACI_TIMER2, 0x27);
	agnx_write32(ctl, AGNX_ACI_LEN, 0xf);

	/* keep only the low nibble of each gain-set register */
	reg = agnx_read32(ctl, AGNX_GCR_GAINSET0);
	reg &= 0xf;
	agnx_write32(ctl, AGNX_GCR_GAINSET0, reg);
	reg = agnx_read32(ctl, AGNX_GCR_GAINSET1);
	reg &= 0xf;
	agnx_write32(ctl, AGNX_GCR_GAINSET1, reg);
	reg = agnx_read32(ctl, AGNX_GCR_GAINSET2);
	reg &= 0xf;
	agnx_write32(ctl, AGNX_GCR_GAINSET2, reg);

	agnx_write32(ctl, AGNX_GCR_GAINSETWRITE, 0x0);

	disable_receiver(priv);
} /* antenna_calibrate */
/*
 * Lightweight antenna re-calibration.
 *
 * Same gain-set / ACI measurement / do_calibration core as
 * antenna_calibrate(), but without the antenna configuration, RF-chip
 * consistency checks, and PLL/soft-reset programming -- and it leaves
 * the receiver ENABLED at the end instead of disabled.
 */
void __antenna_calibrate(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;

	/* Calibrate the BaseBandFilter */
	/* base_band_filter_calibrate(priv); */
	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1002);

	agnx_write32(ctl, AGNX_GCR_GAINSET0, 0x1d);
	agnx_write32(ctl, AGNX_GCR_GAINSET1, 0x1d);
	agnx_write32(ctl, AGNX_GCR_GAINSET2, 0x1d);

	agnx_write32(ctl, AGNX_GCR_GAINSETWRITE, 0x1);

	agnx_write32(ctl, AGNX_ACI_MODE, 0x1);
	agnx_write32(ctl, AGNX_ACI_LEN, 0x3ff);

	agnx_write32(ctl, AGNX_ACI_TIMER1, 0x27);
	agnx_write32(ctl, AGNX_ACI_TIMER2, 0x27);
	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1400);
	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1500);
	/* Measure Calibration */
	agnx_write32(ctl, AGNX_ACI_MEASURE, 0x1);
	calibra_delay(priv);
	do_calibration(priv);
	agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0x0);

	agnx_write32(ctl, AGNX_ACI_TIMER1, 0x21);
	agnx_write32(ctl, AGNX_ACI_TIMER2, 0x27);

	agnx_write32(ctl, AGNX_ACI_LEN, 0xf);

	/* keep only the low nibble of each gain-set register */
	reg = agnx_read32(ctl, AGNX_GCR_GAINSET0);
	reg &= 0xf;
	agnx_write32(ctl, AGNX_GCR_GAINSET0, reg);
	reg = agnx_read32(ctl, AGNX_GCR_GAINSET1);
	reg &= 0xf;
	agnx_write32(ctl, AGNX_GCR_GAINSET1, reg);
	reg = agnx_read32(ctl, AGNX_GCR_GAINSET2);
	reg &= 0xf;
	agnx_write32(ctl, AGNX_GCR_GAINSET2, reg);

	agnx_write32(ctl, AGNX_GCR_GAINSETWRITE, 0x0);

	/* Write 0x3 Gain Control Discovery Mode */
	enable_receiver(priv);
}
int agnx_set_channel(struct agnx_priv *priv, unsigned int channel)
{
AGNX_TRACE;
printk(KERN_ERR PFX "Channel is %d %s\n", channel, __func__);
radio_channel_set(priv, channel);
return 0;
}

219
drivers/staging/agnx/sta.c Normal file
View File

@ -0,0 +1,219 @@
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include "phy.h"
#include "sta.h"
#include "debug.h"
/*
 * Issue an RX hash-table READ command for station @sta_id and print the
 * command words the hardware hands back.  @reghi/@reglo carry the MAC
 * address halves used to address the entry (low word is masked to its
 * bottom 16 bits before the command bits are OR-ed in).
 */
void hash_read(struct agnx_priv *priv, u32 reghi, u32 reglo, u8 sta_id)
{
	void __iomem *ctl = priv->ctl;

	reglo &= 0xFFFF;
	reglo |= 0x30000000;	/* read command -- cf. 0x1/0x2 in write/delete */
	reglo |= 0x40000000;	/* Set status busy */
	reglo |= sta_id << 16;

	iowrite32(0, ctl + AGNX_RXM_HASH_CMD_FLAG);
	iowrite32(reghi, ctl + AGNX_RXM_HASH_CMD_HIGH);
	iowrite32(reglo, ctl + AGNX_RXM_HASH_CMD_LOW);

	reghi = ioread32(ctl + AGNX_RXM_HASH_CMD_HIGH);
	reglo = ioread32(ctl + AGNX_RXM_HASH_CMD_LOW);
	printk(PFX "RX hash cmd are : %.8x%.8x\n", reghi, reglo);
}
/*
 * Insert/update @mac_addr for station @sta_id in the RX hash table.
 *
 * An invalid MAC only logs a warning -- the command is still issued
 * (NOTE(review): looks deliberate, but confirm).  After issuing the
 * command, bit 31 of the low command word is read back once as a
 * success indicator.
 */
void hash_write(struct agnx_priv *priv, u8 *mac_addr, u8 sta_id)
{
	void __iomem *ctl = priv->ctl;
	u32 reghi, reglo;

	if (!is_valid_ether_addr(mac_addr))
		printk(KERN_WARNING PFX "Update hash table: Invalid hwaddr!\n");

	/* pack the 6-byte MAC into the high word + low 16 bits */
	reghi = mac_addr[0] << 24 | mac_addr[1] << 16 | mac_addr[2] << 8 | mac_addr[3];
	reglo = mac_addr[4] << 8 | mac_addr[5];
	reglo |= 0x10000000;	/* Set hash command */
	reglo |= 0x40000000;	/* Set status busy */
	reglo |= sta_id << 16;

	iowrite32(0, ctl + AGNX_RXM_HASH_CMD_FLAG);
	iowrite32(reghi, ctl + AGNX_RXM_HASH_CMD_HIGH);
	iowrite32(reglo, ctl + AGNX_RXM_HASH_CMD_LOW);

	reglo = ioread32(ctl + AGNX_RXM_HASH_CMD_LOW);
	if (!(reglo & 0x80000000))
		printk(KERN_WARNING PFX "Update hash table failed\n");
}
/*
 * Issue an RX hash-table DELETE command for station @sta_id and print
 * the command words read back.  Same word layout as hash_read(), but
 * with command code 0x2 in bits 29:28.
 */
void hash_delete(struct agnx_priv *priv, u32 reghi, u32 reglo, u8 sta_id)
{
	void __iomem *ctl = priv->ctl;

	reglo &= 0xFFFF;
	reglo |= 0x20000000;	/* delete command */
	reglo |= 0x40000000;	/* Set status busy */
	reglo |= sta_id << 16;

	iowrite32(0, ctl + AGNX_RXM_HASH_CMD_FLAG);
	iowrite32(reghi, ctl + AGNX_RXM_HASH_CMD_HIGH);
	iowrite32(reglo, ctl + AGNX_RXM_HASH_CMD_LOW);
	reghi = ioread32(ctl + AGNX_RXM_HASH_CMD_HIGH);

	reglo = ioread32(ctl + AGNX_RXM_HASH_CMD_LOW);
	printk(PFX "RX hash cmd are : %.8x%.8x\n", reghi, reglo);
}
/*
 * Debug helper: trigger a hash-table DUMP for station @sta_id and print
 * the resulting command, flag, and dump registers.  The 80us delay
 * gives the hardware time to complete the dump before reading.
 */
void hash_dump(struct agnx_priv *priv, u8 sta_id)
{
	void __iomem *ctl = priv->ctl;
	u32 reghi, reglo;

	reglo = 0x0;		/* dump command */
	reglo |= 0x40000000;	/* status bit */
	iowrite32(reglo, ctl + AGNX_RXM_HASH_CMD_LOW);
	iowrite32(sta_id << 16, ctl + AGNX_RXM_HASH_DUMP_DATA);

	udelay(80);

	reghi = ioread32(ctl + AGNX_RXM_HASH_CMD_HIGH);
	reglo = ioread32(ctl + AGNX_RXM_HASH_CMD_LOW);
	printk(PFX "hash cmd are : %.8x%.8x\n", reghi, reglo);
	reghi = ioread32(ctl + AGNX_RXM_HASH_CMD_FLAG);
	printk(PFX "hash flag is : %.8x\n", reghi);
	reghi = ioread32(ctl + AGNX_RXM_HASH_DUMP_MST);
	reglo = ioread32(ctl + AGNX_RXM_HASH_DUMP_LST);
	printk(PFX "hash dump mst lst: %.8x%.8x\n", reghi, reglo);
	reghi = ioread32(ctl + AGNX_RXM_HASH_DUMP_DATA);
	printk(PFX "hash dump data: %.8x\n", reghi);
}
/* Read station @sta_idx's TX power template out of MMIO into @power. */
void get_sta_power(struct agnx_priv *priv, struct agnx_sta_power *power, unsigned int sta_idx)
{
	void __iomem *ctl = priv->ctl;

	memcpy_fromio(power, ctl + AGNX_TXM_STAPOWTEMP + sizeof(*power) * sta_idx,
		      sizeof(*power));
}
/* Write @power as station @sta_idx's TX power template into MMIO. */
inline void
set_sta_power(struct agnx_priv *priv, struct agnx_sta_power *power, unsigned int sta_idx)
{
	void __iomem *ctl = priv->ctl;

	/* FIXME 2. Write Template to offset + station number */
	memcpy_toio(ctl + AGNX_TXM_STAPOWTEMP + sizeof(*power) * sta_idx,
		    power, sizeof(*power));
}
/*
 * Read TX workqueue descriptor @wq_idx of station @sta_idx into @tx_wq.
 * Workqueues are laid out as STA_TX_WQ_NUM consecutive descriptors per
 * station in the card's data window.
 */
void get_sta_tx_wq(struct agnx_priv *priv, struct agnx_sta_tx_wq *tx_wq,
		   unsigned int sta_idx, unsigned int wq_idx)
{
	void __iomem *data = priv->data;

	memcpy_fromio(tx_wq, data + AGNX_PDU_TX_WQ + sizeof(*tx_wq) * STA_TX_WQ_NUM * sta_idx +
		      sizeof(*tx_wq) * wq_idx, sizeof(*tx_wq));
}
/* Write @tx_wq as TX workqueue descriptor @wq_idx of station @sta_idx. */
inline void set_sta_tx_wq(struct agnx_priv *priv, struct agnx_sta_tx_wq *tx_wq,
			  unsigned int sta_idx, unsigned int wq_idx)
{
	void __iomem *data = priv->data;

	memcpy_toio(data + AGNX_PDU_TX_WQ + sizeof(*tx_wq) * STA_TX_WQ_NUM * sta_idx +
		    sizeof(*tx_wq) * wq_idx, tx_wq, sizeof(*tx_wq));
}
/* Read station descriptor @sta_idx from the card's PDU pool into @sta. */
void get_sta(struct agnx_priv *priv, struct agnx_sta *sta, unsigned int sta_idx)
{
	void __iomem *data = priv->data;

	memcpy_fromio(sta, data + AGNX_PDUPOOL + sizeof(*sta) * sta_idx,
		      sizeof(*sta));
}
/* Write @sta as station descriptor @sta_idx into the card's PDU pool. */
inline void set_sta(struct agnx_priv *priv, struct agnx_sta *sta, unsigned int sta_idx)
{
	void __iomem *data = priv->data;

	memcpy_toio(data + AGNX_PDUPOOL + sizeof(*sta) * sta_idx,
		    sta, sizeof(*sta));
}
/* FIXME */
/*
 * Initialise station @sta_idx's power template: everything zero except
 * the EDCF bit.  The 40us delay presumably lets the card latch the
 * template -- TODO confirm.
 */
void sta_power_init(struct agnx_priv *priv, unsigned int sta_idx)
{
	struct agnx_sta_power power;
	u32 reg;
	AGNX_TRACE;

	memset(&power, 0, sizeof(power));
	reg = agnx_set_bits(EDCF, EDCF_SHIFT, 0x1);
	power.reg = cpu_to_le32(reg);
	set_sta_power(priv, &power, sta_idx);
	udelay(40);
} /* add_power_template */
/* @num: The #number of station that is visible to the card */
/*
 * Initialise all STA_TX_WQ_NUM TX workqueues of station @sta_idx with
 * an identical descriptor: valid bit set and ACK type 1, all pointers
 * and counters zeroed.
 */
static void sta_tx_workqueue_init(struct agnx_priv *priv, unsigned int sta_idx)
{
	struct agnx_sta_tx_wq tx_wq;
	u32 reg;
	unsigned int i;

	memset(&tx_wq, 0, sizeof(tx_wq));

	reg = agnx_set_bits(WORK_QUEUE_VALID, WORK_QUEUE_VALID_SHIFT, 1);
	reg |= agnx_set_bits(WORK_QUEUE_ACK_TYPE, WORK_QUEUE_ACK_TYPE_SHIFT, 1);
//	reg |= agnx_set_bits(WORK_QUEUE_ACK_TYPE, WORK_QUEUE_ACK_TYPE_SHIFT, 0);
	tx_wq.reg2 |= cpu_to_le32(reg);

	/* Suppose all 8 traffic class are used */
	for (i = 0; i < STA_TX_WQ_NUM; i++)
		set_sta_tx_wq(priv, &tx_wq, sta_idx, i);
} /* sta_tx_workqueue_init */
/*
 * Initialise one traffic-class structure: new-packet and valid bits
 * set, and the RX sequence number primed to 4095 so the first received
 * frame is never treated as a duplicate.
 */
static void sta_traffic_init(struct agnx_sta_traffic *traffic)
{
	u32 reg;

	memset(traffic, 0, sizeof(*traffic));
	reg = agnx_set_bits(NEW_PACKET, NEW_PACKET_SHIFT, 1);
	reg |= agnx_set_bits(TRAFFIC_VALID, TRAFFIC_VALID_SHIFT, 1);
//	reg |= agnx_set_bits(TRAFFIC_ACK_TYPE, TRAFFIC_ACK_TYPE_SHIFT, 1);
	traffic->reg0 = cpu_to_le32(reg);

	/* 3. setting RX Sequence Number to 4095 */
	reg = agnx_set_bits(RX_SEQUENCE_NUM, RX_SEQUENCE_NUM_SHIFT, 4095);
	traffic->reg1 = cpu_to_le32(reg);
}
/* @num: The #number of station that is visible to the card */
/*
 * Build and write a fresh station descriptor for @sta_idx: station
 * valid, concatenation and decompression disabled, and all eight
 * traffic-class structures initialised; finishes by setting up the
 * station's TX workqueues.
 */
void sta_init(struct agnx_priv *priv, unsigned int sta_idx)
{
	/* FIXME the length of sta is 256 bytes Is that
	 * dangerous to stack overflow? */
	struct agnx_sta sta;
	u32 reg;
	int i;

	memset(&sta, 0, sizeof(sta));
	/* Set valid to 1 */
	reg = agnx_set_bits(STATION_VALID, STATION_VALID_SHIFT, 1);
	/* Set Enable Concatenation to 0 (?) */
	reg |= agnx_set_bits(ENABLE_CONCATENATION, ENABLE_CONCATENATION_SHIFT, 0);
	/* Set Enable Decompression to 0 (?) */
	reg |= agnx_set_bits(ENABLE_DECOMPRESSION, ENABLE_DECOMPRESSION_SHIFT, 0);
	sta.reg = cpu_to_le32(reg);

	/* Initialize each of the Traffic Class Structures by: */
	for (i = 0; i < 8; i++)
		sta_traffic_init(sta.traffic + i);

	set_sta(priv, &sta, sta_idx);
	sta_tx_workqueue_init(priv, sta_idx);
} /* sta_descriptor_init */

222
drivers/staging/agnx/sta.h Normal file
View File

@ -0,0 +1,222 @@
#ifndef AGNX_STA_H_
#define AGNX_STA_H_
#define STA_TX_WQ_NUM 8 /* The number of TX workqueue one STA has */
/*
 * RX hash-table command word layout (used by hash_read/hash_write/
 * hash_delete in sta.c).
 * NOTE(review): PASS has mask 0x1 but PASS_SHIFT is 1 -- a shift of 0
 * would match the mask; confirm against the reverse-engineered specs.
 */
struct agnx_hash_cmd {
	__be32 cmdhi;
#define MACLO		0xFFFF0000	/* low 16 bits of the MAC address */
#define MACLO_SHIFT	16
#define STA_ID		0x0000FFF0	/* station id */
#define STA_ID_SHIFT	4
#define CMD		0x0000000C	/* command code */
#define CMD_SHIFT	2
#define STATUS		0x00000002	/* busy/status bit */
#define STATUS_SHIFT	1
#define PASS		0x00000001
#define PASS_SHIFT	1
	__be32 cmdlo;
} __attribute__((__packed__));
/*
 * Station Power Template
 * FIXME Just for agn100 yet
 *
 * One 32-bit register describing per-station TX power/rate settings;
 * written to the card via set_sta_power().
 */
struct agnx_sta_power {
	__le32 reg;
#define SIGNAL			0x000000FF	/* signal */
#define SIGNAL_SHIFT		0
#define RATE			0x00000F00
#define RATE_SHIFT		8
#define TIFS			0x00001000
#define TIFS_SHIFT		12
#define EDCF			0x00002000
#define EDCF_SHIFT		13
#define CHANNEL_BOND		0x00004000
#define CHANNEL_BOND_SHIFT	14
#define PHY_MODE		0x00038000
#define PHY_MODE_SHIFT		15
#define POWER_LEVEL		0x007C0000
#define POWER_LEVEL_SHIFT	18
#define NUM_TRANSMITTERS	0x00800000
#define NUM_TRANSMITTERS_SHIFT	23
} __attribute__((__packed__));
/*
 * TX Workqueue Descriptor
 *
 * Per-station, per-traffic-class TX queue state kept in card memory.
 * NOTE(review): the field order reg0, reg3, reg1, reg2 mirrors the
 * hardware layout as reverse-engineered -- do not "fix" the numbering.
 */
struct agnx_sta_tx_wq {
	__le32 reg0;
#define HEAD_POINTER_LOW	0xFF000000	/* Head pointer low */
#define HEAD_POINTER_LOW_SHIFT	24
#define TAIL_POINTER		0x00FFFFFF	/* Tail pointer */
#define TAIL_POINTER_SHIFT	0

	__le32 reg3;
#define ACK_POINTER_LOW		0xFFFF0000	/* ACK pointer low */
#define ACK_POINTER_LOW_SHIFT	16
#define HEAD_POINTER_HIGH	0x0000FFFF	/* Head pointer high */
#define HEAD_POINTER_HIGH_SHIFT	0

	__le32 reg1;
/* ACK timeout tail packet count */
#define ACK_TIMOUT_TAIL_PACK_CNT	0xFFF00000
#define ACK_TIMOUT_TAIL_PACK_CNT_SHIFT	20
/* Head timeout tail packet count */
#define HEAD_TIMOUT_TAIL_PACK_CNT	0x000FFF00
#define HEAD_TIMOUT_TAIL_PACK_CNT_SHIFT	8
#define ACK_POINTER_HIGH		0x000000FF	/* ACK pointer high */
#define ACK_POINTER_HIGH_SHIFT		0

	__le32 reg2;
#define WORK_QUEUE_VALID		0x80000000	/* valid */
#define WORK_QUEUE_VALID_SHIFT		31
#define WORK_QUEUE_ACK_TYPE		0x40000000	/* ACK type */
#define WORK_QUEUE_ACK_TYPE_SHIFT	30
/* Head timeout window limit fragmentation count */
#define HEAD_TIMOUT_WIN_LIM_FRAG_CNT	0x3FFF0000
#define HEAD_TIMOUT_WIN_LIM_FRAG_CNT_SHIFT	16
/* Head timeout window limit byte count */
#define HEAD_TIMOUT_WIN_LIM_BYTE_CNT	0x0000FFFF
#define HEAD_TIMOUT_WIN_LIM_BYTE_CNT_SHIFT	0
} __attribute__((__packed__));
/*
 * Traffic Class Structure
 *
 * Eight of these are embedded in each station descriptor (one per
 * traffic class).  NOTE(review): rx_replay_cnt_low is __be32 while its
 * neighbours are little-endian -- possibly a typo for __le32; confirm
 * against the specs before relying on it.
 */
struct agnx_sta_traffic {
	__le32 reg0;
#define ACK_TIMOUT_CNT		0xFF800000	/* ACK Timeout Counts */
#define ACK_TIMOUT_CNT_SHIFT	23
#define TRAFFIC_ACK_TYPE	0x00600000	/* ACK Type */
#define TRAFFIC_ACK_TYPE_SHIFT	21
#define NEW_PACKET		0x00100000	/* New Packet */
#define NEW_PACKET_SHIFT	20
#define TRAFFIC_VALID		0x00080000	/* Valid */
#define TRAFFIC_VALID_SHIFT	19
#define RX_HDR_DESC_POINTER	0x0007FFFF	/* RX Header Descripter pointer */
#define RX_HDR_DESC_POINTER_SHIFT	0

	__le32 reg1;
#define RX_PACKET_TIMESTAMP	0xFFFF0000	/* RX Packet Timestamp */
#define RX_PACKET_TIMESTAMP_SHIFT	16
#define TRAFFIC_RESERVED	0x0000E000	/* Reserved */
#define TRAFFIC_RESERVED_SHIFT	13
#define SV			0x00001000	/* sv */
#define SV_SHIFT		12
#define RX_SEQUENCE_NUM		0x00000FFF	/* RX Sequence Number */
#define RX_SEQUENCE_NUM_SHIFT	0

	__le32 tx_replay_cnt_low;	/* TX Replay Counter Low */
	__le16 tx_replay_cnt_high;	/* TX Replay Counter High */
	__le16 rx_replay_cnt_high;	/* RX Replay Counter High */
	__be32 rx_replay_cnt_low;	/* RX Replay Counter Low */
} __attribute__((__packed__));
/*
 * Station Descriptors
 *
 * Full per-station state block kept in the card's PDU pool (read and
 * written whole via get_sta()/set_sta()): session keys, control flags,
 * per-station statistics, and the eight traffic-class structures.
 */
struct agnx_sta {
	__le32 tx_session_keys[4];	/* Transmit Session Key (0-3) */
	__le32 rx_session_keys[4];	/* Receive Session Key (0-3) */

	__le32 reg;
#define ID_1			0xC0000000	/* id 1 */
#define ID_1_SHIFT		30
#define ID_0			0x30000000	/* id 0 */
#define ID_0_SHIFT		28
#define ENABLE_CONCATENATION	0x0FF00000	/* Enable concatenation */
#define ENABLE_CONCATENATION_SHIFT	20
#define ENABLE_DECOMPRESSION	0x000FF000	/* Enable decompression */
#define ENABLE_DECOMPRESSION_SHIFT	12
#define STA_RESERVED		0x00000C00	/* Reserved */
#define STA_RESERVED_SHIFT	10
#define EAP			0x00000200	/* EAP */
#define EAP_SHIFT		9
#define ED_NULL			0x00000100	/* ED NULL */
#define ED_NULL_SHIFT		8
#define ENCRYPTION_POLICY	0x000000E0	/* Encryption Policy */
#define ENCRYPTION_POLICY_SHIFT	5
#define DEFINED_KEY_ID		0x00000018	/* Defined Key ID */
#define DEFINED_KEY_ID_SHIFT	3
#define FIXED_KEY		0x00000004	/* Fixed Key */
#define FIXED_KEY_SHIFT		2
#define KEY_VALID		0x00000002	/* Key Valid */
#define KEY_VALID_SHIFT		1
#define STATION_VALID		0x00000001	/* Station Valid */
#define STATION_VALID_SHIFT	0

	/* per-station hardware statistics */
	__le32 tx_aes_blks_unicast;	/* TX AES Blks Unicast */
	__le32 rx_aes_blks_unicast;	/* RX AES Blks Unicast */
	__le16 aes_format_err_unicast_cnt;	/* AES Format Error Unicast Counts */
	__le16 aes_replay_unicast;	/* AES Replay Unicast */
	__le16 aes_decrypt_err_unicast;	/* AES Decrypt Error Unicast */
	__le16 aes_decrypt_err_default;	/* AES Decrypt Error default */
	__le16 single_retry_packets;	/* Single Retry Packets */
	__le16 failed_tx_packets;	/* Failed Tx Packets */
	__le16 muti_retry_packets;	/* Multiple Retry Packets */
	__le16 ack_timeouts;		/* ACK Timeouts */
	__le16 frag_tx_cnt;		/* Fragment TX Counts */
	__le16 rts_brq_sent;		/* RTS Brq Sent */
	__le16 tx_packets;		/* TX Packets */
	__le16 cts_back_timeout;	/* CTS Back Timeout */
	__le32 phy_stats_high;		/* PHY Stats High */
	__le32 phy_stats_low;		/* PHY Stats Low */

	struct agnx_sta_traffic traffic[8];	/* Traffic Class Structure (8) */

	__le16 traffic_class0_frag_success;	/* Traffic Class 0 Fragment Success */
	__le16 traffic_class1_frag_success;	/* Traffic Class 1 Fragment Success */
	__le16 traffic_class2_frag_success;	/* Traffic Class 2 Fragment Success */
	__le16 traffic_class3_frag_success;	/* Traffic Class 3 Fragment Success */
	__le16 traffic_class4_frag_success;	/* Traffic Class 4 Fragment Success */
	__le16 traffic_class5_frag_success;	/* Traffic Class 5 Fragment Success */
	__le16 traffic_class6_frag_success;	/* Traffic Class 6 Fragment Success */
	__le16 traffic_class7_frag_success;	/* Traffic Class 7 Fragment Success */

	__le16 num_frag_non_prime_rates;	/* number of Fragments for non-prime rates */
	__le16 ack_timeout_non_prime_rates;	/* ACK Timeout for non-prime rates */
} __attribute__((__packed__));
/*
 * Beacon template header: power template followed by the PHY header,
 * frame length, and 802.11 MAC header as the card expects to transmit
 * them.
 */
struct agnx_beacon_hdr {
	struct agnx_sta_power power;	/* Tx Station Power Template */
	u8 phy_hdr[6];		/* PHY Hdr */
	u8 frame_len_lo;	/* Frame Length Lo */
	u8 frame_len_hi;	/* Frame Length Hi */
	u8 mac_hdr[24];		/* MAC Header */
	/* FIXME */
	/* 802.11(abg) beacon */
} __attribute__((__packed__));
void hash_write(struct agnx_priv *priv, u8 *mac_addr, u8 sta_id);
void hash_dump(struct agnx_priv *priv, u8 sta_id);
void hash_read(struct agnx_priv *priv, u32 reghi, u32 reglo, u8 sta_id);
void hash_delete(struct agnx_priv *priv, u32 reghi, u32 reglo, u8 sta_id);
void get_sta_power(struct agnx_priv *priv, struct agnx_sta_power *power, unsigned int sta_idx);
void set_sta_power(struct agnx_priv *priv, struct agnx_sta_power *power,
unsigned int sta_idx);
void get_sta_tx_wq(struct agnx_priv *priv, struct agnx_sta_tx_wq *tx_wq,
unsigned int sta_idx, unsigned int wq_idx);
void set_sta_tx_wq(struct agnx_priv *priv, struct agnx_sta_tx_wq *tx_wq,
unsigned int sta_idx, unsigned int wq_idx);
void get_sta(struct agnx_priv *priv, struct agnx_sta *sta, unsigned int sta_idx);
void set_sta(struct agnx_priv *priv, struct agnx_sta *sta, unsigned int sta_idx);
void sta_power_init(struct agnx_priv *priv, unsigned int num);
void sta_init(struct agnx_priv *priv, unsigned int num);
#endif /* AGNX_STA_H_ */

View File

@ -0,0 +1,168 @@
#include <linux/pci.h>
#include <linux/delay.h>
#include "agnx.h"
#include "debug.h"
#include "phy.h"
/* TX FIR filter coefficients (reverse-engineered vendor values). */
static const u32
tx_fir_table[] = { 0x19, 0x5d, 0xce, 0x151, 0x1c3, 0x1ff, 0x1ea, 0x17c, 0xcf,
		   0x19, 0x38e, 0x350, 0x362, 0x3ad, 0x5, 0x44, 0x59, 0x49,
		   0x21, 0x3f7, 0x3e0, 0x3e3, 0x3f3, 0x0 };

/* Load the TX FIR coefficient table into consecutive 32-bit registers. */
void tx_fir_table_init(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	int i;

	for (i = 0; i < ARRAY_SIZE(tx_fir_table); i++)
		iowrite32(tx_fir_table[i], ctl + AGNX_FIR_BASE + i*4);
} /* fir_table_setup */
/* RX gain curve values (reverse-engineered vendor values). */
static const u32
gain_table[] = { 0x8, 0x8, 0xf, 0x13, 0x17, 0x1b, 0x1f, 0x23, 0x27, 0x2b,
		 0x2f, 0x33, 0x37, 0x3b, 0x3f, 0x43, 0x47, 0x4b, 0x4f,
		 0x53, 0x57, 0x5b, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f,
		 0x5f, 0x5f, 0x5f, 0x5f };

/*
 * Load the gain table into both hardware banks (the second bank lives
 * 0x80 bytes above the first).
 */
void gain_table_init(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	int i;

	for (i = 0; i < ARRAY_SIZE(gain_table); i++) {
		iowrite32(gain_table[i], ctl + AGNX_GAIN_TABLE + i*4);
		iowrite32(gain_table[i], ctl + AGNX_GAIN_TABLE + i*4 + 0x80);
	}
} /* gain_table_init */
void monitor_gain_table_init(struct agnx_priv *priv)
{
void __iomem *ctl = priv->ctl;
unsigned int i;
for (i = 0; i < 0x44; i += 4) {
iowrite32(0x61, ctl + AGNX_MONGCR_BASE + i);
iowrite32(0x61, ctl + AGNX_MONGCR_BASE + 0x200 + i);
}
for (i = 0x44; i < 0x64; i += 4) {
iowrite32(0x6e, ctl + AGNX_MONGCR_BASE + i);
iowrite32(0x6e, ctl + AGNX_MONGCR_BASE + 0x200 + i);
}
for (i = 0x64; i < 0x94; i += 4) {
iowrite32(0x7a, ctl + AGNX_MONGCR_BASE + i);
iowrite32(0x7a, ctl + AGNX_MONGCR_BASE + 0x200 + i);
}
for (i = 0x94; i < 0xdc; i += 4) {
iowrite32(0x87, ctl + AGNX_MONGCR_BASE + i);
iowrite32(0x87, ctl + AGNX_MONGCR_BASE + 0x200 + i);
}
for (i = 0xdc; i < 0x148; i += 4) {
iowrite32(0x95, ctl + AGNX_MONGCR_BASE + i);
iowrite32(0x95, ctl + AGNX_MONGCR_BASE + 0x200 + i);
}
for (i = 0x148; i < 0x1e8; i += 4) {
iowrite32(0xa2, ctl + AGNX_MONGCR_BASE + i);
iowrite32(0xa2, ctl + AGNX_MONGCR_BASE + 0x200 + i);
}
for (i = 0x1e8; i <= 0x1fc; i += 4) {
iowrite32(0xb0, ctl + AGNX_MONGCR_BASE + i);
iowrite32(0xb0, ctl + AGNX_MONGCR_BASE + 0x200 + i);
}
} /* monitor_gain_table_init */
/*
 * Program the RX manager's frame-routing table.
 *
 * For every (type, subtype) pair a routing word is written: management
 * and control frames go to the CPU; data/QoS-data subtypes go to the
 * encryption engine; NULL-family frames currently also go to the CPU
 * (the commented-out RFP/DROP routes suggest the intended final
 * behaviour differed -- NOTE(review): confirm before changing).  The
 * receiver is disabled around the update.
 */
void routing_table_init(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	unsigned int type, subtype;
	u32 reg;

	disable_receiver(priv);

	for ( type = 0; type < 0x3; type++ ) {
		for (subtype = 0; subtype < 0x10; subtype++) {
			/* 1. Set Routing table to R/W and to Return status on Read */
			reg = (type << ROUTAB_TYPE_SHIFT) |
				(subtype << ROUTAB_SUBTYPE_SHIFT);
			reg |= (1 << ROUTAB_RW_SHIFT) | (1 << ROUTAB_STATUS_SHIFT);

			if (type == ROUTAB_TYPE_DATA) {
				/* NULL goes to RFP */
				if (subtype == ROUTAB_SUBTYPE_NULL)
//					reg |= ROUTAB_ROUTE_RFP;
					reg |= ROUTAB_ROUTE_CPU;
				/* QOS NULL goes to CPU */
				else if (subtype == ROUTAB_SUBTYPE_QOSNULL)
					reg |= ROUTAB_ROUTE_CPU;
				/* All Data and QOS data subtypes go to Encryption */
				else if ((subtype == ROUTAB_SUBTYPE_DATA) ||
					 (subtype == ROUTAB_SUBTYPE_DATAACK) ||
					 (subtype == ROUTAB_SUBTYPE_DATAPOLL) ||
					 (subtype == ROUTAB_SUBTYPE_DATAPOLLACK) ||
					 (subtype == ROUTAB_SUBTYPE_QOSDATA) ||
					 (subtype == ROUTAB_SUBTYPE_QOSDATAACK) ||
					 (subtype == ROUTAB_SUBTYPE_QOSDATAPOLL) ||
					 (subtype == ROUTAB_SUBTYPE_QOSDATAACKPOLL))
					reg |= ROUTAB_ROUTE_ENCRY;
//					reg |= ROUTAB_ROUTE_CPU;
				/*Drop NULL and QOS NULL ack, poll and poll ack*/
				else if ((subtype == ROUTAB_SUBTYPE_NULLACK) ||
					 (subtype == ROUTAB_SUBTYPE_QOSNULLACK) ||
					 (subtype == ROUTAB_SUBTYPE_NULLPOLL) ||
					 (subtype == ROUTAB_SUBTYPE_QOSNULLPOLL) ||
					 (subtype == ROUTAB_SUBTYPE_NULLPOLLACK) ||
					 (subtype == ROUTAB_SUBTYPE_QOSNULLPOLLACK))
//					reg |= ROUTAB_ROUTE_DROP;
					reg |= ROUTAB_ROUTE_CPU;
			}
			else
				reg |= (ROUTAB_ROUTE_CPU);

			iowrite32(reg, ctl + AGNX_RXM_ROUTAB);
			/* Check to verify that the status bit cleared */
			routing_table_delay();
		}
	}

	enable_receiver(priv);
} /* routing_table_init */
void tx_engine_lookup_tbl_init(struct agnx_priv *priv)
{
void __iomem *data = priv->data;
unsigned int i;
for (i = 0; i <= 28; i += 4)
iowrite32(0xb00c, data + AGNX_ENGINE_LOOKUP_TBL + i);
for (i = 32; i <= 120; i += 8) {
iowrite32(0x1e58, data + AGNX_ENGINE_LOOKUP_TBL + i);
iowrite32(0xb00c, data + AGNX_ENGINE_LOOKUP_TBL + i + 4);
}
for (i = 128; i <= 156; i += 4)
iowrite32(0x980c, data + AGNX_ENGINE_LOOKUP_TBL + i);
for (i = 160; i <= 248; i += 8) {
iowrite32(0x1858, data + AGNX_ENGINE_LOOKUP_TBL + i);
iowrite32(0x980c, data + AGNX_ENGINE_LOOKUP_TBL + i + 4);
}
for (i = 256; i <= 284; i += 4)
iowrite32(0x980c, data + AGNX_ENGINE_LOOKUP_TBL + i);
for (i = 288; i <= 376; i += 8) {
iowrite32(0x1a58, data + AGNX_ENGINE_LOOKUP_TBL + i);
iowrite32(0x1858, data + AGNX_ENGINE_LOOKUP_TBL + i + 4);
}
for (i = 512; i <= 540; i += 4)
iowrite32(0xc00c, data + AGNX_ENGINE_LOOKUP_TBL + i);
for (i = 544; i <= 632; i += 8) {
iowrite32(0x2058, data + AGNX_ENGINE_LOOKUP_TBL + i);
iowrite32(0xc00c, data + AGNX_ENGINE_LOOKUP_TBL + i + 4);
}
for (i = 640; i <= 668; i += 4)
iowrite32(0xc80c, data + AGNX_ENGINE_LOOKUP_TBL + i);
for (i = 672; i <= 764; i += 8) {
iowrite32(0x2258, data + AGNX_ENGINE_LOOKUP_TBL + i);
iowrite32(0xc80c, data + AGNX_ENGINE_LOOKUP_TBL + i + 4);
}
}

View File

@ -0,0 +1,10 @@
#ifndef AGNX_TABLE_H_
#define AGNX_TABLE_H_

/* One-time hardware table initialisers, implemented in table.c */
void tx_fir_table_init(struct agnx_priv *priv);		/* TX FIR coefficients */
void gain_table_init(struct agnx_priv *priv);		/* RX gain table, both banks */
void monitor_gain_table_init(struct agnx_priv *priv);	/* monitor gain curve */
void routing_table_init(struct agnx_priv *priv);	/* RX frame routing */
void tx_engine_lookup_tbl_init(struct agnx_priv *priv);	/* TX engine lookup */

#endif /* AGNX_TABLE_H_ */

819
drivers/staging/agnx/xmit.c Normal file
View File

@ -0,0 +1,819 @@
/**
* Airgo MIMO wireless driver
*
* Copyright (c) 2007 Li YanBo <dreamfly281@gmail.com>
 * Thanks to Jeff Williams <angelbane@gmail.com> for doing the reverse
 * engineering work and publishing the SPECS at http://airgo.wdwconsulting.net/mymoin
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/pci.h>
#include <linux/delay.h>
#include "agnx.h"
#include "debug.h"
#include "phy.h"
/* Debug counter of received frames; incremented elsewhere, never reset. */
unsigned int rx_frame_cnt = 0;
//unsigned int local_tx_sent_cnt = 0;
/* Stop the RX DMA engine; the read-back flushes the posted write. */
static inline void disable_rx_engine(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;

	iowrite32(0x100, ctl + AGNX_CIR_RXCTL);
	/* Wait for RX Control to have the Disable Rx Interrupt (0x100) set */
	ioread32(ctl + AGNX_CIR_RXCTL);
}
/* Start the RX DMA engine; the read-back flushes the posted write. */
static inline void enable_rx_engine(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;

	iowrite32(0x80, ctl + AGNX_CIR_RXCTL);
	ioread32(ctl + AGNX_CIR_RXCTL);
}
/*
 * Disable RX completion interrupts: stop the RX engine first, then
 * clear the interrupt-enable bit (0x20) in the RX config register.
 */
inline void disable_rx_interrupt(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;

	disable_rx_engine(priv);
	reg = ioread32(ctl + AGNX_CIR_RXCFG);
	reg &= ~0x20;
	iowrite32(reg, ctl + AGNX_CIR_RXCFG);
	ioread32(ctl + AGNX_CIR_RXCFG);	/* flush posted write */
}
/*
 * Enable RX completion interrupts (set bit 0x20 in the RX config
 * register), then restart the RX engine.
 */
inline void enable_rx_interrupt(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;

	reg = ioread32(ctl + AGNX_CIR_RXCFG);
	reg |= 0x20;
	iowrite32(reg, ctl + AGNX_CIR_RXCFG);
	ioread32(ctl + AGNX_CIR_RXCFG);	/* flush posted write */
	enable_rx_engine(priv);
}
/*
 * Allocate and DMA-map a fresh receive skb for ring slot @idx and hand
 * the descriptor to the card (OWNER bit).
 *
 * NOTE(review): dev_alloc_skb() failure is fatal (agnx_bug) and the
 * pci_map_single() result is not checked with
 * pci_dma_mapping_error() -- both worth hardening.
 */
static inline void rx_desc_init(struct agnx_priv *priv, unsigned int idx)
{
	struct agnx_desc *desc = priv->rx.desc + idx;
	struct agnx_info *info = priv->rx.info + idx;

	memset(info, 0, sizeof(*info));

	/* largest frame we accept plus the card's RX header */
	info->dma_len = IEEE80211_MAX_RTS_THRESHOLD + sizeof(struct agnx_hdr);

	info->skb = dev_alloc_skb(info->dma_len);
	if (info->skb == NULL)
		agnx_bug("refill err");

	info->mapping = pci_map_single(priv->pdev, skb_tail_pointer(info->skb),
				       info->dma_len, PCI_DMA_FROMDEVICE);
	memset(desc, 0, sizeof(*desc));
	desc->dma_addr = cpu_to_be32(info->mapping);
	/* Set the owner to the card */
	desc->frag = cpu_to_be32(be32_to_cpu(desc->frag) | OWNER);
}
/*
 * Unmap slot @idx's old buffer and replace it with a fresh one.  The
 * old skb itself is not freed here -- it has been handed up the stack
 * (see comment below).
 */
static inline void rx_desc_reinit(struct agnx_priv *priv, unsigned int idx)
{
	struct agnx_info *info = priv->rx.info + idx;

	/* Cause ieee80211 will free the skb buffer, so we needn't to free it again?! */
	pci_unmap_single(priv->pdev, info->mapping, info->dma_len, PCI_DMA_FROMDEVICE);
	rx_desc_init(priv, idx);
}
/*
 * Re-arm slot @idx with its EXISTING buffer (no new allocation or
 * mapping): rebuild the descriptor and give ownership back to the card.
 */
static inline void rx_desc_reusing(struct agnx_priv *priv, unsigned int idx)
{
	struct agnx_desc *desc = priv->rx.desc + idx;
	struct agnx_info *info = priv->rx.info + idx;

	memset(desc, 0, sizeof(*desc));
	desc->dma_addr = cpu_to_be32(info->mapping);
	/* Set the owner to the card */
	desc->frag = cpu_to_be32(be32_to_cpu(desc->frag) | OWNER);
}
/* Tear down RX ring slot @idx: unmap, free the skb, and zero both the
 * descriptor and its bookkeeping info. */
static void rx_desc_free(struct agnx_priv *priv, unsigned int idx)
{
	struct agnx_desc *desc = priv->rx.desc + idx;
	struct agnx_info *info = priv->rx.info + idx;

	BUG_ON(!desc || !info);

	if (info->mapping)
		pci_unmap_single(priv->pdev, info->mapping, info->dma_len, PCI_DMA_FROMDEVICE);
	if (info->skb)
		dev_kfree_skb(info->skb);
	memset(info, 0, sizeof(*info));
	memset(desc, 0, sizeof(*desc));
}
/*
 * Common TX slot teardown: unmap the DMA buffer, free the skb only for
 * PACKET-type entries (other types don't own an skb), and zero both
 * structures.
 */
static inline void __tx_desc_free(struct agnx_priv *priv,
				  struct agnx_desc *desc, struct agnx_info *info)
{
	BUG_ON(!desc || !info);
	/* TODO make sure mapping, skb and len are consistency */
	if (info->mapping)
		pci_unmap_single(priv->pdev, info->mapping,
				 info->dma_len, PCI_DMA_TODEVICE);
	if (info->type == PACKET)
		dev_kfree_skb(info->skb);

	memset(info, 0, sizeof(*info));
	memset(desc, 0, sizeof(*desc));
}
/* Free slot @idx of the TX management (TXM) ring. */
static void txm_desc_free(struct agnx_priv *priv, unsigned int idx)
{
	struct agnx_desc *desc = priv->txm.desc + idx;
	struct agnx_info *info = priv->txm.info + idx;

	__tx_desc_free(priv, desc, info);
}
/* Free slot @idx of the TX data (TXD) ring. */
static void txd_desc_free(struct agnx_priv *priv, unsigned int idx)
{
	struct agnx_desc *desc = priv->txd.desc + idx;
	struct agnx_info *info = priv->txd.info + idx;

	__tx_desc_free(priv, desc, info);
}
/*
 * Initialise all three DMA rings (RX, TXM, TXD), program their bounds
 * into the card, and hand ring ownership to the hardware.  Always
 * returns 0.
 *
 * NOTE(review): each ring's END register is written with the NEXT
 * ring's base (RXCMEND = txm.dma, TXMEND = txd.dma), which implies the
 * three rings are allocated contiguously -- confirm against the
 * allocation code before changing.
 */
int fill_rings(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	unsigned int i;
	u32 reg;
	AGNX_TRACE;

	priv->txd.idx_sent = priv->txm.idx_sent = 0;
	priv->rx.idx = priv->txm.idx = priv->txd.idx = 0;

	/* RX slots get real buffers; TX slots start empty */
	for (i = 0; i < priv->rx.size; i++)
		rx_desc_init(priv, i);
	for (i = 0; i < priv->txm.size; i++) {
		memset(priv->txm.desc + i, 0, sizeof(struct agnx_desc));
		memset(priv->txm.info + i, 0, sizeof(struct agnx_info));
	}
	for (i = 0; i < priv->txd.size; i++) {
		memset(priv->txd.desc + i, 0, sizeof(struct agnx_desc));
		memset(priv->txd.info + i, 0, sizeof(struct agnx_info));
	}

	/* FIXME Set the card RX TXM and TXD address */
	agnx_write32(ctl, AGNX_CIR_RXCMSTART, priv->rx.dma);
	agnx_write32(ctl, AGNX_CIR_RXCMEND, priv->txm.dma);

	agnx_write32(ctl, AGNX_CIR_TXMSTART, priv->txm.dma);
	agnx_write32(ctl, AGNX_CIR_TXMEND, priv->txd.dma);

	agnx_write32(ctl, AGNX_CIR_TXDSTART, priv->txd.dma);
	agnx_write32(ctl, AGNX_CIR_TXDEND, priv->txd.dma +
		     sizeof(struct agnx_desc) * priv->txd.size);

	/* FIXME Relinquish control of rings to card */
	reg = agnx_read32(ctl, AGNX_CIR_BLKCTL);
	reg &= ~0x800;
	agnx_write32(ctl, AGNX_CIR_BLKCTL, reg);
	return 0;
} /* fill_rings */
/* Tear down all three rings, freeing every descriptor's DMA mapping
 * and skb.  Runs under priv->lock with interrupts disabled so the IRQ
 * handlers cannot touch the rings concurrently. */
void unfill_rings(struct agnx_priv *priv)
{
	unsigned long flags;
	unsigned int i;
	AGNX_TRACE;

	spin_lock_irqsave(&priv->lock, flags);
	for (i = 0; i < priv->rx.size; i++)
		rx_desc_free(priv, i);
	for (i = 0; i < priv->txm.size; i++)
		txm_desc_free(priv, i);
	for (i = 0; i < priv->txd.size; i++)
		txd_desc_free(priv, i);
	spin_unlock_irqrestore(&priv->lock, flags);
}
/* Extract the bitrate out of a CCK PLCP header.
   copy from bcm43xx driver */
static inline u8 agnx_plcp_get_bitrate_cck(__be32 *phyhdr_11b)
{
	/* FIXME */
	u8 signal = *(u8 *)phyhdr_11b;

	if (signal == 0x0A)
		return 0;
	if (signal == 0x14)
		return 1;
	if (signal == 0x37)
		return 2;
	if (signal == 0x6E)
		return 3;

	agnx_bug("Wrong plcp rate");
	return 0;
}
/* FIXME */
/* Extract the 4-bit rate field from an OFDM (11g) PLCP header. */
static inline u8 agnx_plcp_get_bitrate_ofdm(__be32 *phyhdr_11g)
{
	u8 rate;

	rate = *(u8 *)phyhdr_11g & 0xF;
	printk(PFX "G mode rate is 0x%x\n", rate);
	return rate;
}
/* FIXME */
/* Build an ieee80211_rx_status for a received frame from the agnx
 * header and the PHY noise registers.  The signal-quality calculation
 * currently uses a hard-coded test SNR of 40 (the real computation is
 * commented out below). */
static void get_rx_stats(struct agnx_priv *priv, struct agnx_hdr *hdr,
			 struct ieee80211_rx_status *stat)
{
	void __iomem *ctl = priv->ctl;
	u8 *rssi;
	u32 noise;
	/* FIXME just for test */
	int snr = 40;		/* signal-to-noise ratio */

	memset(stat, 0, sizeof(*stat));

	/* RSSI */
	rssi = (u8 *)&hdr->phy_stats_lo;
//	stat->ssi = (rssi[0] + rssi[1] + rssi[2]) / 3;
	/* Noise: average of the three noise registers */
	noise = ioread32(ctl + AGNX_GCR_NOISE0);
	noise += ioread32(ctl + AGNX_GCR_NOISE1);
	noise += ioread32(ctl + AGNX_GCR_NOISE2);
	stat->noise = noise / 3;
	/* Signal quality */
//	snr = stat->ssi - stat->noise;
	if (snr >=0 && snr < 40)
		stat->signal = 5 * snr / 2;
	else if (snr >= 40)
		stat->signal = 100;
	else
		stat->signal = 0;

	/* Exactly one of the 11b/11g PLCP words must be set. */
	if (hdr->_11b0 && !hdr->_11g0) {
		stat->rate_idx = agnx_plcp_get_bitrate_cck(&hdr->_11b0);
	} else if (!hdr->_11b0 && hdr->_11g0) {
		printk(PFX "RX: Found G mode packet\n");
		stat->rate_idx = agnx_plcp_get_bitrate_ofdm(&hdr->_11g0);
	} else
		agnx_bug("Unknown packets type");

	stat->band = IEEE80211_BAND_2GHZ;
	stat->freq = agnx_channels[priv->channel - 1].center_freq;
//	stat->antenna = 3;
//	stat->mactime = be32_to_cpu(hdr->time_stamp);
//	stat->channel = priv->channel;
}
/* Re-attach the saved 802.11 header to the front of the payload skb
 * after the hardware (agnx) header has been stripped.  Trips
 * agnx_bug() if the derived header length is implausible. */
static inline void combine_hdr_frag(struct ieee80211_hdr *ieeehdr,
				    struct sk_buff *skb)
{
	u16 fctl;
	unsigned int hdrlen;

	fctl = le16_to_cpu(ieeehdr->frame_control);
	hdrlen = ieee80211_hdrlen(fctl);
	/* FIXME */
	if (hdrlen < (2+2+6)/*minimum hdr*/ ||
	    hdrlen > sizeof(struct ieee80211_mgmt)) {
		/* %u: hdrlen is unsigned, %d was a format mismatch */
		printk(KERN_ERR PFX "hdr len is %u\n", hdrlen);
		agnx_bug("Wrong ieee80211 hdr detected");
	}
	skb_push(skb, hdrlen);
	memcpy(skb->data, ieeehdr, hdrlen);
} /* combine_hdr_frag */
/* Basic sanity checks on a received frame: reject CRC failures and
 * oversized packets.  Returns 0 to accept, -1 to drop; every drop is
 * counted as an FCS error in the driver statistics. */
static inline int agnx_packet_check(struct agnx_priv *priv, struct agnx_hdr *agnxhdr,
				    unsigned packet_len)
{
	if (agnx_get_bits(CRC_FAIL, CRC_FAIL_SHIFT, be32_to_cpu(agnxhdr->reg1)) == 1){
		printk(PFX "RX: CRC check fail\n");
		goto drop;
	}
	if (packet_len > 2048) {
		printk(PFX "RX: Too long packet detected\n");
		goto drop;
	}

	/* FIXME Just usable for Promious Mode, for Manage mode exclude FCS */
/*	if (packet_len - sizeof(*agnxhdr) < FCS_LEN) { */
/*		printk(PFX "RX: Too short packet detected\n"); */
/*		goto drop; */
/*	} */
	return 0;
drop:
	priv->stats.dot11FCSErrorCount++;
	return -1;
}
/* Drain the RX ring: for every descriptor the hardware has released
 * (OWNER bit clear) build RX status, restore the 802.11 header and
 * hand the frame to mac80211, then rearm the slot. */
void handle_rx_irq(struct agnx_priv *priv)
{
	struct ieee80211_rx_status status;
	unsigned int len;
//	AGNX_TRACE;

	do {
		struct agnx_desc *desc;
		u32 frag;
		struct agnx_info *info;
		struct agnx_hdr *hdr;
		struct sk_buff *skb;
		unsigned int i = priv->rx.idx % priv->rx.size;

		desc = priv->rx.desc + i;
		frag = be32_to_cpu(desc->frag);
		if (frag & OWNER)	/* hardware still owns this slot */
			break;

		info = priv->rx.info + i;
		skb = info->skb;
		hdr = (struct agnx_hdr *)(skb->data);
		len = (frag & PACKET_LEN) >> PACKET_LEN_SHIFT;
		if (agnx_packet_check(priv, hdr, len) == -1) {
			rx_desc_reusing(priv, i);
			continue;
		}
		skb_put(skb, len);

		do {
			u16 fctl;
			fctl = le16_to_cpu(((struct ieee80211_hdr *)hdr->mac_hdr)->frame_control);
			if ((fctl & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_BEACON)// && !(fctl & IEEE80211_STYPE_BEACON))
				dump_ieee80211_hdr((struct ieee80211_hdr *)hdr->mac_hdr, "RX");
		} while (0);

		/* 11b vs 11g path: strip the hardware header, then put the
		 * 802.11 header saved in hdr->mac_hdr back in front. */
		if (hdr->_11b0 && !hdr->_11g0) {
/*			int j; */
/*			u16 fctl = le16_to_cpu(((struct ieee80211_hdr *)hdr->mac_hdr) */
/*					       ->frame_control); */
/*			if ( (fctl & IEEE80211_FCTL_FTYPE) ==  IEEE80211_FTYPE_DATA) { */
/*				agnx_print_rx_hdr(hdr); */
//				agnx_print_sta(priv, BSSID_STAID);
/*				for (j = 0; j < 8; j++) */
/*					agnx_print_sta_tx_wq(priv, BSSID_STAID, j); */
/*			} */

			get_rx_stats(priv, hdr, &status);
			skb_pull(skb, sizeof(*hdr));
			combine_hdr_frag((struct ieee80211_hdr *)hdr->mac_hdr, skb);
		} else if (!hdr->_11b0 && hdr->_11g0) {
//			int j;
			agnx_print_rx_hdr(hdr);
			agnx_print_sta(priv, BSSID_STAID);
//			for (j = 0; j < 8; j++)
			agnx_print_sta_tx_wq(priv, BSSID_STAID, 0);

			print_hex_dump_bytes("agnx: RX_PACKET: ", DUMP_PREFIX_NONE,
					     skb->data, skb->len + 8);

//			if (agnx_plcp_get_bitrate_ofdm(&hdr->_11g0) == 0)
			get_rx_stats(priv, hdr, &status);
			skb_pull(skb, sizeof(*hdr));
			combine_hdr_frag((struct ieee80211_hdr *)
					 ((void *)&hdr->mac_hdr), skb);
//			dump_ieee80211_hdr((struct ieee80211_hdr *)skb->data, "RX G");
		} else
			agnx_bug("Unknown packets type");
		ieee80211_rx_irqsafe(priv->hw, skb, &status);
		rx_desc_reinit(priv, i);

	/* NOTE(review): post-increment means the loop exits after one
	 * iteration when idx is still 0 and never via this condition
	 * afterwards -- looks unintentional; the OWNER break above is the
	 * real exit.  Confirm before changing. */
	} while ( priv->rx.idx++ );
} /* handle_rx_irq */
/* Reap completed entries from a TX ring (TXM or TXD): unmap the DMA
 * buffers, restore the original 802.11 frame in the skb and report TX
 * status to mac80211.  Stops at the first HEADER entry the hardware
 * still owns; resumes from ring->idx_sent on the next interrupt. */
static inline void handle_tx_irq(struct agnx_priv *priv, struct agnx_ring *ring)
{
	struct agnx_desc *desc;
	struct agnx_info *info;
	unsigned int idx;

	for (idx = ring->idx_sent; idx < ring->idx; idx++) {
		unsigned int i = idx % ring->size;
		u32 frag;

		desc = ring->desc + i;
		info = ring->info + i;

		frag = be32_to_cpu(desc->frag);
		if (frag & OWNER) {
			if (info->type == HEADER)
				break;	/* not sent yet; resume here later */
			else
				agnx_bug("TX error");
		}

		pci_unmap_single(priv->pdev, info->mapping, info->dma_len, PCI_DMA_TODEVICE);

		do {
//			int j;
			size_t len;
			len = info->skb->len - sizeof(struct agnx_hdr) + info->hdr_len;
//			if (len == 614) {
//				agnx_print_desc(desc);
			if (info->type == PACKET) {
//				agnx_print_tx_hdr((struct agnx_hdr *)info->skb->data);
/*				agnx_print_sta_power(priv, LOCAL_STAID); */
/*				agnx_print_sta(priv, LOCAL_STAID); */
/*				// for (j = 0; j < 8; j++) */
/*				agnx_print_sta_tx_wq(priv, LOCAL_STAID, 0); */
//				agnx_print_sta_power(priv, BSSID_STAID);
//				agnx_print_sta(priv, BSSID_STAID);
//				for (j = 0; j < 8; j++)
//				agnx_print_sta_tx_wq(priv, BSSID_STAID, 0);
			}
//			}
		} while (0);

		if (info->type == PACKET) {
//			dump_txm_registers(priv);
//			dump_rxm_registers(priv);
//			dump_bm_registers(priv);
//			dump_cir_registers(priv);
		}

		if (info->type == PACKET) {
//			struct ieee80211_hdr *hdr;
			struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(info->skb);

			/* Drop the hardware header and put the 802.11
			 * header saved in info->hdr back in front. */
			skb_pull(info->skb, sizeof(struct agnx_hdr));
			memcpy(skb_push(info->skb, info->hdr_len), &info->hdr, info->hdr_len);

//			dump_ieee80211_hdr((struct ieee80211_hdr *)info->skb->data, "TX_HANDLE");
/*			print_hex_dump_bytes("agnx: TX_HANDLE: ", DUMP_PREFIX_NONE, */
/*					     info->skb->data, info->skb->len); */

			if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK))
				txi->flags |= IEEE80211_TX_STAT_ACK;

			ieee80211_tx_status_irqsafe(priv->hw, info->skb);
/*			info->tx_status.queue_number = (ring->size - i) / 2; */
/*			ieee80211_tx_status_irqsafe(priv->hw, info->skb, &(info->tx_status)); */
/*		} else */
/*			dev_kfree_skb_irq(info->skb); */
		}
		memset(desc, 0, sizeof(*desc));
		memset(info, 0, sizeof(*info));
	}
	ring->idx_sent = idx;
	/* TODO fill the priv->low_level_stats */
	/* ieee80211_wake_queue(priv->hw, 0); */
}
/* TX-complete interrupt for the management (TXM) ring. */
void handle_txm_irq(struct agnx_priv *priv)
{
	handle_tx_irq(priv, &priv->txm);
}
/* TX-complete interrupt for the data (TXD) ring. */
void handle_txd_irq(struct agnx_priv *priv)
{
	handle_tx_irq(priv, &priv->txd);
}
/* Handle the remaining interrupt causes in priv->irq_status: mostly
 * just logs each cause; for a few RX causes it also masks the source
 * in AGNX_INT_MASK and acknowledges it in AGNX_INT_STAT. */
void handle_other_irq(struct agnx_priv *priv)
{
//	void __iomem *ctl = priv->ctl;
	u32 status = priv->irq_status;
	void __iomem *ctl = priv->ctl;
	u32 reg;

	if (status & IRQ_TX_BEACON) {
		iowrite32(IRQ_TX_BEACON, ctl + AGNX_INT_STAT);
		printk(PFX "IRQ: TX Beacon control is 0X%.8X\n", ioread32(ctl + AGNX_TXM_BEACON_CTL));
		printk(PFX "IRQ: TX Beacon rx frame num: %d\n", rx_frame_cnt);
	}
	if (status & IRQ_TX_RETRY) {
		reg = ioread32(ctl + AGNX_TXM_RETRYSTAID);
		printk(PFX "IRQ: TX Retry, RETRY STA ID is %x\n", reg);
	}
	if (status & IRQ_TX_ACTIVITY)
		printk(PFX "IRQ: TX Activity\n");
	if (status & IRQ_RX_ACTIVITY)
		printk(PFX "IRQ: RX Activity\n");
	if (status & IRQ_RX_X)
		printk(PFX "IRQ: RX X\n");
	if (status & IRQ_RX_Y) {
		/* Mask and acknowledge, then log */
		reg = ioread32(ctl + AGNX_INT_MASK);
		reg &= ~IRQ_RX_Y;
		iowrite32(reg, ctl + AGNX_INT_MASK);
		iowrite32(IRQ_RX_Y, ctl + AGNX_INT_STAT);
		printk(PFX "IRQ: RX Y\n");
	}
	if (status & IRQ_RX_HASHHIT) {
		reg = ioread32(ctl + AGNX_INT_MASK);
		reg &= ~IRQ_RX_HASHHIT;
		iowrite32(reg, ctl + AGNX_INT_MASK);
		iowrite32(IRQ_RX_HASHHIT, ctl + AGNX_INT_STAT);
		printk(PFX "IRQ: RX Hash Hit\n");
	}
	if (status & IRQ_RX_FRAME) {
		reg = ioread32(ctl + AGNX_INT_MASK);
		reg &= ~IRQ_RX_FRAME;
		iowrite32(reg, ctl + AGNX_INT_MASK);
		iowrite32(IRQ_RX_FRAME, ctl + AGNX_INT_STAT);
		printk(PFX "IRQ: RX Frame\n");
		rx_frame_cnt++;
	}
	if (status & IRQ_ERR_INT) {
		iowrite32(IRQ_ERR_INT, ctl + AGNX_INT_STAT);
//		agnx_hw_reset(priv);
		printk(PFX "IRQ: Error Interrupt\n");
	}
	if (status & IRQ_TX_QUE_FULL)
		printk(PFX "IRQ: TX Workqueue Full\n");
	if (status & IRQ_BANDMAN_ERR)
		printk(PFX "IRQ: Bandwidth Management Error\n");
	if (status & IRQ_TX_DISABLE)
		printk(PFX "IRQ: TX Disable\n");
	if (status & IRQ_RX_IVASESKEY)
		printk(PFX "IRQ: RX Invalid Session Key\n");
	if (status & IRQ_REP_THHIT)
		printk(PFX "IRQ: Replay Threshold Hit\n");
	if (status & IRQ_TIMER1)
		printk(PFX "IRQ: Timer1\n");
	if (status & IRQ_TIMER_CNT)
		printk(PFX "IRQ: Timer Count\n");
	if (status & IRQ_PHY_FASTINT)
		printk(PFX "IRQ: Phy Fast Interrupt\n");
	if (status & IRQ_PHY_SLOWINT)
		printk(PFX "IRQ: Phy Slow Interrupt\n");
	if (status & IRQ_OTHER)
		printk(PFX "IRQ: 0x80000000\n");
} /* handle_other_irq */
/* Program the routing field (reg5) of the TX hardware header.
 * NOTE(review): the value is stored without cpu_to_be32() although
 * reg5 is declared __be32 -- the header declaration itself carries a
 * "FIXME this register maybe is LE?" note, so the intended byte order
 * is unconfirmed; do not "fix" without hardware testing. */
static inline void route_flag_set(struct agnx_hdr *txhdr)
{
//	u32 reg = 0;

	/* FIXME */
/*	reg = (0x7 << ROUTE_COMPRESSION_SHIFT) & ROUTE_COMPRESSION; */
/*	txhdr->reg5 = cpu_to_be32(reg); */
	txhdr->reg5 = (0xa << 0x0) | (0x7 << 0x18);
//	txhdr->reg5 = cpu_to_be32((0xa << 0x0) | (0x7 << 0x18));
//	txhdr->reg5 = cpu_to_be32(0x7 << 0x0);
}
/* Map a rate (in units of 100 kbit/s) and antenna count to a TX power
 * level.  Returns 0 if the rate matches no known value (after
 * reporting it via agnx_bug()); two-antenna setups get 3 dB less. */
static inline unsigned int get_power_level(unsigned int rate, unsigned int antennas_num)
{
	/* Initialise to 0 so an unknown rate cannot return an
	 * uninitialized value (the default case falls through). */
	unsigned int power_level = 0;

	switch (rate) {
	case 10:
	case 20:
	case 55:
	case 60:
	case 90:
	case 120: power_level = 22; break;
	case 180: power_level = 19; break;
	case 240: power_level = 18; break;
	case 360: power_level = 16; break;
	case 480: power_level = 15; break;
	case 540: power_level = 14; break;
	default:
		agnx_bug("Error rate setting\n");
	}
	if (power_level && (antennas_num == 2))
		power_level -= 3;

	return power_level;
}
/* Fill in the agnx hardware header at the front of the TX skb:
 * station/workqueue ids, sequence and MAC-header length, a copy of the
 * 802.11 header, retry limits and the length fields of reg3. */
static inline void fill_agnx_hdr(struct agnx_priv *priv, struct agnx_info *tx_info)
{
	struct agnx_hdr *txhdr = (struct agnx_hdr *)tx_info->skb->data;
	size_t len;
	u16 fc = le16_to_cpu(*(__le16 *)&tx_info->hdr);
	u32 reg;

	memset(txhdr, 0, sizeof(*txhdr));

//	reg = agnx_set_bits(STATION_ID, STATION_ID_SHIFT, LOCAL_STAID);
	reg = agnx_set_bits(STATION_ID, STATION_ID_SHIFT, BSSID_STAID);
	reg |= agnx_set_bits(WORKQUEUE_ID, WORKQUEUE_ID_SHIFT, 0);
	txhdr->reg4 = cpu_to_be32(reg);

	/* Set the Hardware Sequence Number to 1? */
	reg = agnx_set_bits(SEQUENCE_NUMBER, SEQUENCE_NUMBER_SHIFT, 0);
//	reg = agnx_set_bits(SEQUENCE_NUMBER, SEQUENCE_NUMBER_SHIFT, 1);
	reg |= agnx_set_bits(MAC_HDR_LEN, MAC_HDR_LEN_SHIFT, tx_info->hdr_len);
	txhdr->reg1 = cpu_to_be32(reg);

	/* Set the agnx_hdr's MAC header */
	memcpy(txhdr->mac_hdr, &tx_info->hdr, tx_info->hdr_len);

	reg = agnx_set_bits(ACK, ACK_SHIFT, 1);
//	reg = agnx_set_bits(ACK, ACK_SHIFT, 0);
	reg |= agnx_set_bits(MULTICAST, MULTICAST_SHIFT, 0);
//	reg |= agnx_set_bits(MULTICAST, MULTICAST_SHIFT, 1);
	reg |= agnx_set_bits(RELAY, RELAY_SHIFT, 0);
	reg |= agnx_set_bits(TM, TM_SHIFT, 0);
	txhdr->reg0 = cpu_to_be32(reg);

	/* Set the long and short retry limits */
	txhdr->tx.short_retry_limit = tx_info->txi->control.rates[0].count;
	txhdr->tx.long_retry_limit = tx_info->txi->control.rates[0].count;

	/* FIXME */
	len = tx_info->skb->len - sizeof(*txhdr) + tx_info->hdr_len + FCS_LEN;
	if (fc & IEEE80211_FCTL_PROTECTED)
		len += 8;
	/* NOTE(review): the computed length above is immediately discarded
	 * and a constant 2398 used for FRAG_SIZE -- presumably a debug
	 * leftover; confirm with hardware before removing. */
	len = 2398;
	reg = agnx_set_bits(FRAG_SIZE, FRAG_SIZE_SHIFT, len);
	len = tx_info->skb->len - sizeof(*txhdr);
	reg |= agnx_set_bits(PAYLOAD_LEN, PAYLOAD_LEN_SHIFT, len);
	txhdr->reg3 = cpu_to_be32(reg);

	route_flag_set(txhdr);
} /* fill_hdr */
/* Program per-station TX power/rate parameters for the BSSID station:
 * 11b short-preamble mode when no valid rate index is set, 11g
 * otherwise.  Signal, rate and power level are currently hard-coded. */
static void txm_power_set(struct agnx_priv *priv,
			  struct ieee80211_tx_info *txi)
{
	struct agnx_sta_power power;
	u32 reg;

	/* FIXME */
	if (txi->control.rates[0].idx < 0) {
		/* For B mode Short Preamble */
		reg = agnx_set_bits(PHY_MODE, PHY_MODE_SHIFT, AGNX_MODE_80211B_SHORT);
//		control->tx_rate = -control->tx_rate;
	} else
		reg = agnx_set_bits(PHY_MODE, PHY_MODE_SHIFT, AGNX_MODE_80211G);
//		reg = agnx_set_bits(PHY_MODE, PHY_MODE_SHIFT, AGNX_MODE_80211B_LONG);
	reg |= agnx_set_bits(SIGNAL, SIGNAL_SHIFT, 0xB);
	reg |= agnx_set_bits(RATE, RATE_SHIFT, 0xB);
//	reg |= agnx_set_bits(POWER_LEVEL, POWER_LEVEL_SHIFT, 15);
	reg |= agnx_set_bits(POWER_LEVEL, POWER_LEVEL_SHIFT, 20);
	/* if rate < 11M set it to 0 */
	reg |= agnx_set_bits(NUM_TRANSMITTERS, NUM_TRANSMITTERS_SHIFT, 1);
//	reg |= agnx_set_bits(EDCF, EDCF_SHIFT, 1);
//	reg |= agnx_set_bits(TIFS, TIFS_SHIFT, 1);

	power.reg = reg;
//	power.reg = cpu_to_le32(reg);

//	set_sta_power(priv, &power, LOCAL_STAID);
	set_sta_power(priv, &power, BSSID_STAID);
}
/* Sanity-check an outgoing skb before queueing.
 * Returns 0 when the packet looks usable, -1 when it must be dropped
 * (too long, or carrying no payload beyond the 802.11 header). */
static inline int tx_packet_check(struct sk_buff *skb)
{
	unsigned int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	unsigned int total = skb->len;

	if (total > 2048) {
		printk(KERN_ERR PFX "length is %d\n", skb->len);
		agnx_bug("Too long TX skb");
		return -1;
	}
	/* FIXME */
	if (total == hdrlen) {
		printk(PFX "A strange TX packet\n");
		return -1;
		/* tx_faile_irqsafe(); */
	}
	return 0;
}
/* Queue one frame on @ring as two descriptors -- a HEADER slot holding
 * the agnx hardware header and a PACKET slot holding the payload --
 * map both for DMA, then kick the TXM and TXD engines.  Returns 0. */
static int __agnx_tx(struct agnx_priv *priv, struct sk_buff *skb,
		     struct agnx_ring *ring)
{
	struct agnx_desc *hdr_desc, *frag_desc;
	struct agnx_info *hdr_info, *frag_info;
	struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&priv->lock, flags);

	/* The RX interrupt need be Disable until this TX packet
	   is handled in the next tx interrupt */
	disable_rx_interrupt(priv);

	i = ring->idx;
	ring->idx += 2;	/* one slot for the header, one for the payload */
/*	if (priv->txm_idx - priv->txm_idx_sent == AGNX_TXM_RING_SIZE - 2) */
/*		ieee80211_stop_queue(priv->hw, 0); */

	/* Set agnx header's info and desc */
	i %= ring->size;
	hdr_desc = ring->desc + i;
	hdr_info = ring->info + i;
	hdr_info->hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	/* Save the 802.11 header so TX completion can restore it */
	memcpy(&hdr_info->hdr, skb->data, hdr_info->hdr_len);

	/* Add the agnx header to the front of the SKB */
	skb_push(skb, sizeof(struct agnx_hdr) - hdr_info->hdr_len);

	hdr_info->txi = txi;
	hdr_info->dma_len = sizeof(struct agnx_hdr);
	hdr_info->skb = skb;
	hdr_info->type = HEADER;
	fill_agnx_hdr(priv, hdr_info);
	hdr_info->mapping = pci_map_single(priv->pdev, skb->data,
					   hdr_info->dma_len, PCI_DMA_TODEVICE);
	do {
		u32 frag = 0;
		frag |= agnx_set_bits(FIRST_FRAG, FIRST_FRAG_SHIFT, 1);
		frag |= agnx_set_bits(LAST_FRAG, LAST_FRAG_SHIFT, 0);
		frag |= agnx_set_bits(PACKET_LEN, PACKET_LEN_SHIFT, skb->len);
		frag |= agnx_set_bits(FIRST_FRAG_LEN, FIRST_FRAG_LEN_SHIFT, 1);
		frag |= agnx_set_bits(OWNER, OWNER_SHIFT, 1);
		hdr_desc->frag = cpu_to_be32(frag);
	} while (0);
	hdr_desc->dma_addr = cpu_to_be32(hdr_info->mapping);

	/* Set Frag's info and desc */
	i = (i + 1) % ring->size;
	frag_desc = ring->desc + i;
	frag_info = ring->info + i;
	memcpy(frag_info, hdr_info, sizeof(struct agnx_info));
	frag_info->type = PACKET;
	frag_info->dma_len = skb->len - hdr_info->dma_len;
	frag_info->mapping = pci_map_single(priv->pdev, skb->data + hdr_info->dma_len,
					    frag_info->dma_len, PCI_DMA_TODEVICE);
	do {
		u32 frag = 0;
		frag |= agnx_set_bits(FIRST_FRAG, FIRST_FRAG_SHIFT, 0);
		frag |= agnx_set_bits(LAST_FRAG, LAST_FRAG_SHIFT, 1);
		frag |= agnx_set_bits(PACKET_LEN, PACKET_LEN_SHIFT, skb->len);
		frag |= agnx_set_bits(SUB_FRAG_LEN, SUB_FRAG_LEN_SHIFT, frag_info->dma_len);
		frag_desc->frag = cpu_to_be32(frag);
	} while (0);
	frag_desc->dma_addr = cpu_to_be32(frag_info->mapping);

	txm_power_set(priv, txi);

/*	do { */
/*		int j; */
/*		size_t len; */
/*		len = skb->len - hdr_info->dma_len + hdr_info->hdr_len;  */
/*		// if (len == 614) { */
/*			agnx_print_desc(hdr_desc); */
/*			agnx_print_desc(frag_desc); */
/*			agnx_print_tx_hdr((struct agnx_hdr *)skb->data); */
/*			agnx_print_sta_power(priv, LOCAL_STAID); */
/*			agnx_print_sta(priv, LOCAL_STAID); */
/*			for (j = 0; j < 8; j++) */
/*				agnx_print_sta_tx_wq(priv, LOCAL_STAID, j); */
/*			agnx_print_sta_power(priv, BSSID_STAID); */
/*			agnx_print_sta(priv, BSSID_STAID); */
/*			for (j = 0; j < 8; j++) */
/*				agnx_print_sta_tx_wq(priv, BSSID_STAID, j); */
/*			// } */
/*	} while (0); */

	spin_unlock_irqrestore(&priv->lock, flags);

	/* FIXME ugly code */
	/* Trigger TXM */
	do {
		u32 reg;
		reg = (ioread32(priv->ctl + AGNX_CIR_TXMCTL));
		reg |= 0x8;
		iowrite32((reg), priv->ctl + AGNX_CIR_TXMCTL);
	}while (0);

	/* Trigger TXD */
	do {
		u32 reg;
		reg = (ioread32(priv->ctl + AGNX_CIR_TXDCTL));
		reg |= 0x8;
		iowrite32((reg), priv->ctl + AGNX_CIR_TXDCTL);
	}while (0);

	return 0;
}
/* Top-level TX entry: validate the skb, then route data frames to the
 * TXD ring and everything else (management) to the TXM ring. */
int _agnx_tx(struct agnx_priv *priv, struct sk_buff *skb)
{
	struct agnx_ring *ring;
	u16 fctl;

	if (tx_packet_check(skb))
		return 0;

/*	print_hex_dump_bytes("agnx: TX_PACKET: ", DUMP_PREFIX_NONE, */
/*			     skb->data, skb->len); */

	fctl = le16_to_cpu(*((__le16 *)skb->data));
	ring = ((fctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) ?
		&priv->txd : &priv->txm;
	return __agnx_tx(priv, skb, ring);
}

250
drivers/staging/agnx/xmit.h Normal file
View File

@ -0,0 +1,250 @@
#ifndef AGNX_XMIT_H_
#define AGNX_XMIT_H_
#include <net/mac80211.h>
struct agnx_priv;
/* Place @value into the bit-field described by @mask/@shift. */
static inline u32 agnx_set_bits(u32 mask, u8 shift, u32 value)
{
	u32 shifted = value << shift;

	return shifted & mask;
}
/* Extract the bit-field described by @mask/@shift from @value. */
static inline u32 agnx_get_bits(u32 mask, u8 shift, u32 value)
{
	u32 masked = value & mask;

	return masked >> shift;
}
/* RX half of the per-frame union inside struct agnx_hdr. */
struct agnx_rx {
	__be16 rx_packet_duration; /*  RX Packet Duration */
	__be16 replay_cnt;	/* Replay Count */
} __attribute__((__packed__));
/* TX half of the per-frame union inside struct agnx_hdr. */
struct agnx_tx {
	u8 long_retry_limit; /* Long Retry Limit */
	u8 short_retry_limit; /* Short Retry Limit */
	u8 long_retry_cnt;	/* Long Retry Count */
	u8 short_retry_cnt; /* Short Retry Count */
} __attribute__((__packed__));
/* Copy from bcm43xx */
#define P4D_BYT3S(magic, nr_bytes) u8 __p4dding##magic[nr_bytes]
#define P4D_BYTES(line, nr_bytes) P4D_BYT3S(line, nr_bytes)
#define PAD_BYTES(nr_bytes) P4D_BYTES(__LINE__, nr_bytes)
#define P4D_BIT3S(magic, nr_bits) __be32 __padding##magic:nr_bits
#define P4D_BITS(line, nr_bits) P4D_BIT3S(line, nr_bits)
#define PAD_BITS(nr_bits) P4D_BITS(__LINE__, nr_bits)
/* Hardware frame header prepended to every TX/RX frame.  The #define
 * pairs below each __be32 word give the mask and shift of the
 * bit-fields packed into that word (used with agnx_set_bits() /
 * agnx_get_bits()). */
struct agnx_hdr {
	__be32 reg0;
#define RTS		0x80000000 /* RTS */
#define RTS_SHIFT	31
#define MULTICAST	0x40000000 /* multicast */
#define MULTICAST_SHIFT	30
#define ACK		0x30000000 /* ACK */
#define ACK_SHIFT	28
#define TM		0x08000000 /* TM */
#define TM_SHIFT	27
#define RELAY		0x04000000 /* Relay */
#define RELAY_SHIFT	26
/* 	PAD_BITS(4); */
#define REVISED_FCS	0x00380000 /* revised FCS */
#define REVISED_FCS_SHIFT	19
#define NEXT_BUFFER_ADDR	0x0007FFFF /* Next Buffer Address */
#define NEXT_BUFFER_ADDR_SHIFT	0

	__be32 reg1;
#define MAC_HDR_LEN	0xFC000000 /* MAC Header Length */
#define MAC_HDR_LEN_SHIFT	26
#define DURATION_OVERIDE	0x02000000 /* Duration Override */
#define DURATION_OVERIDE_SHIFT	25
#define PHY_HDR_OVERIDE	0x01000000 /* PHY Header Override */
#define PHY_HDR_OVERIDE_SHIFT	24
#define CRC_FAIL	0x00800000 /* CRC fail */
#define CRC_FAIL_SHIFT	23
/*	PAD_BITS(1); */
#define SEQUENCE_NUMBER	0x00200000 /* Sequence Number */
#define SEQUENCE_NUMBER_SHIFT	21
/*	PAD_BITS(2); */
#define BUFF_HEAD_ADDR	0x0007FFFF /* Buffer Head Address */
#define BUFF_HEAD_ADDR_SHIFT	0

	__be32 reg2;
#define PDU_COUNT	0xFC000000 /* PDU Count */
#define PDU_COUNT_SHIFT	26
/*	PAD_BITS(3); */
#define WEP_KEY	0x00600000 /* WEP Key # */
#define WEP_KEY_SHIFT	21
#define USES_WEP_KEY	0x00100000 /* Uses WEP Key */
#define USES_WEP_KEY_SHIFT	20
#define KEEP_ALIVE	0x00080000 /* Keep alive */
#define KEEP_ALIVE_SHIFT	19
#define BUFF_TAIL_ADDR	0x0007FFFF /* Buffer Tail Address */
#define BUFF_TAIL_ADDR_SHIFT	0

	__be32 reg3;
#define CTS_11G	0x80000000	/* CTS in 11g */
#define CTS_11G_SHIFT	31
#define RTS_11G	0x40000000	/* RTS in 11g */
#define RTS_11G_SHIFT	30
/*	PAD_BITS(2); */
#define FRAG_SIZE	0x0FFF0000 /* fragment size */
#define FRAG_SIZE_SHIFT	16
#define PAYLOAD_LEN	0x0000FFF0 /* payload length */
#define PAYLOAD_LEN_SHIFT	4
#define FRAG_NUM	0x0000000F /* number of frags */
#define FRAG_NUM_SHIFT	0

	__be32 reg4;
/*	PAD_BITS(4); */
#define RELAY_STAID	0x0FFF0000 /* relayStald */
#define RELAY_STAID_SHIFT	16
#define STATION_ID	0x0000FFF0 /* Station ID */
#define STATION_ID_SHIFT	4
#define WORKQUEUE_ID	0x0000000F /* Workqueue ID */
#define WORKQUEUE_ID_SHIFT	0

	/* FIXME this register maybe is LE? */
	__be32 reg5;
/*	PAD_BITS(4); */
#define ROUTE_HOST	0x0F000000
#define ROUTE_HOST_SHIFT	24
#define ROUTE_CARD_CPU	0x00F00000
#define ROUTE_CARD_CPU_SHIFT	20
#define ROUTE_ENCRYPTION	0x000F0000
#define ROUTE_ENCRYPTION_SHIFT	16
#define ROUTE_TX	0x0000F000
#define ROUTE_TX_SHIFT	12
#define ROUTE_RX1	0x00000F00
#define ROUTE_RX1_SHIFT	8
#define ROUTE_RX2	0x000000F0
#define ROUTE_RX2_SHIFT	4
#define ROUTE_COMPRESSION	0x0000000F
#define ROUTE_COMPRESSION_SHIFT	0

	__be32 _11g0;		/* 11g */
	__be32 _11g1;		/* 11g */
	__be32 _11b0;		/* 11b */
	__be32 _11b1;		/* 11b */
	u8 mac_hdr[32];		/* MAC header */

	__be16 rts_duration;	/* RTS duration */
	__be16 last_duration;	/* Last duration */
	__be16 sec_last_duration; /* Second to Last duration */
	__be16 other_duration;	/* Other duration */
	__be16 tx_last_duration; /* TX Last duration */
	__be16 tx_other_duration; /* TX Other Duration */
	__be16 last_11g_len;	/* Length of last 11g */
	__be16 other_11g_len;	/* Length of other 11g */
	__be16 last_11b_len;	/* Length of last 11b */
	__be16 other_11b_len;	/* Length of other 11b */

	__be16 reg6;
#define MBF	0xF000		/* mbf */
#define MBF_SHIFT	12
#define RSVD4	0x0FFF		/* rsvd4 */
#define RSVD4_SHIFT	0

	__be16 rx_frag_stat;	/* RX fragmentation status */

	__be32 time_stamp;	/* TimeStamp */
	__be32 phy_stats_hi;	/* PHY stats hi */
	__be32 phy_stats_lo;	/* PHY stats lo */
	__be32 mic_key0;	/* MIC key 0 */
	__be32 mic_key1;	/* MIC key 1 */

	union {			/* RX/TX Union */
		struct agnx_rx rx;
		struct agnx_tx tx;
	};

	u8 rx_channel;		/* Receive Channel */
	PAD_BYTES(3);

	u8 reserved[4];
} __attribute__((__packed__));
/* Hardware ring descriptor: a packed flags/length word plus the DMA
 * address of the buffer.  The FIRST_* fields apply to the first
 * fragment of a frame, the SUB_* fields to subsequent fragments. */
struct agnx_desc {
#define PACKET_LEN	0xFFF00000
#define PACKET_LEN_SHIFT	20
/* ------------------------------------------------ */
#define FIRST_PACKET_MASK	0x00080000
#define FIRST_PACKET_MASK_SHIFT	19
#define FIRST_RESERV2	0x00040000
#define FIRST_RESERV2_SHIFT	18
#define FIRST_TKIP_ERROR	0x00020000
#define FIRST_TKIP_ERROR_SHIFT	17
#define FIRST_TKIP_PACKET	0x00010000
#define FIRST_TKIP_PACKET_SHIFT	16
#define FIRST_RESERV1	0x0000F000
#define FIRST_RESERV1_SHIFT	12
#define FIRST_FRAG_LEN	0x00000FF8
#define FIRST_FRAG_LEN_SHIFT	3
/* ------------------------------------------------ */
#define SUB_RESERV2	0x000c0000
#define SUB_RESERV2_SHIFT	18
#define SUB_TKIP_ERROR	0x00020000
#define SUB_TKIP_ERROR_SHIFT	17
#define SUB_TKIP_PACKET	0x00010000
#define SUB_TKIP_PACKET_SHIFT	16
#define SUB_RESERV1	0x00008000
#define SUB_RESERV1_SHIFT	15
#define SUB_FRAG_LEN	0x00007FF8
#define SUB_FRAG_LEN_SHIFT	3
/* ------------------------------------------------ */
#define FIRST_FRAG	0x00000004
#define FIRST_FRAG_SHIFT	2
#define LAST_FRAG	0x00000002
#define LAST_FRAG_SHIFT	1
#define OWNER		0x00000001 /* set while the hardware owns the slot */
#define OWNER_SHIFT	0
	__be32 frag;
	__be32 dma_addr;
} __attribute__((__packed__));
/* agnx_info.type values: HEADER = hardware-header slot,
 * PACKET = payload slot. */
enum {HEADER, PACKET};

/* Driver-side bookkeeping for one ring slot (never seen by hardware). */
struct agnx_info {
	struct sk_buff *skb;
	dma_addr_t mapping;
	u32 dma_len;		/* dma buffer len  */
	/* Below fields only useful for tx */
	u32 hdr_len;		/* ieee80211 header length */
	unsigned int type;
	struct ieee80211_tx_info *txi;
	struct ieee80211_hdr hdr;
};
/* One DMA ring (RX, TXM or TXD): descriptor array, its bus address,
 * matching per-slot driver info and the producer/consumer indices. */
struct agnx_ring {
	struct agnx_desc *desc;
	dma_addr_t dma;
	struct agnx_info *info;
	/* Will lead to overflow when sent packet number enough? */
	unsigned int idx;
	unsigned int idx_sent;		/* only useful for txd and txm */
	unsigned int size;
};
#define AGNX_RX_RING_SIZE 128
#define AGNX_TXD_RING_SIZE 256
#define AGNX_TXM_RING_SIZE 128
void disable_rx_interrupt(struct agnx_priv *priv);
void enable_rx_interrupt(struct agnx_priv *priv);
int fill_rings(struct agnx_priv *priv);
void unfill_rings(struct agnx_priv *priv);
void handle_rx_irq(struct agnx_priv *priv);
void handle_txd_irq(struct agnx_priv *priv);
void handle_txm_irq(struct agnx_priv *priv);
void handle_other_irq(struct agnx_priv *priv);
int _agnx_tx(struct agnx_priv *priv, struct sk_buff *skb);
#endif /* AGNX_XMIT_H_ */

View File

@ -0,0 +1,10 @@
config ALTERA_PCIE_CHDMA
	tristate "Altera PCI Express Chaining DMA driver"
	depends on PCI
	# Kconfig tristate values are lowercase; "N" names an (undefined)
	# symbol rather than the constant n.
	default n
	---help---
	  A reference driver that exercises the Chaining DMA logic reference
	  design generated along the Altera FPGA PCI Express soft or hard core,
	  only if instantiated using the MegaWizard, not the SOPC builder, of
	  Quartus 8.1.

View File

@ -0,0 +1,2 @@
obj-$(CONFIG_ALTERA_PCIE_CHDMA) += altpciechdma.o

View File

@ -0,0 +1,15 @@
DONE:
- functionality similar to logic testbench
TODO:
- checkpatch.pl cleanups.
- keep state of DMA engines.
- keep data structure that keeps state of each transfer.
- interrupt handler should iterate over outstanding descriptor tables.
- complete userspace cdev to read/write using the DMA engines.
- split off the DMA support functions in a module, re-usable by custom
drivers.
Please coordinate work with, and send patches to
Leon Woestenberg <leon@sidebranch.com>

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,86 @@
menu "Android"

config ANDROID
	bool "Android Drivers"
	# lowercase n: "N" would reference an undefined Kconfig symbol
	default n
	---help---
	  Enable support for various drivers needed on the Android platform

config ANDROID_BINDER_IPC
	bool "Android Binder IPC Driver"
	default n

config ANDROID_LOGGER
	tristate "Android log driver"
	default n

config ANDROID_RAM_CONSOLE
	bool "Android RAM buffer console"
	default n

config ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
	bool "Enable verbose console messages on Android RAM console"
	default y
	depends on ANDROID_RAM_CONSOLE

menuconfig ANDROID_RAM_CONSOLE_ERROR_CORRECTION
	bool "Android RAM Console Enable error correction"
	default n
	depends on ANDROID_RAM_CONSOLE
	select REED_SOLOMON
	select REED_SOLOMON_ENC8
	select REED_SOLOMON_DEC8

if ANDROID_RAM_CONSOLE_ERROR_CORRECTION

config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE
	int "Android RAM Console Data data size"
	default 128
	help
	  Must be a power of 2.

config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE
	int "Android RAM Console ECC size"
	default 16

config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE
	int "Android RAM Console Symbol size"
	default 8

config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL
	hex "Android RAM Console Polynomial"
	default 0x19 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 4)
	default 0x29 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 5)
	default 0x61 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 6)
	default 0x89 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 7)
	default 0x11d if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 8)

endif # ANDROID_RAM_CONSOLE_ERROR_CORRECTION

config ANDROID_RAM_CONSOLE_EARLY_INIT
	bool "Start Android RAM console early"
	default n
	depends on ANDROID_RAM_CONSOLE

config ANDROID_RAM_CONSOLE_EARLY_ADDR
	hex "Android RAM console virtual address"
	default 0
	depends on ANDROID_RAM_CONSOLE_EARLY_INIT

config ANDROID_RAM_CONSOLE_EARLY_SIZE
	hex "Android RAM console buffer size"
	default 0
	depends on ANDROID_RAM_CONSOLE_EARLY_INIT

config ANDROID_TIMED_GPIO
	tristate "Android timed gpio driver"
	depends on GENERIC_GPIO
	default n

config ANDROID_LOW_MEMORY_KILLER
	bool "Android Low Memory Killer"
	default n
	---help---
	  Register processes to be killed when memory is low

endmenu

View File

@ -0,0 +1,5 @@
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
obj-$(CONFIG_ANDROID_LOGGER) += logger.o
obj-$(CONFIG_ANDROID_RAM_CONSOLE) += ram_console.o
obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o
obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o

View File

@ -0,0 +1,10 @@
TODO:
- checkpatch.pl cleanups
- sparse fixes
- rename files to be not so "generic"
- make sure things build as modules properly
- add proper arch dependencies as needed
- audit userspace interfaces to make sure they are sane
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
Brian Swetland <swetland@google.com>

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,330 @@
/*
* Copyright (C) 2008 Google, Inc.
*
* Based on, but no longer compatible with, the original
* OpenBinder.org binder driver interface, which is:
*
* Copyright (c) 2005 Palmsource, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _LINUX_BINDER_H
#define _LINUX_BINDER_H
#include <linux/ioctl.h>
/* Pack four characters into a 32-bit object-type tag; c1 becomes the
 * most significant byte. */
#define B_PACK_CHARS(c1, c2, c3, c4) \
	((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
#define B_TYPE_LARGE 0x85

/* Object type tags carried in flat_binder_object.type. */
enum {
	BINDER_TYPE_BINDER	= B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
	BINDER_TYPE_WEAK_BINDER	= B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
	BINDER_TYPE_HANDLE	= B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
	BINDER_TYPE_WEAK_HANDLE	= B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
	BINDER_TYPE_FD		= B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
};

/* Flag bits carried in flat_binder_object.flags. */
enum {
	FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
	FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
};
/*
* This is the flattened representation of a Binder object for transfer
* between processes. The 'offsets' supplied as part of a binder transaction
* contains offsets into the data where these structures occur. The Binder
* driver takes care of re-writing the structure type and data as it moves
* between processes.
*/
struct flat_binder_object {
	/* 8 bytes for large_flat_header. */
	unsigned long		type;	/* one of the BINDER_TYPE_* tags */
	unsigned long		flags;	/* FLAT_BINDER_FLAG_* bits */

	/* 8 bytes of data; which member is valid depends on 'type'
	 * (binder for *_BINDER types, handle for *_HANDLE/FD types). */
	union {
		void		*binder;	/* local object */
		signed long	handle;		/* remote object */
	};

	/* extra data associated with local object */
	void			*cookie;
};
/*
 * On 64-bit platforms where user code may run in 32-bits the driver must
 * translate the buffer (and local binder) addresses appropriately.
 */
/* Argument block for the BINDER_WRITE_READ ioctl: a command buffer to
 * send to the driver and a buffer for returned commands.  The driver
 * updates the *_consumed fields as it processes each buffer. */
struct binder_write_read {
	signed long	write_size;	/* bytes to write */
	signed long	write_consumed;	/* bytes consumed by driver */
	unsigned long	write_buffer;
	signed long	read_size;	/* bytes to read */
	signed long	read_consumed;	/* bytes consumed by driver */
	unsigned long	read_buffer;
};
/* Use with BINDER_VERSION, driver fills in fields. */
struct binder_version {
	/* driver protocol version -- increment with incompatible change */
	signed long	protocol_version;
};
/* This is the current protocol version. */
#define BINDER_CURRENT_PROTOCOL_VERSION 7
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, int64_t)
#define BINDER_SET_MAX_THREADS _IOW('b', 5, size_t)
#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, int)
#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, int)
#define BINDER_THREAD_EXIT _IOW('b', 8, int)
#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
/*
 * NOTE: Two special error codes you should check for when calling
 * in to the driver are:
 *
 * EINTR -- The operation has been interrupted.  This should be
 * handled by retrying the ioctl() until a different error code
 * is returned.
 *
 * ECONNREFUSED -- The driver is no longer accepting operations
 * from your process.  That is, the process is being destroyed.
 * You should handle this by exiting from your process.  Note
 * that once this error code is returned, all further calls to
 * the driver from any thread will return this same code.
 */
/* Bits for binder_transaction_data.flags. */
enum transaction_flags {
	TF_ONE_WAY	= 0x01,	/* this is a one-way call: async, no return */
	TF_ROOT_OBJECT	= 0x04,	/* contents are the component's root object */
	TF_STATUS_CODE	= 0x08,	/* contents are a 32-bit status code */
	TF_ACCEPT_FDS	= 0x10,	/* allow replies with file descriptors */
};
/*
 * binder_transaction_data - payload of BC_TRANSACTION/BC_REPLY commands
 * written by user space, and of BR_TRANSACTION/BR_REPLY returns read back.
 */
struct binder_transaction_data {
	/* The first two are only used for bcTRANSACTION and brTRANSACTION,
	 * identifying the target and contents of the transaction.
	 */
	union {
		size_t	handle;	/* target descriptor of command transaction */
		void	*ptr;	/* target descriptor of return transaction */
	} target;
	void		*cookie;	/* target object cookie */
	unsigned int	code;		/* transaction command */

	/* General information about the transaction. */
	unsigned int	flags;		/* enum transaction_flags bits */
	pid_t		sender_pid;	/* filled in by the driver */
	uid_t		sender_euid;	/* filled in by the driver */
	size_t		data_size;	/* number of bytes of data */
	size_t		offsets_size;	/* number of bytes of offsets */

	/* If this transaction is inline, the data immediately
	 * follows here; otherwise, it ends with a pointer to
	 * the data buffer.
	 */
	union {
		struct {
			/* transaction data */
			const void	*buffer;
			/* offsets from buffer to flat_binder_object structs */
			const void	*offsets;
		} ptr;
		uint8_t	buf[8];
	} data;
};
/* (binder pointer, cookie) pair used by several commands/returns below. */
struct binder_ptr_cookie {
	void	*ptr;
	void	*cookie;
};

/* (priority, descriptor) pair for BC_ATTEMPT_ACQUIRE. */
struct binder_pri_desc {
	int	priority;
	int	desc;
};

/* (priority, binder pointer, cookie) triple for BR_ATTEMPT_ACQUIRE. */
struct binder_pri_ptr_cookie {
	int	priority;
	void	*ptr;
	void	*cookie;
};
/*
 * Return codes delivered from the driver to user space via the read side of
 * BINDER_WRITE_READ.  Each value encodes (via _IO*) the size of the payload
 * that follows it in the read buffer.
 */
enum BinderDriverReturnProtocol {
	BR_ERROR = _IOR('r', 0, int),
	/*
	 * int: error code
	 */

	BR_OK = _IO('r', 1),
	/* No parameters! */

	BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
	BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
	/*
	 * binder_transaction_data: the received command.
	 */

	BR_ACQUIRE_RESULT = _IOR('r', 4, int),
	/*
	 * not currently supported
	 * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
	 * Else the remote object has acquired a primary reference.
	 */

	BR_DEAD_REPLY = _IO('r', 5),
	/*
	 * The target of the last transaction (either a bcTRANSACTION or
	 * a bcATTEMPT_ACQUIRE) is no longer with us.  No parameters.
	 */

	BR_TRANSACTION_COMPLETE = _IO('r', 6),
	/*
	 * No parameters... always refers to the last transaction requested
	 * (including replies).  Note that this will be sent even for
	 * asynchronous transactions.
	 */

	BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
	BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
	BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
	BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
	/*
	 * void *:	ptr to binder
	 * void *:	cookie for binder
	 */

	BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
	/*
	 * not currently supported
	 * int:	priority
	 * void *: ptr to binder
	 * void *: cookie for binder
	 */

	BR_NOOP = _IO('r', 12),
	/*
	 * No parameters.  Do nothing and examine the next command.  It exists
	 * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
	 */

	BR_SPAWN_LOOPER = _IO('r', 13),
	/*
	 * No parameters.  The driver has determined that a process has no
	 * threads waiting to service incoming transactions.  When a process
	 * receives this command, it must spawn a new service thread and
	 * register it via bcENTER_LOOPER.
	 */

	BR_FINISHED = _IO('r', 14),
	/*
	 * not currently supported
	 * stop threadpool thread
	 */

	BR_DEAD_BINDER = _IOR('r', 15, void *),
	/*
	 * void *: cookie
	 */
	BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *),
	/*
	 * void *: cookie
	 */

	BR_FAILED_REPLY = _IO('r', 17),
	/*
	 * The last transaction (either a bcTRANSACTION or
	 * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory).  No parameters.
	 */
};
/*
 * Commands written from user space to the driver via the write side of
 * BINDER_WRITE_READ.  Each value encodes (via _IO*) the size of the payload
 * that follows it in the write buffer.
 */
enum BinderDriverCommandProtocol {
	BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
	BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
	/*
	 * binder_transaction_data: the sent command.
	 */

	BC_ACQUIRE_RESULT = _IOW('c', 2, int),
	/*
	 * not currently supported
	 * int:  0 if the last BR_ATTEMPT_ACQUIRE was not successful.
	 * Else you have acquired a primary reference on the object.
	 */

	BC_FREE_BUFFER = _IOW('c', 3, int),
	/*
	 * void *: ptr to transaction data received on a read
	 */

	BC_INCREFS = _IOW('c', 4, int),
	BC_ACQUIRE = _IOW('c', 5, int),
	BC_RELEASE = _IOW('c', 6, int),
	BC_DECREFS = _IOW('c', 7, int),
	/*
	 * int:	descriptor
	 */

	BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
	BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
	/*
	 * void *: ptr to binder
	 * void *: cookie for binder
	 */

	BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
	/*
	 * not currently supported
	 * int: priority
	 * int: descriptor
	 */

	BC_REGISTER_LOOPER = _IO('c', 11),
	/*
	 * No parameters.
	 * Register a spawned looper thread with the device.
	 */

	BC_ENTER_LOOPER = _IO('c', 12),
	BC_EXIT_LOOPER = _IO('c', 13),
	/*
	 * No parameters.
	 * These two commands are sent as an application-level thread
	 * enters and exits the binder loop, respectively.  They are
	 * used so the binder can have an accurate count of the number
	 * of looping threads it has available.
	 */

	BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie),
	/*
	 * void *: ptr to binder
	 * void *: cookie
	 */

	BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie),
	/*
	 * void *: ptr to binder
	 * void *: cookie
	 */

	BC_DEAD_BINDER_DONE = _IOW('c', 16, void *),
	/*
	 * void *: cookie
	 */
};
#endif /* _LINUX_BINDER_H */

View File

@ -0,0 +1,607 @@
/*
* drivers/misc/logger.c
*
* A Logging Subsystem
*
* Copyright (C) 2007-2008 Google, Inc.
*
* Robert Love <rlove@google.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/time.h>
#include "logger.h"
#include <asm/ioctls.h>
/*
* struct logger_log - represents a specific log, such as 'main' or 'radio'
*
* This structure lives from module insertion until module removal, so it does
* not need additional reference counting. The structure is protected by the
* mutex 'mutex'.
*/
struct logger_log {
	unsigned char *		buffer;	/* the ring buffer itself */
	struct miscdevice	misc;	/* misc device representing the log */
	wait_queue_head_t	wq;	/* wait queue for readers */
	struct list_head	readers; /* this log's readers */
	struct mutex		mutex;	/* mutex protecting buffer */
	size_t			w_off;	/* current write head offset */
	size_t			head;	/* new readers start here */
	size_t			size;	/* size of the log (power of two) */
};
/*
* struct logger_reader - a logging device open for reading
*
* This object lives from open to release, so we don't need additional
* reference counting. The structure is protected by log->mutex.
*/
struct logger_reader {
	struct logger_log *	log;	/* associated log */
	struct list_head	list;	/* entry in logger_log's list */
	size_t			r_off;	/* current read head offset */
};

/* logger_offset - returns index 'n' into the log via (optimized) modulus */
/* NOTE: relies on a variable named 'log' being in scope at each use site,
 * and on log->size being a power of two (mask instead of '%'). */
#define logger_offset(n)	((n) & (log->size - 1))
/*
* file_get_log - Given a file structure, return the associated log
*
* This isn't aesthetic. We have several goals:
*
* 1) Need to quickly obtain the associated log during an I/O operation
* 2) Readers need to maintain state (logger_reader)
* 3) Writers need to be very fast (open() should be a near no-op)
*
* In the reader case, we can trivially go file->logger_reader->logger_log.
* For a writer, we don't want to maintain a logger_reader, so we just go
* file->logger_log. Thus what file->private_data points at depends on whether
* or not the file was opened for reading. This function hides that dirtiness.
*/
/*
 * Resolve the logger_log behind an open file.  Readers store a
 * logger_reader in file->private_data; writers store the log itself
 * (see the commentary above this function).
 */
static inline struct logger_log *file_get_log(struct file *file)
{
	struct logger_reader *reader;

	if (!(file->f_mode & FMODE_READ))
		return file->private_data;

	reader = file->private_data;
	return reader->log;
}
/*
* get_entry_len - Grabs the length of the payload of the next entry starting
* from 'off'.
*
* Caller needs to hold log->mutex.
*/
/*
 * get_entry_len - Grabs the length of the payload of the next entry starting
 * from 'off'.
 *
 * The 2-byte 'len' field of the entry header may itself wrap around the end
 * of the ring buffer; the switch below reassembles it one byte at a time in
 * that case.
 *
 * Caller needs to hold log->mutex.
 */
static __u32 get_entry_len(struct logger_log *log, size_t off)
{
	__u16 val;

	switch (log->size - off) {
	case 1:
		/* length field straddles the buffer end: byte 0 at the end,
		 * byte 1 wrapped to the start */
		memcpy(&val, log->buffer + off, 1);
		memcpy(((char *) &val) + 1, log->buffer, 1);
		break;
	default:
		memcpy(&val, log->buffer + off, 2);
	}

	return sizeof(struct logger_entry) + val;
}
/*
* do_read_log_to_user - reads exactly 'count' bytes from 'log' into the
* user-space buffer 'buf'. Returns 'count' on success.
*
* Caller must hold log->mutex.
*/
/*
 * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the
 * user-space buffer 'buf'. Returns 'count' on success, -EFAULT if the
 * user buffer is unwritable.
 *
 * Caller must hold log->mutex.
 */
static ssize_t do_read_log_to_user(struct logger_log *log,
				   struct logger_reader *reader,
				   char __user *buf,
				   size_t count)
{
	size_t len;

	/*
	 * We read from the log in two disjoint operations. First, we read from
	 * the current read head offset up to 'count' bytes or to the end of
	 * the log, whichever comes first.
	 */
	len = min(count, log->size - reader->r_off);
	if (copy_to_user(buf, log->buffer + reader->r_off, len))
		return -EFAULT;

	/*
	 * Second, we read any remaining bytes, starting back at the head of
	 * the log.
	 */
	if (count != len)
		if (copy_to_user(buf + len, log->buffer, count - len))
			return -EFAULT;

	reader->r_off = logger_offset(reader->r_off + count);

	return count;
}
/*
* logger_read - our log's read() method
*
* Behavior:
*
* - O_NONBLOCK works
* - If there are no log entries to read, blocks until log is written to
* - Atomically reads exactly one log entry
*
* Optimal read size is LOGGER_ENTRY_MAX_LEN. Will set errno to EINVAL if read
* buffer is insufficient to hold next entry.
*/
static ssize_t logger_read(struct file *file, char __user *buf,
			   size_t count, loff_t *pos)
{
	struct logger_reader *reader = file->private_data;
	struct logger_log *log = reader->log;
	ssize_t ret;
	DEFINE_WAIT(wait);

start:
	/* sleep until there is at least one entry to read */
	while (1) {
		prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);

		/* sample the empty/non-empty state under the mutex;
		 * ret is reused here as a boolean "log is empty" flag */
		mutex_lock(&log->mutex);
		ret = (log->w_off == reader->r_off);
		mutex_unlock(&log->mutex);
		if (!ret)
			break;

		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		schedule();
	}

	finish_wait(&log->wq, &wait);
	if (ret)
		return ret;

	mutex_lock(&log->mutex);

	/* is there still something to read or did we race? */
	if (unlikely(log->w_off == reader->r_off)) {
		mutex_unlock(&log->mutex);
		goto start;
	}

	/* get the size of the next entry */
	ret = get_entry_len(log, reader->r_off);
	if (count < ret) {
		/* caller's buffer cannot hold a whole entry */
		ret = -EINVAL;
		goto out;
	}

	/* get exactly one entry from the log */
	ret = do_read_log_to_user(log, reader, buf, ret);

out:
	mutex_unlock(&log->mutex);

	return ret;
}
/*
* get_next_entry - return the offset of the first valid entry at least 'len'
* bytes after 'off'.
*
* Caller must hold log->mutex.
*/
/*
 * get_next_entry - return the offset of the first valid entry at least 'len'
 * bytes after 'off'.  Walks entry headers forward so the result always lands
 * on an entry boundary.
 *
 * Caller must hold log->mutex.
 */
static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
{
	size_t count = 0;

	do {
		size_t nr = get_entry_len(log, off);
		off = logger_offset(off + nr);
		count += nr;
	} while (count < len);

	return off;
}
/*
* clock_interval - is a < c < b in mod-space? Put another way, does the line
* from a to b cross c?
*/
/*
 * clock_interval - is a < c <= b in mod-space? Put another way, does the
 * line from a to b cross c?  Handles the wrapped case (b < a) separately.
 */
static inline int clock_interval(size_t a, size_t b, size_t c)
{
	if (b < a)
		return (a < c || b >= c) ? 1 : 0;

	return (a < c && b >= c) ? 1 : 0;
}
/*
* fix_up_readers - walk the list of all readers and "fix up" any who were
* lapped by the writer; also do the same for the default "start head".
* We do this by "pulling forward" the readers and start head to the first
* entry after the new write head.
*
* The caller needs to hold log->mutex.
*/
/*
 * fix_up_readers - walk the list of all readers and "fix up" any who were
 * lapped by the writer; also do the same for the default "start head".
 * We do this by "pulling forward" the readers and start head to the first
 * entry after the new write head.
 *
 * The caller needs to hold log->mutex.
 */
static void fix_up_readers(struct logger_log *log, size_t len)
{
	size_t old = log->w_off;
	size_t new = logger_offset(old + len);
	struct logger_reader *reader;

	/* did the pending write lap the start head? */
	if (clock_interval(old, new, log->head))
		log->head = get_next_entry(log, log->head, len);

	/* advance any reader the pending write would overrun */
	list_for_each_entry(reader, &log->readers, list)
		if (clock_interval(old, new, reader->r_off))
			reader->r_off = get_next_entry(log, reader->r_off, len);
}
/*
* do_write_log - writes 'len' bytes from 'buf' to 'log'
*
* The caller needs to hold log->mutex.
*/
/*
 * do_write_log - writes 'count' bytes from kernel buffer 'buf' to 'log',
 * wrapping around the end of the ring buffer if necessary.
 *
 * The caller needs to hold log->mutex.
 */
static void do_write_log(struct logger_log *log, const void *buf, size_t count)
{
	size_t len;

	/* first leg: up to the end of the buffer */
	len = min(count, log->size - log->w_off);
	memcpy(log->buffer + log->w_off, buf, len);

	/* second leg: wrap to the start for the remainder */
	if (count != len)
		memcpy(log->buffer, buf + len, count - len);

	log->w_off = logger_offset(log->w_off + count);
}
/*
* do_write_log_user - writes 'len' bytes from the user-space buffer 'buf' to
* the log 'log'
*
* The caller needs to hold log->mutex.
*
* Returns 'count' on success, negative error code on failure.
*/
/*
 * do_write_log_from_user - writes 'count' bytes from the user-space buffer
 * 'buf' to the log 'log', wrapping around the ring buffer end if necessary.
 *
 * The caller needs to hold log->mutex.
 *
 * Returns 'count' on success, negative error code on failure.  On failure
 * w_off is left unchanged; the caller is responsible for rolling back any
 * partially written entry (see logger_aio_write).
 */
static ssize_t do_write_log_from_user(struct logger_log *log,
				      const void __user *buf, size_t count)
{
	size_t len;

	len = min(count, log->size - log->w_off);
	if (len && copy_from_user(log->buffer + log->w_off, buf, len))
		return -EFAULT;

	if (count != len)
		if (copy_from_user(log->buffer, buf + len, count - len))
			return -EFAULT;

	log->w_off = logger_offset(log->w_off + count);

	return count;
}
/*
 * logger_aio_write - our write method, implementing support for write(),
 * writev(), and aio_write(). Writes are our fast path, and we try to optimize
 * them above all else.
 *
 * Returns the number of payload bytes written, 0 for a null write, or a
 * negative error code if copying from user space failed (in which case the
 * partially-written entry is rolled back).
 */
ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
			 unsigned long nr_segs, loff_t ppos)
{
	struct logger_log *log = file_get_log(iocb->ki_filp);
	size_t orig;
	struct logger_entry header;
	struct timespec now;
	ssize_t ret = 0;

	now = current_kernel_time();

	header.pid = current->tgid;
	header.tid = current->pid;
	header.sec = now.tv_sec;
	header.nsec = now.tv_nsec;
	header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD);

	/* null writes succeed, return zero */
	if (unlikely(!header.len))
		return 0;

	mutex_lock(&log->mutex);

	/*
	 * Snapshot the write offset *under the mutex*: it is used to roll
	 * the log back if a copy_from_user() below fails.  Reading it before
	 * taking the lock could race with a concurrent writer and restore a
	 * stale offset, corrupting the log.
	 */
	orig = log->w_off;

	/*
	 * Fix up any readers, pulling them forward to the first readable
	 * entry after (what will be) the new write offset. We do this now
	 * because if we partially fail, we can end up with clobbered log
	 * entries that encroach on readable buffer.
	 */
	fix_up_readers(log, sizeof(struct logger_entry) + header.len);

	do_write_log(log, &header, sizeof(struct logger_entry));

	while (nr_segs-- > 0) {
		size_t len;
		ssize_t nr;

		/* figure out how much of this vector we can keep */
		len = min_t(size_t, iov->iov_len, header.len - ret);

		/* write out this segment's payload */
		nr = do_write_log_from_user(log, iov->iov_base, len);
		if (unlikely(nr < 0)) {
			/* roll back the whole entry, header included */
			log->w_off = orig;
			mutex_unlock(&log->mutex);
			return nr;
		}

		iov++;
		ret += nr;
	}

	mutex_unlock(&log->mutex);

	/* wake up any blocked readers */
	wake_up_interruptible(&log->wq);

	return ret;
}
static struct logger_log * get_log_from_minor(int);
/*
* logger_open - the log's open() file operation
*
* Note how near a no-op this is in the write-only case. Keep it that way!
*/
/*
 * logger_open - the log's open() file operation
 *
 * Note how near a no-op this is in the write-only case. Keep it that way!
 * Readers get a logger_reader allocated and linked onto the log's reader
 * list; writers just stash the log itself in private_data.
 */
static int logger_open(struct inode *inode, struct file *file)
{
	struct logger_log *log;
	int ret;

	ret = nonseekable_open(inode, file);
	if (ret)
		return ret;

	log = get_log_from_minor(MINOR(inode->i_rdev));
	if (!log)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		struct logger_reader *reader;

		reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL);
		if (!reader)
			return -ENOMEM;

		reader->log = log;
		INIT_LIST_HEAD(&reader->list);

		mutex_lock(&log->mutex);
		/* new readers start at the oldest retained entry */
		reader->r_off = log->head;
		list_add_tail(&reader->list, &log->readers);
		mutex_unlock(&log->mutex);

		file->private_data = reader;
	} else
		file->private_data = log;

	return 0;
}
/*
* logger_release - the log's release file operation
*
* Note this is a total no-op in the write-only case. Keep it that way!
*/
static int logger_release(struct inode *ignored, struct file *file)
{
if (file->f_mode & FMODE_READ) {
struct logger_reader *reader = file->private_data;
list_del(&reader->list);
kfree(reader);
}
return 0;
}
/*
* logger_poll - the log's poll file operation, for poll/select/epoll
*
* Note we always return POLLOUT, because you can always write() to the log.
* Note also that, strictly speaking, a return value of POLLIN does not
* guarantee that the log is readable without blocking, as there is a small
* chance that the writer can lap the reader in the interim between poll()
* returning and the read() request.
*/
/*
 * logger_poll - the log's poll file operation, for poll/select/epoll
 *
 * Note we always return POLLOUT, because you can always write() to the log.
 * Note also that, strictly speaking, a return value of POLLIN does not
 * guarantee that the log is readable without blocking, as there is a small
 * chance that the writer can lap the reader in the interim between poll()
 * returning and the read() request.
 */
static unsigned int logger_poll(struct file *file, poll_table *wait)
{
	struct logger_reader *reader;
	struct logger_log *log;
	unsigned int ret = POLLOUT | POLLWRNORM;

	/* write-only opens are always writable, never readable */
	if (!(file->f_mode & FMODE_READ))
		return ret;

	reader = file->private_data;
	log = reader->log;

	poll_wait(file, &log->wq, wait);

	mutex_lock(&log->mutex);
	if (log->w_off != reader->r_off)
		ret |= POLLIN | POLLRDNORM;
	mutex_unlock(&log->mutex);

	return ret;
}
/*
 * logger_ioctl - implements the LOGGER_* ioctls (see logger.h).
 *
 * Returns -ENOTTY for unknown commands, -EBADF when the command requires a
 * read/write mode the file was not opened with.
 */
static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct logger_log *log = file_get_log(file);
	struct logger_reader *reader;
	long ret = -ENOTTY;

	mutex_lock(&log->mutex);

	switch (cmd) {
	case LOGGER_GET_LOG_BUF_SIZE:
		ret = log->size;
		break;
	case LOGGER_GET_LOG_LEN:
		if (!(file->f_mode & FMODE_READ)) {
			ret = -EBADF;
			break;
		}
		reader = file->private_data;
		/* bytes between the read head and the write head,
		 * accounting for ring-buffer wrap-around */
		if (log->w_off >= reader->r_off)
			ret = log->w_off - reader->r_off;
		else
			ret = (log->size - reader->r_off) + log->w_off;
		break;
	case LOGGER_GET_NEXT_ENTRY_LEN:
		if (!(file->f_mode & FMODE_READ)) {
			ret = -EBADF;
			break;
		}
		reader = file->private_data;
		if (log->w_off != reader->r_off)
			ret = get_entry_len(log, reader->r_off);
		else
			ret = 0;
		break;
	case LOGGER_FLUSH_LOG:
		if (!(file->f_mode & FMODE_WRITE)) {
			ret = -EBADF;
			break;
		}
		/* discard everything: pull all readers and the start head
		 * up to the current write head */
		list_for_each_entry(reader, &log->readers, list)
			reader->r_off = log->w_off;
		log->head = log->w_off;
		ret = 0;
		break;
	}

	mutex_unlock(&log->mutex);

	return ret;
}
/* file operations shared by all three log devices; no .write — plain
 * write()/writev() are routed through .aio_write */
static struct file_operations logger_fops = {
	.owner = THIS_MODULE,
	.read = logger_read,
	.aio_write = logger_aio_write,
	.poll = logger_poll,
	.unlocked_ioctl = logger_ioctl,
	.compat_ioctl = logger_ioctl,
	.open = logger_open,
	.release = logger_release,
};
/*
 * Defines a log structure with name 'NAME' and a size of 'SIZE' bytes, which
 * must be a power of two, greater than LOGGER_ENTRY_MAX_LEN, and less than
 * LONG_MAX minus LOGGER_ENTRY_MAX_LEN.  The backing buffer is statically
 * allocated; the misc minor is assigned dynamically at registration time.
 */
#define DEFINE_LOGGER_DEVICE(VAR, NAME, SIZE) \
static unsigned char _buf_ ## VAR[SIZE]; \
static struct logger_log VAR = { \
	.buffer = _buf_ ## VAR, \
	.misc = { \
		.minor = MISC_DYNAMIC_MINOR, \
		.name = NAME, \
		.fops = &logger_fops, \
		.parent = NULL, \
	}, \
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(VAR .wq), \
	.readers = LIST_HEAD_INIT(VAR .readers), \
	.mutex = __MUTEX_INITIALIZER(VAR .mutex), \
	.w_off = 0, \
	.head = 0, \
	.size = SIZE, \
};

/* the three Android logs: main (64K), events (256K), radio (64K) */
DEFINE_LOGGER_DEVICE(log_main, LOGGER_LOG_MAIN, 64*1024)
DEFINE_LOGGER_DEVICE(log_events, LOGGER_LOG_EVENTS, 256*1024)
DEFINE_LOGGER_DEVICE(log_radio, LOGGER_LOG_RADIO, 64*1024)
static struct logger_log * get_log_from_minor(int minor)
{
if (log_main.misc.minor == minor)
return &log_main;
if (log_events.misc.minor == minor)
return &log_events;
if (log_radio.misc.minor == minor)
return &log_radio;
return NULL;
}
/*
 * init_log - register one log's misc device.  Returns 0 on success or the
 * error from misc_register().
 */
static int __init init_log(struct logger_log *log)
{
	int ret;

	ret = misc_register(&log->misc);
	if (unlikely(ret)) {
		printk(KERN_ERR "logger: failed to register misc "
		       "device for log '%s'!\n", log->misc.name);
		return ret;
	}

	printk(KERN_INFO "logger: created %luK log '%s'\n",
	       (unsigned long) log->size >> 10, log->misc.name);

	return 0;
}
/*
 * logger_init - register all three log devices.
 *
 * NOTE(review): on a mid-sequence failure the already-registered devices
 * are not deregistered; presumably acceptable since this is a built-in
 * initcall that effectively aborts boot-time setup — TODO confirm.
 */
static int __init logger_init(void)
{
	int ret;

	ret = init_log(&log_main);
	if (unlikely(ret))
		goto out;

	ret = init_log(&log_events);
	if (unlikely(ret))
		goto out;

	ret = init_log(&log_radio);
	if (unlikely(ret))
		goto out;

out:
	return ret;
}
device_initcall(logger_init);

View File

@ -0,0 +1,48 @@
/* include/linux/logger.h
*
* Copyright (C) 2007-2008 Google, Inc.
* Author: Robert Love <rlove@android.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _LINUX_LOGGER_H
#define _LINUX_LOGGER_H
#include <linux/types.h>
#include <linux/ioctl.h>
/*
 * struct logger_entry - header preceding each payload in the log's ring
 * buffer.  This layout is shared with user space (liblog), so it must not
 * change incompatibly.
 */
struct logger_entry {
	__u16		len;	/* length of the payload */
	__u16		__pad;	/* no matter what, we get 2 bytes of padding */
	__s32		pid;	/* generating process's pid */
	__s32		tid;	/* generating process's tid */
	__s32		sec;	/* seconds since Epoch */
	__s32		nsec;	/* nanoseconds */
	char		msg[0];	/* the entry's payload */
};

/* device node names for the three logs */
#define LOGGER_LOG_RADIO	"log_radio"	/* radio-related messages */
#define LOGGER_LOG_EVENTS	"log_events"	/* system/hardware events */
#define LOGGER_LOG_MAIN		"log_main"	/* everything else */

/* upper bound on one entry (header + payload) */
#define LOGGER_ENTRY_MAX_LEN		(4*1024)
#define LOGGER_ENTRY_MAX_PAYLOAD	\
	(LOGGER_ENTRY_MAX_LEN - sizeof(struct logger_entry))

/* ioctl magic and commands */
#define __LOGGERIO	0xAE

#define LOGGER_GET_LOG_BUF_SIZE		_IO(__LOGGERIO, 1) /* size of log */
#define LOGGER_GET_LOG_LEN		_IO(__LOGGERIO, 2) /* used log len */
#define LOGGER_GET_NEXT_ENTRY_LEN	_IO(__LOGGERIO, 3) /* next entry len */
#define LOGGER_FLUSH_LOG		_IO(__LOGGERIO, 4) /* flush log */
#endif /* _LINUX_LOGGER_H */

View File

@ -0,0 +1,119 @@
/* drivers/misc/lowmemorykiller.c
*
* Copyright (C) 2007-2008 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/oom.h>
#include <linux/sched.h>
static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask);

/* Registered with the VM's shrinker framework; the high 'seeks' value
 * makes reclaim call us comparatively rarely. */
static struct shrinker lowmem_shrinker = {
	.shrink = lowmem_shrink,
	.seeks = DEFAULT_SEEKS * 16
};
static uint32_t lowmem_debug_level = 2;
static int lowmem_adj[6] = {
0,
1,
6,
12,
};
static int lowmem_adj_size = 4;
static size_t lowmem_minfree[6] = {
3*512, // 6MB
2*1024, // 8MB
4*1024, // 16MB
16*1024, // 64MB
};
static int lowmem_minfree_size = 4;
#define lowmem_print(level, x...) do { if(lowmem_debug_level >= (level)) printk(x); } while(0)
module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
module_param_array_named(adj, lowmem_adj, int, &lowmem_adj_size, S_IRUGO | S_IWUSR);
module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size, S_IRUGO | S_IWUSR);
module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
/*
 * lowmem_shrink - shrinker callback: when free+file pages drop below one of
 * the lowmem_minfree thresholds, pick the "worst" task at or above the
 * corresponding oom_adj level (highest adj, then largest RSS) and SIGKILL it.
 *
 * Returns an estimate of reclaimable memory (sum of candidate task sizes,
 * minus the one we killed), as the shrinker API expects.
 * nr_to_scan == 0 is the query-only pass; no task is selected then.
 */
static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	struct task_struct *p;
	struct task_struct *selected = NULL;
	int rem = 0;
	int tasksize;
	int i;
	int min_adj = OOM_ADJUST_MAX + 1;	/* sentinel: kill nothing */
	int selected_tasksize = 0;
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free = global_page_state(NR_FREE_PAGES) + global_page_state(NR_FILE_PAGES);
	if(lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if(lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	/* lowest threshold we are under determines the minimum adj to kill */
	for(i = 0; i < array_size; i++) {
		if(other_free < lowmem_minfree[i]) {
			min_adj = lowmem_adj[i];
			break;
		}
	}
	if(nr_to_scan > 0)
		lowmem_print(3, "lowmem_shrink %d, %x, ofree %d, ma %d\n", nr_to_scan, gfp_mask, other_free, min_adj);
	read_lock(&tasklist_lock);
	for_each_process(p) {
		/* only consider killable user tasks with an mm */
		if(p->oomkilladj >= 0 && p->mm) {
			tasksize = get_mm_rss(p->mm);
			if(nr_to_scan > 0 && tasksize > 0 && p->oomkilladj >= min_adj) {
				/* prefer higher adj; break ties by RSS */
				if(selected == NULL ||
				   p->oomkilladj > selected->oomkilladj ||
				   (p->oomkilladj == selected->oomkilladj &&
				    tasksize > selected_tasksize)) {
					selected = p;
					selected_tasksize = tasksize;
					lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
					             p->pid, p->comm, p->oomkilladj, tasksize);
				}
			}
			rem += tasksize;
		}
	}
	if(selected != NULL) {
		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
		             selected->pid, selected->comm,
		             selected->oomkilladj, selected_tasksize);
		force_sig(SIGKILL, selected);
		rem -= selected_tasksize;
	}
	lowmem_print(4, "lowmem_shrink %d, %x, return %d\n", nr_to_scan, gfp_mask, rem);
	read_unlock(&tasklist_lock);
	return rem;
}
/* Hook the killer into the VM's shrinker list at module load. */
static int __init lowmem_init(void)
{
	register_shrinker(&lowmem_shrinker);
	return 0;
}

/* Unhook on module unload. */
static void __exit lowmem_exit(void)
{
	unregister_shrinker(&lowmem_shrinker);
}
module_init(lowmem_init);
module_exit(lowmem_exit);
MODULE_LICENSE("GPL");

View File

@ -0,0 +1,395 @@
/* drivers/android/ram_console.c
*
* Copyright (C) 2007-2008 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/console.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/proc_fs.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
#include <linux/rslib.h>
#endif
/*
 * Header living at the start of the persistent RAM area, followed by the
 * console data ring buffer.  'start' is the current write offset into
 * data[], 'size' is how many valid bytes data[] holds (saturates at the
 * buffer size once it has wrapped).
 */
struct ram_console_buffer {
	uint32_t    sig;	/* RAM_CONSOLE_SIG if contents are valid */
	uint32_t    start;
	uint32_t    size;
	uint8_t     data[0];	/* ring buffer follows the header */
};

#define RAM_CONSOLE_SIG (0x43474244) /* DBGC */
#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
static char __initdata
ram_console_old_log_init_buffer[CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE];
#endif
static char *ram_console_old_log;
static size_t ram_console_old_log_size;
static struct ram_console_buffer *ram_console_buffer;
static size_t ram_console_buffer_size;
#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
static char *ram_console_par_buffer;
static struct rs_control *ram_console_rs_decoder;
static int ram_console_corrected_bytes;
static int ram_console_bad_blocks;
#define ECC_BLOCK_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE
#define ECC_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE
#define ECC_SYMSIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE
#define ECC_POLY CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL
#endif
#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
/*
 * ram_console_encode_rs8 - compute the ECC_SIZE Reed-Solomon parity bytes
 * for 'len' bytes of 'data' and store them (narrowed from uint16_t) in 'ecc'.
 */
static void ram_console_encode_rs8(uint8_t *data, size_t len, uint8_t *ecc)
{
	int i;
	uint16_t par[ECC_SIZE];
	/* Initialize the parity buffer */
	memset(par, 0, sizeof(par));
	encode_rs8(ram_console_rs_decoder, data, len, par, 0);
	for (i = 0; i < ECC_SIZE; i++)
		ecc[i] = par[i];
}

/*
 * ram_console_decode_rs8 - correct 'data' in place using the parity bytes
 * in 'ecc'.  Returns the number of corrected symbols, or a negative value
 * if the block is uncorrectable (see decode_rs8).
 */
static int ram_console_decode_rs8(void *data, size_t len, uint8_t *ecc)
{
	int i;
	uint16_t par[ECC_SIZE];
	for (i = 0; i < ECC_SIZE; i++)
		par[i] = ecc[i];
	return decode_rs8(ram_console_rs_decoder, data, par, len,
				NULL, 0, NULL, 0, NULL);
}
#endif
/*
 * ram_console_update - copy 'count' bytes of 's' into the ring buffer at the
 * current write offset and (when ECC is enabled) re-encode the parity for
 * every ECC block the write touched.
 *
 * The caller (ram_console_write) guarantees the write does not run past the
 * end of the buffer.
 */
static void ram_console_update(const char *s, unsigned int count)
{
	struct ram_console_buffer *buffer = ram_console_buffer;
#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
	uint8_t *buffer_end = buffer->data + ram_console_buffer_size;
	uint8_t *block;
	uint8_t *par;
	int size = ECC_BLOCK_SIZE;
#endif
	memcpy(buffer->data + buffer->start, s, count);
#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
	/* round down to the first ECC block containing the write */
	block = buffer->data + (buffer->start & ~(ECC_BLOCK_SIZE - 1));
	par = ram_console_par_buffer +
	      (buffer->start / ECC_BLOCK_SIZE) * ECC_SIZE;
	do {
		/* final block may be short if the buffer size is not a
		 * multiple of ECC_BLOCK_SIZE */
		if (block + ECC_BLOCK_SIZE > buffer_end)
			size = buffer_end - block;
		ram_console_encode_rs8(block, size, par);
		block += ECC_BLOCK_SIZE;
		par += ECC_SIZE;
	} while (block < buffer->data + buffer->start + count);
#endif
}
/*
 * ram_console_update_header - re-encode the ECC parity covering the header
 * struct itself (stored after all the data-block parity).  No-op unless
 * error correction is configured.
 */
static void ram_console_update_header(void)
{
#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
	struct ram_console_buffer *buffer = ram_console_buffer;
	uint8_t *par;
	par = ram_console_par_buffer +
	      DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE;
	ram_console_encode_rs8((uint8_t *)buffer, sizeof(*buffer), par);
#endif
}
/*
 * ram_console_write - console .write hook: append 'count' bytes to the
 * persistent ring buffer, splitting the copy in two when it wraps.
 * Writes longer than the whole buffer keep only the trailing portion.
 */
static void
ram_console_write(struct console *console, const char *s, unsigned int count)
{
	int rem;
	struct ram_console_buffer *buffer = ram_console_buffer;

	if (count > ram_console_buffer_size) {
		/* only the last buffer-full of data can be retained */
		s += count - ram_console_buffer_size;
		count = ram_console_buffer_size;
	}
	rem = ram_console_buffer_size - buffer->start;
	if (rem < count) {
		/* fill to the end, then wrap to the beginning */
		ram_console_update(s, rem);
		s += rem;
		count -= rem;
		buffer->start = 0;
		buffer->size = ram_console_buffer_size;
	}
	ram_console_update(s, count);

	buffer->start += count;
	if (buffer->size < ram_console_buffer_size)
		buffer->size += count;
	ram_console_update_header();
}
static struct console ram_console = {
.name = "ram",
.write = ram_console_write,
.flags = CON_PRINTBUFFER | CON_ENABLED,
.index = -1,
};
/*
 * ram_console_save_old - copy the previous boot's console contents out of
 * the persistent buffer (un-wrapping the ring) into 'dest', or into a
 * freshly kmalloc'd buffer when dest is NULL.  With ECC enabled, each block
 * is corrected first and an error summary line is appended.
 *
 * On success sets ram_console_old_log/ram_console_old_log_size, which
 * ram_console_late_init() later exposes via /proc.
 */
static void __init
ram_console_save_old(struct ram_console_buffer *buffer, char *dest)
{
	size_t old_log_size = buffer->size;
#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
	uint8_t *block;
	uint8_t *par;
	char strbuf[80];
	int strbuf_len;

	/* run RS decoding over every data block, counting corrections */
	block = buffer->data;
	par = ram_console_par_buffer;
	while (block < buffer->data + buffer->size) {
		int numerr;
		int size = ECC_BLOCK_SIZE;
		if (block + size > buffer->data + ram_console_buffer_size)
			size = buffer->data + ram_console_buffer_size - block;
		numerr = ram_console_decode_rs8(block, size, par);
		if (numerr > 0) {
#if 0
			printk(KERN_INFO "ram_console: error in block %p, %d\n",
			       block, numerr);
#endif
			ram_console_corrected_bytes += numerr;
		} else if (numerr < 0) {
#if 0
			printk(KERN_INFO "ram_console: uncorrectable error in "
			       "block %p\n", block);
#endif
			ram_console_bad_blocks++;
		}
		block += ECC_BLOCK_SIZE;
		par += ECC_SIZE;
	}
	if (ram_console_corrected_bytes || ram_console_bad_blocks)
		strbuf_len = snprintf(strbuf, sizeof(strbuf),
			"\n%d Corrected bytes, %d unrecoverable blocks\n",
			ram_console_corrected_bytes, ram_console_bad_blocks);
	else
		strbuf_len = snprintf(strbuf, sizeof(strbuf),
				      "\nNo errors detected\n");
	if (strbuf_len >= sizeof(strbuf))
		strbuf_len = sizeof(strbuf) - 1;
	old_log_size += strbuf_len;
#endif

	if (dest == NULL) {
		dest = kmalloc(old_log_size, GFP_KERNEL);
		if (dest == NULL) {
			printk(KERN_ERR
			       "ram_console: failed to allocate buffer\n");
			return;
		}
	}

	ram_console_old_log = dest;
	ram_console_old_log_size = old_log_size;
	/* un-wrap the ring: oldest part first (from 'start' to the end),
	 * then the newer part from the buffer's beginning */
	memcpy(ram_console_old_log,
	       &buffer->data[buffer->start], buffer->size - buffer->start);
	memcpy(ram_console_old_log + buffer->size - buffer->start,
	       &buffer->data[0], buffer->start);
#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
	memcpy(ram_console_old_log + old_log_size - strbuf_len,
	       strbuf, strbuf_len);
#endif
}
/*
 * ram_console_init - common initialization for the early- and
 * platform-driver paths: carve the ECC parity area out of the mapped
 * region, validate/correct the header, save any previous boot's log,
 * reset the buffer, and register the console.
 *
 * 'old_buf' is a preallocated destination for the old log (used by the
 * early-init path) or NULL to kmalloc one.  Always returns 0; ECC setup
 * failure just skips console registration.
 */
static int __init ram_console_init(struct ram_console_buffer *buffer,
				   size_t buffer_size, char *old_buf)
{
#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
	int numerr;
	uint8_t *par;
#endif
	ram_console_buffer = buffer;
	ram_console_buffer_size =
		buffer_size - sizeof(struct ram_console_buffer);
#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
	/* reserve room at the end for per-block parity plus one header block */
	ram_console_buffer_size -= (DIV_ROUND_UP(ram_console_buffer_size,
						ECC_BLOCK_SIZE) + 1) * ECC_SIZE;
	ram_console_par_buffer = buffer->data + ram_console_buffer_size;

	/* first consecutive root is 0
	 * primitive element to generate roots = 1
	 */
	ram_console_rs_decoder = init_rs(ECC_SYMSIZE, ECC_POLY, 0, 1, ECC_SIZE);
	if (ram_console_rs_decoder == NULL) {
		printk(KERN_INFO "ram_console: init_rs failed\n");
		return 0;
	}

	ram_console_corrected_bytes = 0;
	ram_console_bad_blocks = 0;

	/* try to correct the header before trusting its fields */
	par = ram_console_par_buffer +
	      DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE;

	numerr = ram_console_decode_rs8(buffer, sizeof(*buffer), par);
	if (numerr > 0) {
		printk(KERN_INFO "ram_console: error in header, %d\n", numerr);
		ram_console_corrected_bytes += numerr;
	} else if (numerr < 0) {
		printk(KERN_INFO
		       "ram_console: uncorrectable error in header\n");
		ram_console_bad_blocks++;
	}
#endif

	if (buffer->sig == RAM_CONSOLE_SIG) {
		/* sanity-check the offsets before saving the old contents */
		if (buffer->size > ram_console_buffer_size
		    || buffer->start > buffer->size)
			printk(KERN_INFO "ram_console: found existing invalid "
			       "buffer, size %d, start %d\n",
			       buffer->size, buffer->start);
		else {
			printk(KERN_INFO "ram_console: found existing buffer, "
			       "size %d, start %d\n",
			       buffer->size, buffer->start);
			ram_console_save_old(buffer, old_buf);
		}
	} else {
		printk(KERN_INFO "ram_console: no valid data in buffer "
		       "(sig = 0x%08x)\n", buffer->sig);
	}

	buffer->sig = RAM_CONSOLE_SIG;
	buffer->start = 0;
	buffer->size = 0;

	register_console(&ram_console);
#ifdef CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
	console_verbose();
#endif
	return 0;
}
#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
/*
 * Early-init flavor: the buffer address and size come straight from
 * Kconfig, and the old log is saved into a static init-time buffer
 * (the allocator may not be usable this early -- presumably why
 * ram_console_old_log_init_buffer exists; see ram_console_late_init).
 */
static int __init ram_console_early_init(void)
{
	return ram_console_init((struct ram_console_buffer *)
		CONFIG_ANDROID_RAM_CONSOLE_EARLY_ADDR,
		CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE,
		ram_console_old_log_init_buffer);
}
#else
/*
 * Platform probe: validate the single MEM resource, ioremap it and hand
 * it to ram_console_init() (which allocates the old-log copy itself,
 * hence the NULL third argument).
 */
static int ram_console_driver_probe(struct platform_device *pdev)
{
	struct resource *res = pdev->resource;
	size_t start;
	size_t buffer_size;
	void *buffer;

	if (res == NULL || pdev->num_resources != 1 ||
	    !(res->flags & IORESOURCE_MEM)) {
		printk(KERN_ERR "ram_console: invalid resource, %p %d flags "
		       "%lx\n", res, pdev->num_resources, res ? res->flags : 0);
		return -ENXIO;
	}
	buffer_size = res->end - res->start + 1;
	start = res->start;
	/* 'start' and 'buffer_size' are size_t: %zx is required, the old
	 * %x specifier was undefined behavior on 64-bit builds */
	printk(KERN_INFO "ram_console: got buffer at %zx, size %zx\n",
	       start, buffer_size);
	buffer = ioremap(res->start, buffer_size);
	if (buffer == NULL) {
		printk(KERN_ERR "ram_console: failed to map memory\n");
		return -ENOMEM;
	}

	return ram_console_init(buffer, buffer_size, NULL/* allocate */);
}
/* binds ram_console_driver_probe to platform devices named "ram_console" */
static struct platform_driver ram_console_driver = {
	.probe = ram_console_driver_probe,
	.driver		= {
		.name	= "ram_console",
	},
};
/* Module init: register the platform driver; nothing to undo on
 * failure, so propagate the result directly (the old 'err' local
 * added nothing). */
static int __init ram_console_module_init(void)
{
	return platform_driver_register(&ram_console_driver);
}
#endif
/*
 * proc read handler exposing the previous boot's saved console log.
 * Standard bounded read: EOF past the end, partial reads allowed.
 */
static ssize_t ram_console_read_old(struct file *file, char __user *buf,
				    size_t len, loff_t *offset)
{
	loff_t where = *offset;
	ssize_t nbytes;

	/* past the end of the saved log: signal EOF */
	if (where >= ram_console_old_log_size)
		return 0;

	nbytes = min(len, (size_t)(ram_console_old_log_size - where));
	if (copy_to_user(buf, ram_console_old_log + where, nbytes))
		return -EFAULT;

	*offset = where + nbytes;
	return nbytes;
}
static struct file_operations ram_console_file_ops = {
.owner = THIS_MODULE,
.read = ram_console_read_old,
};
/*
 * Expose the previous boot's log as /proc/last_kmsg.
 * With EARLY_INIT the saved log lives in an init-time buffer, so it is
 * first copied into kmalloc'd memory here (init memory is discarded
 * after boot).  Always returns 0; failures simply mean no proc entry.
 */
static int __init ram_console_late_init(void)
{
	struct proc_dir_entry *entry;

	/* nothing was rescued from the previous boot */
	if (ram_console_old_log == NULL)
		return 0;
#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
	ram_console_old_log = kmalloc(ram_console_old_log_size, GFP_KERNEL);
	if (ram_console_old_log == NULL) {
		printk(KERN_ERR
		       "ram_console: failed to allocate buffer for old log\n");
		ram_console_old_log_size = 0;
		return 0;
	}
	memcpy(ram_console_old_log,
	       ram_console_old_log_init_buffer, ram_console_old_log_size);
#endif
	entry = create_proc_entry("last_kmsg", S_IFREG | S_IRUGO, NULL);
	if (!entry) {
		printk(KERN_ERR "ram_console: failed to create proc entry\n");
		kfree(ram_console_old_log);
		ram_console_old_log = NULL;
		return 0;
	}

	entry->proc_fops = &ram_console_file_ops;
	entry->size = ram_console_old_log_size;
	return 0;
}
#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
/* register as early as possible so boot messages land in the RAM log */
console_initcall(ram_console_early_init);
#else
module_init(ram_console_module_init);
#endif
/* /proc/last_kmsg can only be created once procfs is up */
late_initcall(ram_console_late_init);

View File

@ -0,0 +1,177 @@
/* drivers/misc/timed_gpio.c
*
* Copyright (C) 2008 Google, Inc.
* Author: Mike Lockwood <lockwood@android.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/hrtimer.h>
#include <linux/err.h>
#include <asm/arch/gpio.h>
#include "timed_gpio.h"
/* sysfs class under which one device per configured GPIO is created */
static struct class *timed_gpio_class;

/* per-GPIO runtime state, one entry per platform_data gpio */
struct timed_gpio_data {
	struct device *dev;	/* sysfs device carrying the "enable" file */
	struct hrtimer timer;	/* drives the GPIO inactive when it fires */
	spinlock_t lock;	/* taken around timer restart in the store op */
	unsigned gpio;
	int max_timeout;	/* milliseconds; writes are clamped to this */
	u8 active_low;
};
/* hrtimer callback: timeout expired, drive the GPIO back to its
 * inactive level and do not restart. */
static enum hrtimer_restart gpio_timer_func(struct hrtimer *timer)
{
	struct timed_gpio_data *data =
		container_of(timer, struct timed_gpio_data, timer);
	int inactive_level = data->active_low ? 1 : 0;

	gpio_direction_output(data->gpio, inactive_level);
	return HRTIMER_NORESTART;
}
/*
 * sysfs show: remaining on-time in milliseconds, 0 when idle.
 * Uses ktime_to_ms() instead of reaching into ktime_t: the .tv.sec /
 * .tv.nsec union members the old code used do not exist on all
 * configurations (e.g. 64-bit scalar ktime), so this also fixes a
 * build breakage there.
 */
static ssize_t gpio_enable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct timed_gpio_data *gpio_data = dev_get_drvdata(dev);
	int remaining = 0;

	if (hrtimer_active(&gpio_data->timer))
		remaining = ktime_to_ms(hrtimer_get_remaining(&gpio_data->timer));

	return sprintf(buf, "%d\n", remaining);
}
/*
 * sysfs store: write N>0 to pulse the GPIO active for N ms (clamped to
 * max_timeout); write 0 to drive it inactive immediately.
 */
static ssize_t gpio_enable_store(
		struct device *dev, struct device_attribute *attr,
		const char *buf, size_t size)
{
	struct timed_gpio_data *gpio_data = dev_get_drvdata(dev);
	int value;
	unsigned long flags;

	/* the old code ignored the sscanf result, so 'value' was used
	 * uninitialized on unparsable input; reject it instead */
	if (sscanf(buf, "%d", &value) != 1)
		return -EINVAL;

	spin_lock_irqsave(&gpio_data->lock, flags);

	/* cancel previous timer and set GPIO according to value */
	/* NOTE(review): hrtimer_cancel() can sleep-wait on a running
	 * callback; calling it under a spinlock with IRQs off looks
	 * risky -- confirm the callback can never run concurrently here */
	hrtimer_cancel(&gpio_data->timer);
	gpio_direction_output(gpio_data->gpio, gpio_data->active_low ? !value : !!value);

	if (value > 0) {
		if (value > gpio_data->max_timeout)
			value = gpio_data->max_timeout;

		hrtimer_start(&gpio_data->timer,
			ktime_set(value / 1000, (value % 1000) * 1000000),
			HRTIMER_MODE_REL);
	}

	spin_unlock_irqrestore(&gpio_data->lock, flags);

	return size;
}
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, gpio_enable_show, gpio_enable_store);
/*
 * Probe: create one class device (with an "enable" attribute) per GPIO
 * described in platform_data.  The original returned straight out of
 * the loop on failure, leaking gpio_data and every device created in
 * earlier iterations; unwind everything instead.
 */
static int timed_gpio_probe(struct platform_device *pdev)
{
	struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
	struct timed_gpio *cur_gpio;
	struct timed_gpio_data *gpio_data, *gpio_dat;
	int i, j, ret = 0;

	if (!pdata)
		return -EBUSY;

	gpio_data = kzalloc(sizeof(struct timed_gpio_data) * pdata->num_gpios,
			    GFP_KERNEL);
	if (!gpio_data)
		return -ENOMEM;

	for (i = 0; i < pdata->num_gpios; i++) {
		cur_gpio = &pdata->gpios[i];
		gpio_dat = &gpio_data[i];

		hrtimer_init(&gpio_dat->timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		gpio_dat->timer.function = gpio_timer_func;
		spin_lock_init(&gpio_dat->lock);

		gpio_dat->gpio = cur_gpio->gpio;
		gpio_dat->max_timeout = cur_gpio->max_timeout;
		gpio_dat->active_low = cur_gpio->active_low;
		/* start at the inactive level */
		gpio_direction_output(gpio_dat->gpio, gpio_dat->active_low);

		gpio_dat->dev = device_create(timed_gpio_class, &pdev->dev,
					      0, "%s", cur_gpio->name);
		if (unlikely(IS_ERR(gpio_dat->dev))) {
			ret = PTR_ERR(gpio_dat->dev);
			goto err_unwind;
		}

		dev_set_drvdata(gpio_dat->dev, gpio_dat);
		ret = device_create_file(gpio_dat->dev, &dev_attr_enable);
		if (ret) {
			device_unregister(gpio_dat->dev);
			goto err_unwind;
		}
	}

	platform_set_drvdata(pdev, gpio_data);
	return 0;

err_unwind:
	/* tear down everything created in earlier iterations */
	for (j = 0; j < i; j++) {
		device_remove_file(gpio_data[j].dev, &dev_attr_enable);
		device_unregister(gpio_data[j].dev);
	}
	kfree(gpio_data);
	return ret;
}
/* Remove: undo probe -- drop every per-GPIO sysfs device, then the
 * state array itself. */
static int timed_gpio_remove(struct platform_device *pdev)
{
	struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
	struct timed_gpio_data *gpio_data = platform_get_drvdata(pdev);
	int n;

	for (n = 0; n < pdata->num_gpios; n++) {
		struct timed_gpio_data *gd = &gpio_data[n];

		device_remove_file(gd->dev, &dev_attr_enable);
		device_unregister(gd->dev);
	}

	kfree(gpio_data);
	return 0;
}
/* matches platform devices named "timed-gpio" registered by board code */
static struct platform_driver timed_gpio_driver = {
	.probe		= timed_gpio_probe,
	.remove		= timed_gpio_remove,
	.driver		= {
		.name		= "timed-gpio",
		.owner		= THIS_MODULE,
	},
};
/* Module init: create the "timed_output" class, then register the
 * platform driver.  The original leaked the class if driver
 * registration failed. */
static int __init timed_gpio_init(void)
{
	int ret;

	timed_gpio_class = class_create(THIS_MODULE, "timed_output");
	if (IS_ERR(timed_gpio_class))
		return PTR_ERR(timed_gpio_class);

	ret = platform_driver_register(&timed_gpio_driver);
	if (ret)
		class_destroy(timed_gpio_class);
	return ret;
}
/* Module exit: unregister the driver (which removes its class devices)
 * BEFORE destroying the class they live in; the original destroyed the
 * class first, while devices could still be registered in it. */
static void __exit timed_gpio_exit(void)
{
	platform_driver_unregister(&timed_gpio_driver);
	class_destroy(timed_gpio_class);
}
module_init(timed_gpio_init);
module_exit(timed_gpio_exit);
MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
MODULE_DESCRIPTION("timed gpio driver");
MODULE_LICENSE("GPL");

View File

@ -0,0 +1,31 @@
/* include/linux/timed_gpio.h
*
* Copyright (C) 2008 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _LINUX_TIMED_GPIO_H
#define _LINUX_TIMED_GPIO_H

/* static description of one timed GPIO, supplied by board code */
struct timed_gpio {
	const char *name;	/* sysfs device name */
	unsigned 	gpio;
	int		max_timeout;	/* maximum pulse length in ms */
	u8 		active_low;
};

/* platform_data for the "timed-gpio" platform driver */
struct timed_gpio_platform_data {
	int 		num_gpios;
	struct timed_gpio *gpios;
};

#endif

View File

@ -0,0 +1,6 @@
config ASUS_OLED
	tristate "Asus OLED driver"
	depends on USB
	# Kconfig constants are lowercase: "default N" referenced an
	# undefined symbol named N instead of the false constant n
	default n
	---help---
	  Enable support for the OLED display present in some Asus laptops.

View File

@ -0,0 +1 @@
obj-$(CONFIG_ASUS_OLED) += asus_oled.o

View File

@ -0,0 +1,156 @@
Driver for Asus OLED display present in some Asus laptops.
The code of this driver is based on 'asusoled' program taken from
https://launchpad.net/asusoled/. I just wanted to have a simple
kernel driver for controlling this device, but I didn't know how
to do that. Now I know ;) Also, that program can not be used
with usbhid loaded, which means no USB mouse/keyboard while
controlling OLED display :(
It has been tested on Asus G1 and didn't cause any problems,
but I don't guarantee that it won't do anything wrong :)
It can (and probably does) have errors. It is usable
in my case, and I hope others will find it useful too!
*******
Building the module
To build the module you need kernel 2.6 include files and some C compiler.
Just run:
make
make install (as a root)
It will build (hopefully) the module and install it in
/lib/modules/'uname -r'/extra/asus_oled.ko.
To load it just use:
modprobe asus_oled
You can check if it has detected your OLED display by looking into dmesg output.
There should be something like this:
asus-oled 2-7:1.0: Attached Asus OLED device
If it doesn't find your display, you can try removing usbhid module.
If you add asus_oled into the list of modules loaded during system boot
before usbhid, it will work even when usbhid is present.
If it still doesn't detect your hardware, check lsusb output.
There should be similar line:
Bus 002 Device 005: ID 0b05:1726 ASUSTek Computer, Inc.
If you don't see any lines with '0b05:1726' it means that you have different
type of hardware that is not detected (it may or may not work, but the driver
knows only '0b05:1726' device).
*******
Configuration
There is only one option: start_off.
You can use it by: 'modprobe asus_oled start_off=1', or by adding this
line to /etc/modprobe.conf:
options asus_oled start_off=1
With this option provided, asus_oled driver will switch off the display
when it is detected and attached. It is nice feature to just switch off the 'ASUS'
logo. If you don't use the display, it is probably the good idea to switch it off,
to protect OLEDs from "wearing off".
*******
Usage
This module can be controlled with two special files:
/sys/class/asus_oled/oled_N/enabled
/sys/class/asus_oled/oled_N/picture
(N is the device number, the first, and probably the only, has number 1,
so it is /sys/class/asus_oled/oled_1/enabled
and /sys/class/asus_oled/oled_1/picture)
'enabled' files is for reading and writing, 'picture' is writeable only.
You can write 0 or 1 to 'enabled' file, which will switch
on and off the display. Reading from this file will tell you the last
status set, either 0 or 1. By default it is 1, so if the device was set to 'off',
and the computer was rebooted without power-off, this file will contain wrong
value - because the device is off, but hasn't been disabled this time and is
assumed to be on...
To 'picture' file you write pictures to be displayed by the OLED device.
The format of the file:
<M:WxH>
00001110010111000
00010101010101010
....
First line is a configuration parameter. Meaning of fields in <M:WxH>:
M - picture mode. It can be either 's' for static pictures,
'r' for rolling pictures, and 'f' for flashing pictures.
W - width of the picture. May be between 1 and 1792
H - height of the picture. May be between 1 and 32
For example <s:128x32> means static picture, 128 pixels long and 32 pixels high.
The physical size of the display is 128x32 pixels. Static and flashing pictures
can't be larger than that (actually they can, but only part of them will be displayed ;) )
If the picture is smaller than 128x32 it will be centered. Rolling pictures wider than
128 pixels will be centered too, unless their width = n*128. Vertically they will be
centered just like static pictures, if their height is smaller than 32.
Flashing pictures will be centered horizontally if their width < 128, but they were
centered vertically in a different way. If their height < 16, they will be centered
in the upper half of the display (rows 0-15). This is because only the first half
of flashing pictures is used for flashing. When a picture with height = 32 is
displayed in flashing mode, its upper 16 rows will be flashing in the upper half
of the display, and the lower half will be empty. After few seconds upper part will
stop flashing (but that part of the picture will remain there), and the lower
half of the display will start displaying the lower half of the picture
in rolling mode, unless it is empty, or the picture was small enough to fit in
upper part. It is not my idea; this is just the way Asus' displays work ;)
So if you need just flashing, use at most 128x16 picture. If you need flashing and
rolling, use whole size of the display.
Lines following the first, configuration, line are picture data. Each '1' means
that the pixel is lit, and '0' means that it is not. You can also use '#' as ON,
and ' ' (space) as OFF. Empty lines and all other characters are ignored.
It is possible to write everything in one line <M:WxH>01010101010101010...,
and W*H characters will be used. If there is not enough characters, nothing will be
displayed. However, the 'line mode' is easier to read (and write), and it also
lets you omit parts of the data. Whenever an End-Of-Line character is found, but
the line is not W characters long, it is assumed that all missing characters
are equal to the last character in the line.
The following line represents '0', '1' and a lot of '0's, depending on the width of the picture
provided in configuration data:
010
So if you need empty line, it is sufficient to write line with only one '0' in it.
The same works with '1' (or ' ' and '#').
If there is too much data in the file, the excess will be ignored. If you are not sure
how many characters you are missing, you can add a few lines with one zero in each of them.
There are some example pictures in .txt format, that can be used as follows:
cat foo.txt > /sys/class/asus_oled/oled_1/picture
If the display is switched off you also need to run:
echo 1 > /sys/class/asus_oled/oled_1/enabled
To switch it off, just use:
echo 0 > /sys/class/asus_oled/oled_1/enabled
*******
For any additional info please have a look at http://lapsus.berlios.de/asus_oled.html
Jakub Schmidtke (sjakub@gmail.com)

View File

@ -0,0 +1,10 @@
TODO:
- checkpatch.pl cleanups
- sparse fixes
- audit the userspace interface
- sysfs vs. char?
- Documentation/ABI/ needs to be added
- put the sample .txt files and README file somewhere.
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
Cc: Jakub Schmidtke <sjakub@gmail.com>

View File

@ -0,0 +1,745 @@
/*
* Asus OLED USB driver
*
* Copyright (C) 2007,2008 Jakub Schmidtke (sjakub@gmail.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*
*
* This module is based on usbled and asus-laptop modules.
*
*
* Asus OLED support is based on asusoled program taken from
* https://launchpad.net/asusoled/.
*
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/platform_device.h>
#include <linux/ctype.h>
#define ASUS_OLED_VERSION "0.04-dev"
#define ASUS_OLED_NAME "asus-oled"
#define ASUS_OLED_UNDERSCORE_NAME "asus_oled"
#define ASUS_OLED_ERROR "Asus OLED Display Error: "
#define ASUS_OLED_STATIC 's'
#define ASUS_OLED_ROLL 'r'
#define ASUS_OLED_FLASH 'f'
#define ASUS_OLED_MAX_WIDTH 1792
#define ASUS_OLED_DISP_HEIGHT 32
#define ASUS_OLED_PACKET_BUF_SIZE 256
MODULE_AUTHOR("Jakub Schmidtke, sjakub@gmail.com");
MODULE_DESCRIPTION("Asus OLED Driver v" ASUS_OLED_VERSION);
MODULE_LICENSE("GPL");
/* /sys/class/asus_oled; created in asus_oled_init() */
static struct class *oled_class;
/* number of devices probed so far; used to name oled_%d class devices */
static int oled_num;

/* module parameter: switch the display off as soon as it is attached
 * (statics are zero-initialized; explicit "= 0" and a pointer set to
 * the integer 0 were checkpatch errors) */
static uint start_off;
module_param(start_off, uint, 0644);
MODULE_PARM_DESC(start_off, "Set to 1 to switch off OLED display after it is attached");
/* bitmap packing formula variants for the different panel generations */
typedef enum {
	PACK_MODE_G1,
	PACK_MODE_G50,
	PACK_MODE_LAST		/* sentinel, also "unknown" in probe */
} oled_pack_mode_t;

/* static description of one supported USB device */
struct oled_dev_desc_str {
	uint16_t		idVendor;
	uint16_t		idProduct;
	uint16_t		devWidth; // width of display
	oled_pack_mode_t	packMode; // formula to be used while packing the picture
	const char		*devDesc;
};
/* table of devices that work with this driver */
static struct usb_device_id id_table [] = {
	{ USB_DEVICE(0x0b05, 0x1726) }, // Asus G1/G2 (and variants)
	{ USB_DEVICE(0x0b05, 0x175b) }, // Asus G50V (and possibly others - G70? G71?)
	{ },
};

/* parameters of specific devices; keep in sync with id_table above */
static struct oled_dev_desc_str oled_dev_desc_table [] = {
	{ 0x0b05, 0x1726, 128, PACK_MODE_G1, "G1/G2" },
	{ 0x0b05, 0x175b, 256, PACK_MODE_G50, "G50" },
	{ },
};
MODULE_DEVICE_TABLE (usb, id_table);
/*
 * Fill in the fixed part of an asus_oled_packet header (magic bytes
 * plus seven variable fields).  Note: the do { } while (0) must NOT
 * carry its own trailing semicolon -- the original did, which breaks
 * "if (x) SETUP_PACKET_HEADER(...); else ..." with a stray empty
 * statement.  Call sites already supply the semicolon.
 */
#define SETUP_PACKET_HEADER(packet, val1, val2, val3, val4, val5, val6, val7) \
	do {							\
		memset(packet, 0, sizeof(struct asus_oled_header));	\
		packet->header.magic1 = 0x55;			\
		packet->header.magic2 = 0xaa;			\
		packet->header.flags = val1;			\
		packet->header.value3 = val2;			\
		packet->header.buffer1 = val3;			\
		packet->header.buffer2 = val4;			\
		packet->header.value6 = val5;			\
		packet->header.value7 = val6;			\
		packet->header.value8 = val7;			\
	} while (0)
/* on-the-wire packet header; field meanings beyond the magic bytes are
 * undocumented (names inherited from the asusoled userspace tool) */
struct asus_oled_header {
	uint8_t		magic1;		/* always 0x55 */
	uint8_t		magic2;		/* always 0xaa */
	uint8_t		flags;
	uint8_t		value3;
	uint8_t		buffer1;
	uint8_t		buffer2;
	uint8_t		value6;
	uint8_t		value7;
	uint8_t		value8;
	uint8_t		padding2[7];
} __attribute((packed));

/* header plus one 256-byte bitmap chunk, sent as a single transfer */
struct asus_oled_packet {
	struct asus_oled_header		header;
	uint8_t				bitmap[ASUS_OLED_PACKET_BUF_SIZE];
} __attribute((packed));

/* per-device state */
struct asus_oled_dev {
	struct usb_device *	udev;
	uint8_t			pic_mode;	/* 's'/'r'/'f', see ASUS_OLED_* */
	uint16_t		dev_width;	/* panel width in pixels */
	oled_pack_mode_t	pack_mode;
	size_t			height;		/* current picture dimensions */
	size_t			width;
	size_t			x_shift;	/* centering offsets */
	size_t			y_shift;
	size_t			buf_offs;	/* next pixel index while parsing */
	uint8_t			last_val;	/* last pixel value written */
	size_t			buf_size;
	char			*buf;		/* packed bitmap, kmalloc'd */
	uint8_t			enabled;	/* last power state written */
	struct device		*dev;		/* class device oled_%d */
};
/*
 * Switch the display on (enabl != 0) or off via a one-byte command
 * (0xaf = on, 0xae = off) sent on bulk endpoint 2.  Best-effort: a
 * failed transfer is only logged, and odev->enabled records the
 * requested state either way (matching the original behavior).
 */
static void enable_oled(struct asus_oled_dev *odev, uint8_t enabl)
{
	int retval;
	int act_len;
	struct asus_oled_packet *packet;

	packet = kzalloc(sizeof(struct asus_oled_packet), GFP_KERNEL);
	if (!packet) {
		dev_err(&odev->udev->dev, "out of memory\n");
		return;
	}

	SETUP_PACKET_HEADER(packet, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00);

	packet->bitmap[0] = enabl ? 0xaf : 0xae;

	/* the original wrapped this single transfer in a for (a=0; a<1;)
	 * loop that could only ever run once; send it directly */
	retval = usb_bulk_msg(odev->udev,
			      usb_sndbulkpipe(odev->udev, 2),
			      packet,
			      sizeof(struct asus_oled_header) + 1,
			      &act_len,
			      -1);
	if (retval)
		dev_dbg(&odev->udev->dev, "retval = %d\n", retval);

	odev->enabled = enabl;

	kfree(packet);
}
/* sysfs store (USB-interface attribute): parse an integer and set the
 * display power state accordingly. */
static ssize_t set_enabled(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct asus_oled_dev *odev =
		usb_get_intfdata(to_usb_interface(dev));
	int requested = simple_strtoul(buf, NULL, 10);

	enable_oled(odev, requested);
	return count;
}
/* sysfs store (class-device attribute): same as set_enabled() but the
 * device data is attached to the class device, not the interface. */
static ssize_t class_set_enabled(struct device *device, struct device_attribute *attr, const char *buf, size_t count)
{
	struct asus_oled_dev *odev =
		(struct asus_oled_dev *) dev_get_drvdata(device);
	int requested = simple_strtoul(buf, NULL, 10);

	enable_oled(odev, requested);
	return count;
}
/* sysfs show (USB-interface attribute): report the last power state
 * written via enable_oled(). */
static ssize_t get_enabled(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct asus_oled_dev *odev =
		usb_get_intfdata(to_usb_interface(dev));

	return sprintf(buf, "%d\n", odev->enabled);
}
/* sysfs show (class-device attribute): same value as get_enabled(). */
static ssize_t class_get_enabled(struct device *device, struct device_attribute *attr, char *buf)
{
	struct asus_oled_dev *odev =
		(struct asus_oled_dev *) dev_get_drvdata(device);

	return sprintf(buf, "%d\n", odev->enabled);
}
/*
 * Transmit p_num chunks of ASUS_OLED_PACKET_BUF_SIZE bytes from buf to
 * endpoint 2, one packet per chunk.  The header constants vary with
 * the picture type; their meaning is inherited from the asusoled
 * userspace tool and otherwise undocumented.
 * NOTE(review): the pipe is built with usb_sndctrlpipe() but the
 * transfer is bulk -- looks inconsistent, confirm against hardware.
 */
static void send_packets(struct usb_device *udev, struct asus_oled_packet *packet,
			 char *buf, uint8_t p_type, size_t p_num)
{
	size_t i;
	int act_len;

	for (i = 0; i < p_num; i++) {
		int retval;

		switch (p_type) {
		case ASUS_OLED_ROLL:
			SETUP_PACKET_HEADER(packet, 0x40, 0x80, p_num, i + 1, 0x00, 0x01, 0xff);
			break;
		case ASUS_OLED_STATIC:
			SETUP_PACKET_HEADER(packet, 0x10 + i, 0x80, 0x01, 0x01, 0x00, 0x01, 0x00);
			break;
		case ASUS_OLED_FLASH:
			SETUP_PACKET_HEADER(packet, 0x10 + i, 0x80, 0x01, 0x01, 0x00, 0x00, 0xff);
			break;
		}

		memcpy(packet->bitmap, buf + (ASUS_OLED_PACKET_BUF_SIZE*i), ASUS_OLED_PACKET_BUF_SIZE);

		retval = usb_bulk_msg(udev,
				      usb_sndctrlpipe(udev, 2),
				      packet,
				      sizeof(struct asus_oled_packet),
				      &act_len,
				      -1);
		if (retval)
			dev_dbg(&udev->dev, "retval = %d\n", retval);
	}
}
/*
 * Send one packet: header bytes b1..b6 (the seventh field forced to
 * 0x00) followed by 'len' bitmap bytes taken from buf + offset.
 */
static void send_packet(struct usb_device *udev,
			struct asus_oled_packet *packet,
			size_t offset, size_t len, char *buf,
			uint8_t b1, uint8_t b2, uint8_t b3,
			uint8_t b4, uint8_t b5, uint8_t b6)
{
	int err;
	int transferred;

	SETUP_PACKET_HEADER(packet, b1, b2, b3, b4, b5, b6, 0x00);
	memcpy(packet->bitmap, buf + offset, len);

	err = usb_bulk_msg(udev, usb_sndctrlpipe(udev, 2), packet,
			   sizeof(struct asus_oled_packet), &transferred, -1);
	if (err)
		dev_dbg(&udev->dev, "retval = %d\n", err);
}
/*
 * G50 panels take a fixed five-packet sequence covering the whole
 * frame; the offsets/lengths and header bytes below are the exact
 * values the original userspace tool sends (no further documentation
 * exists for them).
 */
static void send_packets_g50(struct usb_device *udev, struct asus_oled_packet *packet, char *buf)
{
	send_packet(udev, packet,     0, 0x100, buf, 0x10, 0x00, 0x02, 0x01, 0x00, 0x01);
	send_packet(udev, packet, 0x100, 0x080, buf, 0x10, 0x00, 0x02, 0x02, 0x80, 0x00);

	send_packet(udev, packet, 0x180, 0x100, buf, 0x11, 0x00, 0x03, 0x01, 0x00, 0x01);
	send_packet(udev, packet, 0x280, 0x100, buf, 0x11, 0x00, 0x03, 0x02, 0x00, 0x01);
	send_packet(udev, packet, 0x380, 0x080, buf, 0x11, 0x00, 0x03, 0x03, 0x80, 0x00);
}
/*
 * Push the current packed picture (odev->buf, odev->buf_size bytes) to
 * the display using the packing scheme of the detected hardware.
 */
static void send_data(struct asus_oled_dev *odev)
{
	size_t packet_num = odev->buf_size / ASUS_OLED_PACKET_BUF_SIZE;
	struct asus_oled_packet * packet;

	packet = kzalloc(sizeof(struct asus_oled_packet), GFP_KERNEL);

	if (!packet) {
		dev_err(&odev->udev->dev, "out of memory\n");
		return;
	}

	if (odev->pack_mode==PACK_MODE_G1){
		// When sending roll-mode data the display updated only first packet.
		// I have no idea why, but when static picture is send just before
		// rolling picture - everything works fine.
		if (odev->pic_mode == ASUS_OLED_ROLL)
			send_packets(odev->udev, packet, odev->buf, ASUS_OLED_STATIC, 2);

		// Only ROLL mode can use more than 2 packets.
		if (odev->pic_mode != ASUS_OLED_ROLL && packet_num > 2)
			packet_num = 2;

		send_packets(odev->udev, packet, odev->buf, odev->pic_mode, packet_num);
	}
	else
	if (odev->pack_mode==PACK_MODE_G50){
		send_packets_g50(odev->udev, packet, odev->buf);
	}

	kfree(packet);
}
/*
 * Append 'count' copies of pixel value 'val' at the current parse
 * position (odev->buf_offs, in picture coordinates).  The packed
 * buffer starts as all-0xff; a lit pixel CLEARS its bit, so only
 * val != 0 touches memory.  The index formulas map (x, y) -- after
 * centering via x_shift/y_shift -- to a byte in the device's packed
 * layout.  Returns 0, or -EIO if a computed index would run past
 * buf_size (which would indicate a driver bug).
 */
static int append_values(struct asus_oled_dev *odev, uint8_t val, size_t count)
{
	while (count-- > 0) {
		if (val) {
			size_t x = odev->buf_offs % odev->width;
			size_t y = odev->buf_offs / odev->width;
			size_t i;

			x += odev->x_shift;
			y += odev->y_shift;

			switch(odev->pack_mode)
			{
			case PACK_MODE_G1:
				// i = (x/128)*640 + 127 - x + (y/8)*128;
				// This one for 128 is the same, but might be better for different widths?
				i = (x/odev->dev_width)*640 + odev->dev_width - 1 - x + (y/8)*odev->dev_width;
				break;

			case PACK_MODE_G50:
				i = (odev->dev_width - 1 - x)/8 + y*odev->dev_width/8;
				break;

			default:
				i = 0;
				printk(ASUS_OLED_ERROR "Unknown OLED Pack Mode: %d!\n", odev->pack_mode);
				break;
			}

			if (i >= odev->buf_size) {
				printk(ASUS_OLED_ERROR "Buffer overflow! Report a bug in the driver: offs: %d >= %d i: %d (x: %d y: %d)\n",
				       (int) odev->buf_offs, (int) odev->buf_size, (int) i, (int) x, (int) y);
				return -EIO;
			}

			switch (odev->pack_mode)
			{
			case PACK_MODE_G1:
				odev->buf[i] &= ~(1<<(y%8));
				break;

			case PACK_MODE_G50:
				odev->buf[i] &= ~(1<<(x%8));
				break;

			default:
				// cannot get here; stops gcc complaining
				;
			}
		}

		odev->last_val = val;
		odev->buf_offs++;
	}

	return 0;
}
/*
 * Parse a picture written to the sysfs "picture" file and send it to
 * the display.  Two input formats: 'b' prefix = raw binary frame, or a
 * "<M:WxH>" header followed by '0'/'1' (or ' '/'#') pixel characters.
 * Fixes over the original: the binary-mode kmalloc() result was used
 * unchecked (NULL dereference in memset on OOM), and the binary copy
 * loop was bounded by 32*32 bytes regardless of the actual buffer size
 * (heap overflow on 128-pixel-wide G1 panels, whose buffer is only
 * 512 bytes).
 */
static ssize_t odev_set_picture(struct asus_oled_dev *odev, const char *buf, size_t count)
{
	size_t offs = 0, max_offs;

	if (count < 1) return 0;

	if (tolower(buf[0]) == 'b'){
		// binary mode, set the entire memory
		size_t i;

		odev->buf_size = (odev->dev_width * ASUS_OLED_DISP_HEIGHT) / 8;

		kfree(odev->buf);	/* kfree(NULL) is a no-op */
		odev->buf = kmalloc(odev->buf_size, GFP_KERNEL);
		if (odev->buf == NULL) {
			odev->buf_size = 0;
			printk(ASUS_OLED_ERROR "Out of memory!\n");
			return -ENOMEM;
		}

		memset(odev->buf, 0xff, odev->buf_size);

		/* bound by the real buffer size, not a fixed 32*32 */
		for (i = 1; i < count && i <= odev->buf_size; i++){
			odev->buf[i-1] = buf[i];
			odev->buf_offs = i-1;
		}

		odev->width = odev->dev_width / 8;
		odev->height = ASUS_OLED_DISP_HEIGHT;
		odev->x_shift = 0;
		odev->y_shift = 0;
		odev->last_val = 0;

		send_data(odev);

		return count;
	}

	if (buf[0] == '<') {
		size_t i;
		size_t w = 0, h = 0;
		size_t w_mem, h_mem;

		if (count < 10 || buf[2] != ':') {
			goto error_header;
		}

		switch(tolower(buf[1])) {
		case ASUS_OLED_STATIC:
		case ASUS_OLED_ROLL:
		case ASUS_OLED_FLASH:
			odev->pic_mode = buf[1];
			break;
		default:
			printk(ASUS_OLED_ERROR "Wrong picture mode: '%c'.\n", buf[1]);
			return -EIO;
			break;
		}

		/* parse "<M:WxH>" -- width up to 'x', height up to '>' */
		for (i = 3; i < count; ++i) {
			if (buf[i] >= '0' && buf[i] <= '9') {
				w = 10*w + (buf[i] - '0');

				if (w > ASUS_OLED_MAX_WIDTH) goto error_width;
			}
			else if (tolower(buf[i]) == 'x') break;
			else goto error_width;
		}

		for (++i; i < count; ++i) {
			if (buf[i] >= '0' && buf[i] <= '9') {
				h = 10*h + (buf[i] - '0');

				if (h > ASUS_OLED_DISP_HEIGHT) goto error_height;
			}
			else if (tolower(buf[i]) == '>') break;
			else goto error_height;
		}

		if (w < 1 || w > ASUS_OLED_MAX_WIDTH) goto error_width;
		if (h < 1 || h > ASUS_OLED_DISP_HEIGHT) goto error_height;
		if (i >= count || buf[i] != '>') goto error_header;

		offs = i+1;

		/* round the stored frame up to full device columns/rows */
		if (w % (odev->dev_width) != 0)
			w_mem = (w/(odev->dev_width) + 1)*(odev->dev_width);
		else
			w_mem = w;

		if (h < ASUS_OLED_DISP_HEIGHT)
			h_mem = ASUS_OLED_DISP_HEIGHT;
		else
			h_mem = h;

		odev->buf_size = w_mem * h_mem / 8;

		kfree(odev->buf);	/* kfree(NULL) is a no-op */
		odev->buf = kmalloc(odev->buf_size, GFP_KERNEL);

		if (odev->buf == NULL) {
			odev->buf_size = 0;
			printk(ASUS_OLED_ERROR "Out of memory!\n");
			return -ENOMEM;
		}

		memset(odev->buf, 0xff, odev->buf_size);

		odev->buf_offs = 0;
		odev->width = w;
		odev->height = h;
		odev->x_shift = 0;
		odev->y_shift = 0;
		odev->last_val = 0;

		/* flashing pictures center in the upper half only */
		if (odev->pic_mode == ASUS_OLED_FLASH) {
			if (h < ASUS_OLED_DISP_HEIGHT/2)
				odev->y_shift = (ASUS_OLED_DISP_HEIGHT/2 - h)/2;
		}
		else {
			if (h < ASUS_OLED_DISP_HEIGHT)
				odev->y_shift = (ASUS_OLED_DISP_HEIGHT - h)/2;
		}

		if (w < (odev->dev_width))
			odev->x_shift = ((odev->dev_width) - w)/2;
	}

	max_offs = odev->width * odev->height;

	while (offs < count && odev->buf_offs < max_offs) {
		int ret;

		if (buf[offs] == '1' || buf[offs] == '#') {
			if ( (ret = append_values(odev, 1, 1)) < 0) return ret;
		}
		else if (buf[offs] == '0' || buf[offs] == ' ') {
			if ( (ret = append_values(odev, 0, 1)) < 0) return ret;
		}
		else if (buf[offs] == '\n') {
			// New line detected. Lets assume, that all characters till the end of the
			// line were equal to the last character in this line.
			if (odev->buf_offs % odev->width != 0)
				if ( (ret = append_values(odev, odev->last_val,
					odev->width - (odev->buf_offs % odev->width))) < 0) return ret;
		}

		offs++;
	}

	/* only send once a complete frame has been accumulated */
	if (odev->buf_offs >= max_offs) send_data(odev);

	return count;

error_width:
	printk(ASUS_OLED_ERROR "Wrong picture width specified.\n");
	return -EIO;

error_height:
	printk(ASUS_OLED_ERROR "Wrong picture height specified.\n");
	return -EIO;

error_header:
	printk(ASUS_OLED_ERROR "Wrong picture header.\n");
	return -EIO;
}
/* sysfs store (USB-interface attribute): forward to the shared parser. */
static ssize_t set_picture(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct usb_interface *intf = to_usb_interface(dev);
	struct asus_oled_dev *odev = usb_get_intfdata(intf);

	return odev_set_picture(odev, buf, count);
}
/* sysfs store (class-device attribute): forward to the shared parser. */
static ssize_t class_set_picture(struct device *device, struct device_attribute *attr, const char *buf, size_t count)
{
	struct asus_oled_dev *odev =
		(struct asus_oled_dev *) dev_get_drvdata(device);

	return odev_set_picture(odev, buf, count);
}
#define ASUS_OLED_DEVICE_ATTR(_file)		dev_attr_asus_oled_##_file

/* S_IWUGO made these sysfs files world-writable, letting any local
 * user drive the display; restrict writes to root (S_IWUSR) */
static DEVICE_ATTR(asus_oled_enabled, S_IWUSR | S_IRUGO, get_enabled, set_enabled);
static DEVICE_ATTR(asus_oled_picture, S_IWUSR , NULL, set_picture);
static DEVICE_ATTR(enabled, S_IWUSR | S_IRUGO, class_get_enabled, class_set_enabled);
static DEVICE_ATTR(picture, S_IWUSR, NULL, class_set_picture);
/*
 * USB probe: look the device up in oled_dev_desc_table, allocate the
 * per-device state, create the interface attributes plus an oled_%d
 * class device with its own enabled/picture files, and optionally
 * switch the panel off (start_off module parameter).
 */
static int asus_oled_probe(struct usb_interface *interface, const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev(interface);
	struct asus_oled_dev *odev = NULL;
	int retval = -ENOMEM;
	uint16_t dev_width = 0;
	oled_pack_mode_t pack_mode = PACK_MODE_LAST;
	const struct oled_dev_desc_str * dev_desc = oled_dev_desc_table;
	const char *desc = 0;

	if (id == 0) {
		// Even possible? Just to make sure...
		dev_err(&interface->dev, "No usb_device_id provided!\n");
		return -ENODEV;
	}

	/* match against the table of known panels */
	for (; dev_desc->idVendor; dev_desc++)
	{
		if (dev_desc->idVendor == id->idVendor
		    && dev_desc->idProduct == id->idProduct)
		{
			dev_width = dev_desc->devWidth;
			desc = dev_desc->devDesc;
			pack_mode = dev_desc->packMode;
			break;
		}
	}

	if ( !desc || dev_width < 1 || pack_mode == PACK_MODE_LAST) {
		dev_err(&interface->dev, "Missing or incomplete device description!\n");
		return -ENODEV;
	}

	odev = kzalloc(sizeof(struct asus_oled_dev), GFP_KERNEL);

	if (odev == NULL) {
		dev_err(&interface->dev, "Out of memory\n");
		return -ENOMEM;
	}

	odev->udev = usb_get_dev(udev);
	odev->pic_mode = ASUS_OLED_STATIC;
	odev->dev_width = dev_width;
	odev->pack_mode = pack_mode;
	odev->height = 0;
	odev->width = 0;
	odev->x_shift = 0;
	odev->y_shift = 0;
	odev->buf_offs = 0;
	odev->buf_size = 0;
	odev->last_val = 0;
	odev->buf = NULL;
	odev->enabled = 1;
	odev->dev = 0;

	usb_set_intfdata (interface, odev);

	if ((retval = device_create_file(&interface->dev, &ASUS_OLED_DEVICE_ATTR(enabled)))) {
		goto err_files;
	}

	if ((retval = device_create_file(&interface->dev, &ASUS_OLED_DEVICE_ATTR(picture)))) {
		goto err_files;
	}

	odev->dev = device_create(oled_class, &interface->dev, MKDEV(0,0),
				NULL,"oled_%d", ++oled_num);

	if (IS_ERR(odev->dev)) {
		retval = PTR_ERR(odev->dev);
		goto err_files;
	}

	dev_set_drvdata(odev->dev, odev);

	if ( (retval = device_create_file(odev->dev, &dev_attr_enabled))) {
		goto err_class_enabled;
	}

	if ( (retval = device_create_file(odev->dev, &dev_attr_picture))) {
		goto err_class_picture;
	}

	dev_info(&interface->dev, "Attached Asus OLED device: %s [width %u, pack_mode %d]\n", desc, odev->dev_width, odev->pack_mode);

	if (start_off)
		enable_oled(odev, 0);

	return 0;

	/* unwind in reverse creation order; removing a file that was
	 * never created on a given path is tolerated by the core */
err_class_picture:
	device_remove_file(odev->dev, &dev_attr_picture);

err_class_enabled:
	device_remove_file(odev->dev, &dev_attr_enabled);
	device_unregister(odev->dev);

err_files:
	device_remove_file(&interface->dev, &ASUS_OLED_DEVICE_ATTR(enabled));
	device_remove_file(&interface->dev, &ASUS_OLED_DEVICE_ATTR(picture));

	usb_set_intfdata (interface, NULL);
	usb_put_dev(odev->udev);
	kfree(odev);

	return retval;
}
/*
 * USB disconnect: remove the sysfs files and class device created by
 * probe, drop the usb_device reference and free all state.
 */
static void asus_oled_disconnect(struct usb_interface *interface)
{
	struct asus_oled_dev *odev;

	odev = usb_get_intfdata (interface);
	usb_set_intfdata (interface, NULL);

	device_remove_file(odev->dev, &dev_attr_picture);
	device_remove_file(odev->dev, &dev_attr_enabled);
	device_unregister(odev->dev);

	device_remove_file(&interface->dev, & ASUS_OLED_DEVICE_ATTR(picture));
	device_remove_file(&interface->dev, & ASUS_OLED_DEVICE_ATTR(enabled));

	usb_put_dev(odev->udev);

	kfree(odev->buf);	/* kfree(NULL) is a no-op; the old "if" guard was redundant */
	kfree(odev);

	dev_info(&interface->dev, "Disconnected Asus OLED device\n");
}
static struct usb_driver oled_driver = {
	.name =		ASUS_OLED_NAME,
	.probe =	asus_oled_probe,
	.disconnect =	asus_oled_disconnect,
	.id_table =	id_table,
};

/* /sys/class/asus_oled/version: report driver name and version */
static ssize_t version_show(struct class *dev, char *buf)
{
	return sprintf(buf, ASUS_OLED_UNDERSCORE_NAME " %s\n", ASUS_OLED_VERSION);
}

static CLASS_ATTR(version, S_IRUGO, version_show, NULL);
/*
 * Module init: create the asus_oled class and its "version" attribute,
 * then register the USB driver.  Any failure after class creation
 * destroys the class (class_destroy also drops its attribute files)
 * before returning the error.
 */
static int __init asus_oled_init(void)
{
	int retval = 0;
	oled_class = class_create(THIS_MODULE, ASUS_OLED_UNDERSCORE_NAME);

	if (IS_ERR(oled_class)) {
		err("Error creating " ASUS_OLED_UNDERSCORE_NAME " class");
		return PTR_ERR(oled_class);
	}

	if ((retval = class_create_file(oled_class, &class_attr_version))) {
		err("Error creating class version file");
		goto error;
	}

	retval = usb_register(&oled_driver);

	if (retval) {
		err("usb_register failed. Error number %d", retval);
		goto error;
	}

	return retval;

error:
	class_destroy(oled_class);
	return retval;
}
/*
 * asus_oled_exit - module exit.
 *
 * Unwind in the reverse order of asus_oled_init(): deregister the USB
 * driver first so no new binds (or late disconnects) can touch the
 * class, then remove the attribute and destroy the class.  The original
 * code destroyed the class while the driver was still registered.
 */
static void __exit asus_oled_exit(void)
{
	usb_deregister(&oled_driver);
	class_remove_file(oled_class, &class_attr_version);
	class_destroy(oled_class);
}
/* Module entry and exit points. */
module_init (asus_oled_init);
module_exit (asus_oled_exit);

View File

@ -0,0 +1,33 @@
<s:74x32>
0
0
00000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000
01111111111000000000000000000000000000000000000000000000000000000000000000
00011111100000000000000111000000000000000000000000000000000000000000000000
00001111000000000000000111000000000000000000000000000000000000000000000000
00001111000000000000000111000000000000000000000000000000000000000000000000
00001111000000000000000000000000000000000000000000000000000000000000000000
00001111000000000000000000000000000000000000000000000000000000000000000000
00001111000000000000011100001111111111100000111110011111100011111101111000
00001111000000000000111110000011111000111000111110000111100001111000110000
00001111000000000001101110000011111000111000001111000111100000111100100000
00001111000000000001001110000011110000111100001111000111100000111101100000
00001111000000000100001110000011110000111100001111000111100000011111000000
00001111000000000100011110000011110000111100001111000111100000001111000000
00001111000000000100011110000011110000111100001111000111100000001111000000
00001111000000000100011100100011110000111100001111000111100000001111100000
00001111000000001100111100100011110000111100001111000111100000001111110000
00001111000000001100111101100011110000111100001111000111100000011011110000
00001111000000011100111101000011110000111100001111000111100000010001111000
00011111000001111100111011000011110000111100001111001111100000110000111100
11111111111111111100011110001111111011111110000111110111111011111011111110
00000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000
0
0
0
0

View File

@ -0,0 +1,18 @@
<f:128x16>
00000000000000000000000000000000001111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000011110000000000001110000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000011110000000000001110000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000011110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000011110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000011110000000000011000111111111100001111001111100111110111000000000000000000000000000000000000
00000000000000000000000000000000000011110000000000111100001111000110001111000111100011100010000000000000000000000000000000000000
00000000000000000000000000000000000011110000000001011100001111000111000111100111100001110110000000000000000000000000000000000000
00000000000000000000000000000000000011110000000000011100001110000111000111100111100001111100000000000000000000000000000000000000
00000000000000000000000000000000000011110000000100011100001110000111000111100111100000111100000000000000000000000000000000000000
00000000000000000000000000000000000011110000000100011100001110000111000111100111100000111100000000000000000000000000000000000000
00000000000000000000000000000000000011110000000100111001001110000111000111100111100000111110000000000000000000000000000000000000
00000000000000000000000000000000000011110000001100111011001110000111000111100111100000111110000000000000000000000000000000000000
00000000000000000000000000000000000011110000001100111010001110000111000111100111100000100111000000000000000000000000000000000000
00000000000000000000000000000000000011110000111100110110001110000111000111100111100001000011100000000000000000000000000000000000
00000000000000000000000000000000001111111111111100111100111111011111100011110111110111101111110000000000000000000000000000000000

View File

@ -0,0 +1,33 @@
<f:128x32>
00000000000000000000000000000000001111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000011110000000000001110000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000011110000000000001110000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000011110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000011110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000011110000000000011000111111111100001111001111100111110111000000000000000000000000000000000000
00000000000000000000000000000000000011110000000000111100001111000110001111000111100011100010000000000000000000000000000000000000
00000000000000000000000000000000000011110000000001011100001111000111000111100111100001110110000000000000000000000000000000000000
00000000000000000000000000000000000011110000000000011100001110000111000111100111100001111100000000000000000000000000000000000000
00000000000000000000000000000000000011110000000100011100001110000111000111100111100000111100000000000000000000000000000000000000
00000000000000000000000000000000000011110000000100011100001110000111000111100111100000111100000000000000000000000000000000000000
00000000000000000000000000000000000011110000000100111001001110000111000111100111100000111110000000000000000000000000000000000000
00000000000000000000000000000000000011110000001100111011001110000111000111100111100000111110000000000000000000000000000000000000
00000000000000000000000000000000000011110000001100111010001110000111000111100111100000100111000000000000000000000000000000000000
00000000000000000000000000000000000011110000111100110110001110000111000111100111100001000011100000000000000000000000000000000000
00000000000000000000000000000000001111111111111100111100111111011111100011110111110111101111110000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000
00000000000000000000000000001111000000000000000000000000000000000000000000000000000000000000001111000000000000000000000000000000
00000000000000000000000000011111100000000000000000000000000000000000000000000000000000000000011111100000000000000000000000000000
00000000000000000000000000011111100000000000000000000000000000000000000000000000000000000000011111100000000000000000000000000000
00000000000000000000000000001111000000000000000000000000000000000000000000000000000000000000001111000000000000000000000000000000
00000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000

View File

@ -0,0 +1,33 @@
<s:32x32>
00000000000001111111000000000000
0000000000001 100000000000
000000000001 10000000000
000000000001 10000000000
000000000001 10000000000
000000000001 1 111 10000000000
000000000001 1 1 1000000000
000000000001 111 1000000000
000000000001 111111 1000000000
000000000001 111111 1000000000
000000000001 1 1 100000000
00000000001 11 100000000
00000000001 11111111 10000000
0000000001 11111111 1000000
000000001 111111111 1000000
000000001 1111111111 100000
00000001 11111111111 100000
00000001 111111111111 10000
0000001 111111111111 10000
0000001 111111111111 10000
0000001 111111111111 10000
0000001 111111111111 10000
000000011 11111111111 10000
000011 11 11111111111 100000
0001 1111 111111111111111 1000
001 1111111 11111111111111 1000
001 1111111 1111111 111111 100
001 11111111 111111 1111111 10
001 11111111 11111 100
001 1111111 111 11100
000111 111 11111 11 100000
000000111 111111111 1000000

View File

@ -0,0 +1,33 @@
<r:32x32>
00000000000001111111000000000000
0000000000001 100000000000
000000000001 10000000000
000000000001 10000000000
000000000001 10000000000
000000000001 1 111 10000000000
000000000001 1 1 1000000000
000000000001 111 1000000000
000000000001 111111 1000000000
000000000001 111111 1000000000
000000000001 1 1 100000000
00000000001 11 100000000
00000000001 11111111 10000000
0000000001 11111111 1000000
000000001 111111111 1000000
000000001 1111111111 100000
00000001 11111111111 100000
00000001 111111111111 10000
0000001 111111111111 10000
0000001 111111111111 10000
0000001 111111111111 10000
0000001 111111111111 10000
000000011 11111111111 10000
000011 11 11111111111 100000
0001 1111 111111111111111 1000
001 1111111 11111111111111 1000
001 1111111 1111111 111111 100
001 11111111 111111 1111111 10
001 11111111 11111 100
001 1111111 111 11100
000111 111 11111 11 100000
000000111 111111111 1000000

View File

@ -0,0 +1,33 @@
<r:256x32>
000000000000000000000000000000000000000000000000000000000000011111110000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000001 1000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000001 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000001 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000001 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000001 1 111 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000001 1 1 100000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000001 111 100000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111100000000000000111000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000001 111111 100000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000000000111000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000001 111111 100000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000000000111000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000001 1 1 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000001 11 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000001 11111111 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000000011100001111111111100000111110011111100011111101111000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000001 11111111 100000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000000111110000011111000111000111110000111100001111000110000000000000000000000000000000
000000000000000000000000000000000000000000000000000000001 111111111 100000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000001101110000011111000111000001111000111100000111100100000000000000000000000000000000
000000000000000000000000000000000000000000000000000000001 1111111111 10000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000001001110000011110000111100001111000111100000111101100000000000000000000000000000000
00000000000000000000000000000000000000000000000000000001 11111111111 10000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000100001110000011110000111100001111000111100000011111000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000001 111111111111 10000000000000000000000000000000000000000000000000000000000000000000000000000000000011110000000001000111100000111100001111000011110001111000000011110000000
0000000000000000000000000000000000000000000000000000001 111111111111 1000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000100011110000011110000111100001111000111100000001111000000
0000000000000000000000000000000000000000000000000000001 111111111111 1000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000100011100100011110000111100001111000111100000001111100000000000000000000000000000000
0000000000000000000000000000000000000000000000000000001 111111111111 1000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000001100111100100011110000111100001111000111100000001111110000000000000000000000000000000
0000000000000000000000000000000000000000000000000000001 111111111111 1000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000001100111101100011110000111100001111000111100000011011110000000000000000000000000000000
000000000000000000000000000000000000000000000000000000011 11111111111 1000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000011100111101000011110000111100001111000111100000010001111000000000000000000000000000000
000000000000000000000000000000000000000000000000000011 11 11111111111 10000000000000000000000000000000000000000000000000000000000000000000000000000000000011111000001111100111011000011110000111100001111001111100000110000111100000000000000000000000000000
0000000000000000000000000000000000000000000000000001 1111 111111111111111 100000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111100011110001111111011111110000111110111111011111011111110000000000000000000000000000
000000000000000000000000000000000000000000000000001 1111111 11111111111111 100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000001 1111111 1111111 111111 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000001 11111111 111111 1111111 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000001 11111111 11111 1000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000001 1111111 111 111000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000111 111 11111 11 10
000000000000000000000000000000000000000000000000000000111 111111111 10

View File

@ -0,0 +1,33 @@
<r:128x32>
10000000000000000000000000000000000000000000000000000000000000011000000000000000000000000000000000000000000000000000000000000001
01000000000000000000000000000000000000000000000000000000000000100100000000000000000000000000000000000000000000000000000000000010
00100000000000000000000000000000000000000000000000000000000001000010000000000000000000000000000000000000000000000000000000000100
00010000000000000000000000000000000000000000000000000000000010000001000000000000000000000000000000000000000000000000000000001000
00001000000000000000000000000000000000000000000000000000000100000000100000000000000000000000000000000000000000000000000000010000
00000100000000000000000000000000000000000000000000000000001000000000010000000000000000000000000000000000000000000000000000100000
00000010000000000000000000000000000000000000000000000000010000000000001000000000000000000000000000000000000000000000000001000000
00000001000000000000000000000000000000000000000000000000100000000000000100000000000000000000000000000000000000000000000010000000
00000000100000000000000000000000000000000000000000000001000000000000000010000000000000000000000000000000000000000000000100000000
00000000010000000000000000000000000000000000000000000010000000000000000001000000000000000000000000000000000000000000001000000000
00000000001000000000000000000000000000000000000000000100000000000000000000100000000000000000000000000000000000000000010000000000
00000000000100000000000000000000000000000000000000001000000000000000000000010000000000000000000000000000000000000000100000000000
00000000000010000000000000000000000000000000000000010000000000000000000000001000000000000000000000000000000000000001000000000000
00000000000001000000000000000000000000000000000000100000000000000000000000000100000000000000000000000000000000000010000000000000
00000000000000100000000000000000000000000000000001000000000000000000000000000010000000000000000000000000000000000100000000000000
00000000000000010000000000000000000000000000000010000000000000000000000000000001000000000000000000000000000000001000000000000000
00000000000000001000000000000000000000000000000100000000000000000000000000000000100000000000000000000000000000010000000000000000
00000000000000000100000000000000000000000000001000000000000000000000000000000000010000000000000000000000000000100000000000000000
00000000000000000010000000000000000000000000010000000000000000000000000000000000001000000000000000000000000001000000000000000000
00000000000000000001000000000000000000000000100000000000000000000000000000000000000100000000000000000000000010000000000000000000
00000000000000000000100000000000000000000001000000000000000000000000000000000000000010000000000000000000000100000000000000000000
00000000000000000000010000000000000000000010000000000000000000000000000000000000000001000000000000000000001000000000000000000000
00000000000000000000001000000000000000000100000000000000000000000000000000000000000000100000000000000000010000000000000000000000
00000000000000000000000100000000000000001000000000000000000000000000000000000000000000010000000000000000100000000000000000000000
00000000000000000000000010000000000000010000000000000000000000000000000000000000000000001000000000000001000000000000000000000000
00000000000000000000000001000000000000100000000000000000000000000000000000000000000000000100000000000010000000000000000000000000
00000000000000000000000000100000000001000000000000000000000000000000000000000000000000000010000000000100000000000000000000000000
00000000000000000000000000010000000010000000000000000000000000000000000000000000000000000001000000001000000000000000000000000000
00000000000000000000000000001000000100000000000000000000000000000000000000000000000000000000100000010000000000000000000000000000
00000000000000000000000000000100001000000000000000000000000000000000000000000000000000000000010000100000000000000000000000000000
00000000000000000000000000000010010000000000000000000000000000000000000000000000000000000000001001000000000000000000000000000000
00000000000000000000000000000001100000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000

View File

@ -1,6 +1,6 @@
config USB_ATMEL
tristate "Atmel at76c503/at76c505/at76c505a USB cards"
depends on WLAN_80211 && USB
depends on MAC80211 && WLAN_80211 && USB
default N
select FW_LOADER
---help---

File diff suppressed because it is too large Load Diff

View File

@ -34,23 +34,6 @@ enum board_type {
BOARD_505AMX = 8
};
/* our private ioctl's */
/* preamble length (0 - long, 1 - short, 2 - auto) */
#define AT76_SET_SHORT_PREAMBLE (SIOCIWFIRSTPRIV + 0)
#define AT76_GET_SHORT_PREAMBLE (SIOCIWFIRSTPRIV + 1)
/* which debug channels are enabled */
#define AT76_SET_DEBUG (SIOCIWFIRSTPRIV + 2)
#define AT76_GET_DEBUG (SIOCIWFIRSTPRIV + 3)
/* power save mode (incl. the Atmel proprietary smart save mode) */
#define AT76_SET_POWERSAVE_MODE (SIOCIWFIRSTPRIV + 4)
#define AT76_GET_POWERSAVE_MODE (SIOCIWFIRSTPRIV + 5)
/* min and max channel times for scan */
#define AT76_SET_SCAN_TIMES (SIOCIWFIRSTPRIV + 6)
#define AT76_GET_SCAN_TIMES (SIOCIWFIRSTPRIV + 7)
/* scan mode (0 - active, 1 - passive) */
#define AT76_SET_SCAN_MODE (SIOCIWFIRSTPRIV + 8)
#define AT76_GET_SCAN_MODE (SIOCIWFIRSTPRIV + 9)
#define CMD_STATUS_IDLE 0x00
#define CMD_STATUS_COMPLETE 0x01
#define CMD_STATUS_UNKNOWN 0x02
@ -82,6 +65,7 @@ enum board_type {
#define MIB_MAC 0x03
#define MIB_MAC_MGMT 0x05
#define MIB_MAC_WEP 0x06
#define MIB_MAC_ENCRYPTION 0x06
#define MIB_PHY 0x07
#define MIB_FW_VERSION 0x08
#define MIB_MDOMAIN 0x09
@ -106,6 +90,26 @@ enum board_type {
#define AT76_PM_ON 2
#define AT76_PM_SMART 3
/* cipher values for encryption keys */
#define CIPHER_NONE 0 /* this value is only guessed */
#define CIPHER_WEP64 1
#define CIPHER_TKIP 2
#define CIPHER_CCMP 3
#define CIPHER_CCX 4 /* for consistency sake only */
#define CIPHER_WEP128 5
/* bit flags key types for encryption keys */
#define KEY_PAIRWISE 2
#define KEY_TX 4
#define CIPHER_KEYS (4)
#define CIPHER_KEY_LEN (40)
struct key_config {
u8 cipher;
u8 keylen;
};
struct hwcfg_r505 {
u8 cr39_values[14];
u8 reserved1[14];
@ -147,6 +151,9 @@ union at76_hwcfg {
#define WEP_SMALL_KEY_LEN (40 / 8)
#define WEP_LARGE_KEY_LEN (104 / 8)
#define WEP_KEYS (4)
struct at76_card_config {
u8 exclude_unencrypted;
@ -161,7 +168,7 @@ struct at76_card_config {
u8 privacy_invoked;
u8 wep_default_key_id; /* 0..3 */
u8 current_ssid[32];
u8 wep_default_key_value[4][WEP_KEY_LEN];
u8 wep_default_key_value[4][WEP_LARGE_KEY_LEN];
u8 ssid_len;
u8 short_preamble;
__le16 beacon_period;
@ -186,7 +193,7 @@ struct at76_rx_buffer {
u8 link_quality;
u8 noise_level;
__le32 rx_time;
u8 packet[IEEE80211_FRAME_LEN + IEEE80211_FCS_LEN];
u8 packet[IEEE80211_MAX_FRAG_THRESHOLD];
} __attribute__((packed));
/* Length of Atmel-specific Tx header before 802.11 frame */
@ -196,8 +203,11 @@ struct at76_tx_buffer {
__le16 wlength;
u8 tx_rate;
u8 padding;
u8 reserved[4];
u8 packet[IEEE80211_FRAME_LEN + IEEE80211_FCS_LEN];
u8 key_id;
u8 cipher_type;
u8 cipher_length;
u8 reserved;
u8 packet[IEEE80211_MAX_FRAG_THRESHOLD];
} __attribute__((packed));
/* defines for scan_type below */
@ -244,6 +254,7 @@ struct set_mib_buffer {
u8 byte;
__le16 word;
u8 addr[ETH_ALEN];
u8 data[256]; /* we need more space for mib_mac_encryption */
} data;
} __attribute__((packed));
@ -317,10 +328,24 @@ struct mib_mac_wep {
u8 exclude_unencrypted;
__le32 wep_icv_error_count;
__le32 wep_excluded_count;
u8 wep_default_keyvalue[WEP_KEYS][WEP_KEY_LEN];
u8 wep_default_keyvalue[WEP_KEYS][WEP_LARGE_KEY_LEN];
u8 encryption_level; /* 1 for 40bit, 2 for 104bit encryption */
} __attribute__((packed));
struct mib_mac_encryption {
u8 cipher_default_keyvalue[CIPHER_KEYS][CIPHER_KEY_LEN];
u8 tkip_bssid[6];
u8 privacy_invoked;
u8 cipher_default_key_id;
u8 cipher_default_group_key_id;
u8 exclude_unencrypted;
u8 wep_encryption_type;
u8 ckip_key_permutation; /* bool */
__le32 wep_icv_error_count;
__le32 wep_excluded_count;
u8 key_rsc[CIPHER_KEYS][8];
} __attribute__((packed));
struct mib_phy {
__le32 ed_threshold;
@ -364,16 +389,6 @@ struct at76_fw_header {
__le32 ext_fw_len; /* external firmware image length */
} __attribute__((packed));
enum mac_state {
MAC_INIT,
MAC_SCANNING,
MAC_AUTH,
MAC_ASSOC,
MAC_JOINING,
MAC_CONNECTED,
MAC_OWN_IBSS
};
/* a description of a regulatory domain and the allowed channels */
struct reg_domain {
u16 code;
@ -381,47 +396,6 @@ struct reg_domain {
u32 channel_map; /* if bit N is set, channel (N+1) is allowed */
};
/* how long do we keep a (I)BSS in the bss_list in jiffies
this should be long enough for the user to retrieve the table
(by iwlist ?) after the device started, because all entries from
other channels than the one the device locks on get removed, too */
#define BSS_LIST_TIMEOUT (120 * HZ)
/* struct to store BSS info found during scan */
#define BSS_LIST_MAX_RATE_LEN 32 /* 32 rates should be enough ... */
struct bss_info {
struct list_head list;
u8 bssid[ETH_ALEN]; /* bssid */
u8 ssid[IW_ESSID_MAX_SIZE]; /* essid */
u8 ssid_len; /* length of ssid above */
u8 channel;
u16 capa; /* BSS capabilities */
u16 beacon_interval; /* beacon interval, Kus (1024 microseconds) */
u8 rates[BSS_LIST_MAX_RATE_LEN]; /* supported rates in units of
500 kbps, ORed with 0x80 for
basic rates */
u8 rates_len;
/* quality of received beacon */
u8 rssi;
u8 link_qual;
u8 noise_level;
unsigned long last_rx; /* time (jiffies) of last beacon received */
};
/* a rx data buffer to collect rx fragments */
struct rx_data_buf {
u8 sender[ETH_ALEN]; /* sender address */
u16 seqnr; /* sequence number */
u16 fragnr; /* last fragment received */
unsigned long last_rx; /* jiffies of last rx */
struct sk_buff *skb; /* == NULL if entry is free */
};
#define NR_RX_DATA_BUF 8
/* Data for one loaded firmware file */
struct fwentry {
const char *const fwname;
@ -438,11 +412,9 @@ struct fwentry {
struct at76_priv {
struct usb_device *udev; /* USB device pointer */
struct net_device *netdev; /* net device pointer */
struct net_device_stats stats; /* net device stats */
struct iw_statistics wstats; /* wireless stats */
struct sk_buff *rx_skb; /* skbuff for receiving data */
struct sk_buff *tx_skb; /* skbuff for transmitting data */
void *bulk_out_buffer; /* buffer for sending data */
struct urb *tx_urb; /* URB for sending data */
@ -454,26 +426,17 @@ struct at76_priv {
struct mutex mtx; /* locks this structure */
/* work queues */
struct work_struct work_assoc_done;
struct work_struct work_join;
struct work_struct work_new_bss;
struct work_struct work_start_scan;
struct work_struct work_set_promisc;
struct work_struct work_submit_rx;
struct delayed_work dwork_restart;
struct delayed_work dwork_get_scan;
struct delayed_work dwork_beacon;
struct delayed_work dwork_auth;
struct delayed_work dwork_assoc;
struct delayed_work dwork_hw_scan;
struct tasklet_struct rx_tasklet;
/* the WEP stuff */
int wep_enabled; /* 1 if WEP is enabled */
int wep_key_id; /* key id to be used */
u8 wep_keys[WEP_KEYS][WEP_KEY_LEN]; /* the four WEP keys,
5 or 13 bytes are used */
u8 wep_keys_len[WEP_KEYS]; /* the length of the above keys */
u8 wep_keys[WEP_KEYS][WEP_LARGE_KEY_LEN]; /* WEP keys */
u8 wep_keys_len[WEP_KEYS]; /* length of WEP keys */
int channel;
int iw_mode;
@ -495,44 +458,13 @@ struct at76_priv {
int scan_mode; /* SCAN_TYPE_ACTIVE, SCAN_TYPE_PASSIVE */
int scan_need_any; /* if set, need to scan for any ESSID */
/* the list we got from scanning */
spinlock_t bss_list_spinlock; /* protects bss_list operations */
struct list_head bss_list; /* list of BSS we got beacons from */
struct timer_list bss_list_timer; /* timer to purge old entries
from bss_list */
struct bss_info *curr_bss; /* current BSS */
u16 assoc_id; /* current association ID, if associated */
u8 wanted_bssid[ETH_ALEN];
int wanted_bssid_valid; /* != 0 if wanted_bssid is to be used */
/* some data for infrastructure mode only */
spinlock_t mgmt_spinlock; /* this spinlock protects access to
next_mgmt_bulk */
struct at76_tx_buffer *next_mgmt_bulk; /* pending management msg to
send via bulk out */
enum mac_state mac_state;
enum {
SCAN_IDLE,
SCAN_IN_PROGRESS,
SCAN_COMPLETED
} scan_state;
time_t last_scan;
int retries; /* remaining retries in case of timeout when
* sending AuthReq or AssocReq */
u8 pm_mode; /* power management mode */
u32 pm_period; /* power management period in microseconds */
struct reg_domain const *domain; /* reg domain description */
/* iwspy support */
spinlock_t spy_spinlock;
struct iw_spy_data spy_data;
struct iw_public_data wireless_data;
/* These fields contain HW config provided by the device (not all of
* these fields are used by all board types) */
u8 mac_addr[ETH_ALEN];
@ -540,9 +472,6 @@ struct at76_priv {
struct at76_card_config card_config;
/* store rx fragments until complete */
struct rx_data_buf rx_data[NR_RX_DATA_BUF];
enum board_type board_type;
struct mib_fw_version fw_version;
@ -550,58 +479,20 @@ struct at76_priv {
unsigned int netdev_registered:1;
struct set_mib_buffer mib_buf; /* global buffer for set_mib calls */
/* beacon counting */
int beacon_period; /* period of mgmt beacons, Kus */
int beacons_received;
unsigned long beacons_last_qual; /* time we restarted counting
beacons */
struct ieee80211_hw *hw;
int mac80211_registered;
struct key_config keys[4]; /* installed key types */
u8 default_pairwise_key;
u8 default_group_key;
};
struct at76_rx_radiotap {
struct ieee80211_radiotap_header rt_hdr;
__le64 rt_tsft;
u8 rt_flags;
u8 rt_rate;
s8 rt_signal;
s8 rt_noise;
};
#define AT76_SUPPORTED_FILTERS FIF_PROMISC_IN_BSS
#define AT76_RX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_TSFT) | \
(1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL) | \
(1 << IEEE80211_RADIOTAP_DB_ANTNOISE))
#define BEACON_MAX_DATA_LENGTH 1500
/* the maximum size of an AssocReq packet */
#define ASSOCREQ_MAX_SIZE \
(AT76_TX_HDRLEN + sizeof(struct ieee80211_assoc_request) + \
1 + 1 + IW_ESSID_MAX_SIZE + 1 + 1 + 4)
/* for shared secret auth, add the challenge text size */
#define AUTH_FRAME_SIZE (AT76_TX_HDRLEN + sizeof(struct ieee80211_auth))
/* Maximal number of AuthReq retries */
#define AUTH_RETRIES 3
/* Maximal number of AssocReq retries */
#define ASSOC_RETRIES 3
/* Beacon timeout in managed mode when we are connected */
#define BEACON_TIMEOUT (10 * HZ)
/* Timeout for authentication response */
#define AUTH_TIMEOUT (1 * HZ)
/* Timeout for association response */
#define ASSOC_TIMEOUT (1 * HZ)
/* Polling interval when scan is running */
#define SCAN_POLL_INTERVAL (HZ / 4)
/* Command completion timeout */
#define CMD_COMPLETION_TIMEOUT (5 * HZ)
#define DEF_RTS_THRESHOLD 1536
@ -611,8 +502,6 @@ struct at76_rx_radiotap {
#define DEF_SCAN_MIN_TIME 10
#define DEF_SCAN_MAX_TIME 120
#define MAX_RTS_THRESHOLD (MAX_FRAG_THRESHOLD + 1)
/* the max padding size for tx in bytes (see calc_padding) */
#define MAX_PADDING_SIZE 53

View File

@ -0,0 +1,7 @@
config BENET
tristate "ServerEngines 10Gb NIC - BladeEngine"
depends on PCI && INET
select INET_LRO
help
This driver implements the NIC functionality for ServerEngines
10Gb network adapter BladeEngine (EC 3210).

View File

@ -0,0 +1,6 @@
SERVER ENGINES 10GbE NIC - BLADE-ENGINE
P: Subbu Seetharaman
M: subbus@serverengines.com
L: netdev@vger.kernel.org
W: http://www.serverengines.com
S: Supported

View File

@ -0,0 +1,14 @@
#
# Makefile to build the network driver for ServerEngine's BladeEngine
#
obj-$(CONFIG_BENET) += benet.o
benet-y := be_init.o \
be_int.o \
be_netif.o \
be_ethtool.o \
funcobj.o \
cq.o \
eq.o \
mpu.o \
eth.o

View File

@ -0,0 +1,6 @@
TODO:
- remove wrappers around common iowrite functions
- full netdev audit of common problems/issues
Please send all patches and questions to Subbu Seetharaman
<subbus@serverengines.com> and Greg Kroah-Hartman <greg@kroah.com>

View File

@ -0,0 +1,82 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __asyncmesg_amap_h__
#define __asyncmesg_amap_h__
#include "fwcmd_common.h"
/* --- ASYNC_EVENT_CODES --- */
#define ASYNC_EVENT_CODE_LINK_STATE (1)
#define ASYNC_EVENT_CODE_ISCSI (2)
/* --- ASYNC_LINK_STATES --- */
#define ASYNC_EVENT_LINK_DOWN (0) /* Link Down on a port */
#define ASYNC_EVENT_LINK_UP (1) /* Link Up on a port */
/*
 * The last 4 bytes of the async events have this common format. It allows
 * the driver to distinguish MCC_CQ_ENTRY structs from
 * asynchronous events. Both arrive on the same completion queue. This
 * structure also contains the common fields used to decode the async event.
 */
/* Bit-level field map; per the AMAP convention each u8 array length is the
 * field width in bits (presumed from the DWORD annotations — confirm
 * against the srcgen/AMAP documentation). */
struct BE_ASYNC_EVENT_TRAILER_AMAP {
	u8 rsvd0[8];	/* DWORD 0 */
	u8 event_code[8];	/* DWORD 0 - one of ASYNC_EVENT_CODE_* above */
	u8 event_type[8];	/* DWORD 0 */
	u8 rsvd1[6];	/* DWORD 0 */
	u8 async_event;	/* DWORD 0 */
	u8 valid;	/* DWORD 0 */
} __packed;
/* Raw one-DWORD overlay of the bit map above. */
struct ASYNC_EVENT_TRAILER_AMAP {
	u32 dw[1];
};
/*
 * Applicable in Initiator, Target and NIC modes.
 * A link state async event is seen by all device drivers as soon they
 * create an MCC ring. Thereafter, anytime the link status changes the
 * drivers will receive a link state async event. Notifications continue to
 * be sent until a driver destroys its MCC ring. A link down event is
 * reported when either port loses link. A link up event is reported
 * when either port regains link. When BE's failover mechanism is enabled, a
 * link down on the active port causes traffic to be diverted to the standby
 * port by the BE's ARM firmware (assuming the standby port has link). In
 * this case, the standby port assumes the active status. Note: when link is
 * restored on the failed port, traffic continues on the currently active
 * port. The ARM firmware does not attempt to 'fail back' traffic to
 * the restored port.
 */
/* Bit-level field map of the link-state async event payload; statuses are
 * ASYNC_EVENT_LINK_UP / ASYNC_EVENT_LINK_DOWN (defined above). */
struct BE_ASYNC_EVENT_LINK_STATE_AMAP {
	u8 port0_link_status[8];
	u8 port1_link_status[8];
	u8 active_port[8];
	u8 rsvd0[8];	/* DWORD 0 */
	u8 port0_duplex[8];
	u8 port0_speed[8];
	u8 port1_duplex[8];
	u8 port1_speed[8];
	u8 port0_fault[8];
	u8 port1_fault[8];
	u8 rsvd1[2][8];	/* DWORD 2 */
	struct BE_ASYNC_EVENT_TRAILER_AMAP trailer;	/* common trailer */
} __packed;
/* Raw four-DWORD overlay of the bit map above. */
struct ASYNC_EVENT_LINK_STATE_AMAP {
	u32 dw[4];
};
#endif /* __asyncmesg_amap_h__ */

View File

@ -0,0 +1,134 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __be_cm_amap_h__
#define __be_cm_amap_h__
#include "be_common.h"
#include "etx_context.h"
#include "mpu_context.h"
/*
* --- CEV_WATERMARK_ENUM ---
* CQ/EQ Watermark Encodings. Encoded as number of free entries in
* Queue when Watermark is reached.
*/
#define CEV_WMARK_0 (0) /* Watermark when Queue full */
#define CEV_WMARK_16 (1) /* Watermark at 16 free entries */
#define CEV_WMARK_32 (2) /* Watermark at 32 free entries */
#define CEV_WMARK_48 (3) /* Watermark at 48 free entries */
#define CEV_WMARK_64 (4) /* Watermark at 64 free entries */
#define CEV_WMARK_80 (5) /* Watermark at 80 free entries */
#define CEV_WMARK_96 (6) /* Watermark at 96 free entries */
#define CEV_WMARK_112 (7) /* Watermark at 112 free entries */
#define CEV_WMARK_128 (8) /* Watermark at 128 free entries */
#define CEV_WMARK_144 (9) /* Watermark at 144 free entries */
#define CEV_WMARK_160 (10) /* Watermark at 160 free entries */
#define CEV_WMARK_176 (11) /* Watermark at 176 free entries */
#define CEV_WMARK_192 (12) /* Watermark at 192 free entries */
#define CEV_WMARK_208 (13) /* Watermark at 208 free entries */
#define CEV_WMARK_224 (14) /* Watermark at 224 free entries */
#define CEV_WMARK_240 (15) /* Watermark at 240 free entries */
/*
* --- CQ_CNT_ENUM ---
* Completion Queue Count Encodings.
*/
#define CEV_CQ_CNT_256 (0) /* CQ has 256 entries */
#define CEV_CQ_CNT_512 (1) /* CQ has 512 entries */
#define CEV_CQ_CNT_1024 (2) /* CQ has 1024 entries */
/*
* --- EQ_CNT_ENUM ---
* Event Queue Count Encodings.
*/
#define CEV_EQ_CNT_256 (0) /* EQ has 256 entries (16-byte EQEs only) */
#define CEV_EQ_CNT_512 (1) /* EQ has 512 entries (16-byte EQEs only) */
#define CEV_EQ_CNT_1024 (2) /* EQ has 1024 entries (4-byte or */
/* 16-byte EQEs only) */
#define CEV_EQ_CNT_2048 (3) /* EQ has 2048 entries (4-byte or */
/* 16-byte EQEs only) */
#define CEV_EQ_CNT_4096 (4) /* EQ has 4096 entries (4-byte EQEs only) */
/*
* --- EQ_SIZE_ENUM ---
* Event Queue Entry Size Encoding.
*/
#define CEV_EQ_SIZE_4 (0) /* EQE is 4 bytes */
#define CEV_EQ_SIZE_16 (1) /* EQE is 16 bytes */
/*
 * Completion Queue Context Table Entry. Contains the state of a CQ.
 * Located in RAM within the CEV block.
 */
/* Bit-level field map; array lengths are field widths in bits (AMAP
 * convention — presumed, confirm against srcgen docs).  Watermark uses
 * the CEV_WMARK_* encodings and Count the CEV_CQ_CNT_* encodings above. */
struct BE_CQ_CONTEXT_AMAP {
	u8 Cidx[11];	/* DWORD 0 - consumer index */
	u8 Watermark[4];	/* DWORD 0 - CEV_WMARK_* encoding */
	u8 NoDelay;	/* DWORD 0 */
	u8 EPIdx[11];	/* DWORD 0 */
	u8 Count[2];	/* DWORD 0 - CEV_CQ_CNT_* encoding */
	u8 valid;	/* DWORD 0 */
	u8 SolEvent;	/* DWORD 0 */
	u8 Eventable;	/* DWORD 0 */
	u8 Pidx[11];	/* DWORD 1 - producer index */
	u8 PD[10];	/* DWORD 1 */
	u8 EQID[7];	/* DWORD 1 - event queue this CQ reports to */
	u8 Func;	/* DWORD 1 */
	u8 WME;	/* DWORD 1 */
	u8 Stalled;	/* DWORD 1 */
	u8 Armed;	/* DWORD 1 */
} __packed;
/* Raw two-DWORD overlay of the bit map above. */
struct CQ_CONTEXT_AMAP {
	u32 dw[2];
};
/*
 * Event Queue Context Table Entry. Contains the state of an EQ.
 * Located in RAM in the CEV block.
 */
/* Bit-level field map; array lengths are field widths in bits (AMAP
 * convention — presumed).  Size uses CEV_EQ_SIZE_*, Count uses
 * CEV_EQ_CNT_*, Watermark uses CEV_WMARK_* (defined above). */
struct BE_EQ_CONTEXT_AMAP {
	u8 Cidx[13];	/* DWORD 0 - consumer index */
	u8 rsvd0[2];	/* DWORD 0 */
	u8 Func;	/* DWORD 0 */
	u8 EPIdx[13];	/* DWORD 0 */
	u8 valid;	/* DWORD 0 */
	u8 rsvd1;	/* DWORD 0 */
	u8 Size;	/* DWORD 0 - CEV_EQ_SIZE_* encoding */
	u8 Pidx[13];	/* DWORD 1 - producer index */
	u8 rsvd2[3];	/* DWORD 1 */
	u8 PD[10];	/* DWORD 1 */
	u8 Count[3];	/* DWORD 1 - CEV_EQ_CNT_* encoding */
	u8 SolEvent;	/* DWORD 1 */
	u8 Stalled;	/* DWORD 1 */
	u8 Armed;	/* DWORD 1 */
	u8 Watermark[4];	/* DWORD 2 - CEV_WMARK_* encoding */
	u8 WME;	/* DWORD 2 */
	u8 rsvd3[3];	/* DWORD 2 */
	u8 EventVect[6];	/* DWORD 2 */
	u8 rsvd4[2];	/* DWORD 2 */
	u8 Delay[8];	/* DWORD 2 */
	u8 rsvd5[6];	/* DWORD 2 */
	u8 TMR;	/* DWORD 2 */
	u8 rsvd6;	/* DWORD 2 */
	u8 rsvd7[32];	/* DWORD 3 */
} __packed;
/* Raw four-DWORD overlay of the bit map above. */
struct EQ_CONTEXT_AMAP {
	u32 dw[4];
};
#endif /* __be_cm_amap_h__ */

View File

@ -0,0 +1,53 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __be_common_amap_h__
#define __be_common_amap_h__
/* Physical Address. */
/* 64-bit physical address as low/high 32-bit halves (AMAP bit map). */
struct BE_PHYS_ADDR_AMAP {
	u8 lo[32];	/* DWORD 0 */
	u8 hi[32];	/* DWORD 1 */
} __packed;
/* Raw two-DWORD overlay. */
struct PHYS_ADDR_AMAP {
	u32 dw[2];
};

/* Virtual Address. */
/* 64-bit virtual address as low/high 32-bit halves (AMAP bit map). */
struct BE_VIRT_ADDR_AMAP {
	u8 lo[32];	/* DWORD 0 */
	u8 hi[32];	/* DWORD 1 */
} __packed;
/* Raw two-DWORD overlay. */
struct VIRT_ADDR_AMAP {
	u32 dw[2];
};

/* Scatter gather element. */
/* NOTE(review): addr_hi is DWORD 0 and addr_lo DWORD 1 here, the reverse
 * of BE_PHYS_ADDR_AMAP's layout — presumed intentional hardware layout. */
struct BE_SGE_AMAP {
	u8 addr_hi[32];	/* DWORD 0 */
	u8 addr_lo[32];	/* DWORD 1 */
	u8 rsvd0[32];	/* DWORD 2 */
	u8 len[16];	/* DWORD 3 - buffer length in bytes */
	u8 rsvd1[16];	/* DWORD 3 */
} __packed;
/* Raw four-DWORD overlay. */
struct SGE_AMAP {
	u32 dw[4];
};
#endif /* __be_common_amap_h__ */

View File

@ -0,0 +1,348 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* be_ethtool.c
*
* This file contains various functions that ethtool can use
* to talk to the driver and the BE H/W.
*/
#include "benet.h"
#include <linux/ethtool.h>
/*
 * Names reported for `ethtool -S'.  The first group mirrors struct
 * net_device_stats field-for-field; the second group is the driver's
 * private BE statistics.  The order here must match the order in which
 * be_get_ethtool_stats() fills the data[] array.
 */
static const char benet_gstrings_stats[][ETH_GSTRING_LEN] = {
	/* net_device_stats */
	"rx_packets",
	"tx_packets",
	"rx_bytes",
	"tx_bytes",
	"rx_errors",
	"tx_errors",
	"rx_dropped",
	"tx_dropped",
	"multicast",
	"collisions",
	"rx_length_errors",
	"rx_over_errors",
	"rx_crc_errors",
	"rx_frame_errors",
	"rx_fifo_errors",
	"rx_missed_errors",
	"tx_aborted_errors",
	"tx_carrier_errors",
	"tx_fifo_errors",
	"tx_heartbeat_errors",
	"tx_window_errors",
	"rx_compressed",
	"tc_compressed",	/* NOTE(review): presumably meant "tx_compressed" */
	/* BE driver Stats */
	"bes_tx_reqs",
	"bes_tx_fails",
	"bes_fwd_reqs",
	"bes_tx_wrbs",
	"bes_interrupts",
	"bes_events",
	"bes_tx_events",
	"bes_rx_events",
	"bes_tx_compl",
	"bes_rx_compl",
	"bes_ethrx_post_fail",
	"bes_802_3_dropped_frames",
	"bes_802_3_malformed_frames",
	"bes_rx_misc_pkts",
	"bes_eth_tx_rate",
	"bes_eth_rx_rate",
	"Num Packets collected",
	"Num Times Flushed",
};

/* number of unsigned-long counters in struct net_device_stats */
#define NET_DEV_STATS_LEN \
	(sizeof(struct net_device_stats)/sizeof(unsigned long))

/* total number of values reported by `ethtool -S' */
#define BENET_STATS_LEN  ARRAY_SIZE(benet_gstrings_stats)
/*
 * ethtool -i handler: report driver identification strings.
 *
 * Fix: strncpy() does not NUL-terminate the destination when the source
 * is at least as long as the limit, so each bounded copy is now
 * explicitly terminated instead of silently assuming short sources.
 */
static void
be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct be_net_object *pnob = netdev_priv(netdev);
	struct be_adapter *adapter = pnob->adapter;

	strncpy(drvinfo->driver, be_driver_name, sizeof(drvinfo->driver) - 1);
	drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';
	strncpy(drvinfo->version, be_drvr_ver, sizeof(drvinfo->version) - 1);
	drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
	strncpy(drvinfo->fw_version, be_fw_ver,
		sizeof(drvinfo->fw_version) - 1);
	drvinfo->fw_version[sizeof(drvinfo->fw_version) - 1] = '\0';
	strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
	/* no self-test, register-dump or EEPROM-dump support */
	drvinfo->testinfo_len = 0;
	drvinfo->regdump_len = 0;
	drvinfo->eedump_len = 0;
}
/*
 * ethtool -c handler: report the current interrupt-coalescing setup.
 * The BE uses a single event-queue delay (EQD) for both directions,
 * so identical values are reported on the rx and tx sides.
 */
static int
be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
{
	struct be_net_object *pnob = netdev_priv(netdev);
	struct be_adapter *adapter = pnob->adapter;

	coalesce->rx_max_coalesced_frames = adapter->max_rx_coal;

	coalesce->rx_coalesce_usecs = adapter->cur_eqd;
	coalesce->rx_coalesce_usecs_low = adapter->min_eqd;
	coalesce->rx_coalesce_usecs_high = adapter->max_eqd;

	coalesce->tx_coalesce_usecs = adapter->cur_eqd;
	coalesce->tx_coalesce_usecs_low = adapter->min_eqd;
	coalesce->tx_coalesce_usecs_high = adapter->max_eqd;

	coalesce->use_adaptive_rx_coalesce = adapter->enable_aic;
	coalesce->use_adaptive_tx_coalesce = adapter->enable_aic;
	return 0;
}
/*
 * ethtool -C handler: set the interrupt coalescing delay *as well as*
 * the number of packets to coalesce for LRO.
 */
static int
be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
{
	struct be_net_object *pnob = netdev_priv(netdev);
	struct be_adapter *adapter = pnob->adapter;
	struct be_eq_object *eqo;
	u32 new_max, new_min, new_cur;
	int status;

	/* clamp the LRO packet-coalescing count to the supported maximum */
	adapter->max_rx_coal = coalesce->rx_max_coalesced_frames;
	if (adapter->max_rx_coal >= BE_LRO_MAX_PKTS)
		adapter->max_rx_coal = BE_LRO_MAX_PKTS;

	/* if AIC is being turned on now, start with an EQD of 0 */
	if (adapter->enable_aic == 0 &&
	    coalesce->use_adaptive_rx_coalesce == 1)
		adapter->cur_eqd = 0;
	adapter->enable_aic = coalesce->use_adaptive_rx_coalesce;

	/* round each requested delay off to the nearest multiple of 8 */
	new_max = ((coalesce->rx_coalesce_usecs_high + 4) >> 3) << 3;
	new_min = ((coalesce->rx_coalesce_usecs_low + 4) >> 3) << 3;
	new_cur = ((coalesce->rx_coalesce_usecs + 4) >> 3) << 3;

	if (adapter->enable_aic) {
		/* with AIC enabled only the low/high bounds are honoured */
		if (new_max > MAX_EQD)
			new_max = MAX_EQD;
		if (new_min > new_max)
			new_min = new_max;
		adapter->max_eqd = new_max;
		adapter->min_eqd = new_min;
		if (adapter->cur_eqd > new_max)
			adapter->cur_eqd = new_max;
		if (adapter->cur_eqd < new_min)
			adapter->cur_eqd = new_min;
	} else {
		/* accept the fixed delay only when AIC is disabled */
		if (new_cur > MAX_EQD)
			new_cur = MAX_EQD;
		eqo = &pnob->event_q_obj;
		status = be_eq_modify_delay(&pnob->fn_obj, 1, &eqo,
					    &new_cur, NULL, NULL, NULL);
		if (status == BE_SUCCESS)
			adapter->cur_eqd = new_cur;
	}
	return 0;
}
/* ethtool: report whether received-frame checksum offload results are used */
static u32 be_get_rx_csum(struct net_device *netdev)
{
	struct be_net_object *pnob = netdev_priv(netdev);

	return pnob->adapter->rx_csum;
}
/* ethtool: enable/disable use of hardware rx checksum results */
static int be_set_rx_csum(struct net_device *netdev, uint32_t data)
{
	struct be_net_object *pnob = netdev_priv(netdev);
	struct be_adapter *adapter = pnob->adapter;

	adapter->rx_csum = data ? 1 : 0;
	return 0;
}
/* ethtool -S support: copy out the statistic-name table for ETH_SS_STATS */
static void
be_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, *benet_gstrings_stats,
		       sizeof(benet_gstrings_stats));
}
/* ethtool: number of u64 values produced by be_get_ethtool_stats() */
static int be_get_stats_count(struct net_device *netdev)
{
	return BENET_STATS_LEN;
}
/*
 * ethtool -S handler: fill data[] with the net_device_stats counters
 * followed by the driver's private BE statistics, in the order given
 * by benet_gstrings_stats[].
 *
 * Fixes over the original:
 *  - the copy loop used `i <= NET_DEV_STATS_LEN', reading one element
 *    past struct net_device_stats and writing one slot too many;
 *  - the first private stat was stored with `data[i] =' (no i++), so
 *    bes_tx_fails overwrote bes_tx_reqs and every subsequent value
 *    landed one slot early relative to its reported name.
 */
static void
be_get_ethtool_stats(struct net_device *netdev,
		     struct ethtool_stats *stats, uint64_t *data)
{
	struct be_net_object *pnob = netdev_priv(netdev);
	struct be_adapter *adapter = pnob->adapter;
	int i;

	benet_get_stats(netdev);

	/* generic counters: struct net_device_stats as unsigned longs */
	for (i = 0; i < NET_DEV_STATS_LEN; i++)
		data[i] = ((unsigned long *)&adapter->benet_stats)[i];

	/* driver-private counters, in benet_gstrings_stats[] order */
	data[i++] = adapter->be_stat.bes_tx_reqs;
	data[i++] = adapter->be_stat.bes_tx_fails;
	data[i++] = adapter->be_stat.bes_fwd_reqs;
	data[i++] = adapter->be_stat.bes_tx_wrbs;
	data[i++] = adapter->be_stat.bes_ints;
	data[i++] = adapter->be_stat.bes_events;
	data[i++] = adapter->be_stat.bes_tx_events;
	data[i++] = adapter->be_stat.bes_rx_events;
	data[i++] = adapter->be_stat.bes_tx_compl;
	data[i++] = adapter->be_stat.bes_rx_compl;
	data[i++] = adapter->be_stat.bes_ethrx_post_fail;
	data[i++] = adapter->be_stat.bes_802_3_dropped_frames;
	data[i++] = adapter->be_stat.bes_802_3_malformed_frames;
	data[i++] = adapter->be_stat.bes_rx_misc_pkts;
	data[i++] = adapter->be_stat.bes_eth_tx_rate;
	data[i++] = adapter->be_stat.bes_eth_rx_rate;
	data[i++] = adapter->be_stat.bes_rx_coal;
	data[i++] = adapter->be_stat.bes_rx_flush;
}
/*
 * ethtool: link settings.  The BladeEngine is reported as a fixed
 * 10 Gb/s full-duplex device with autonegotiation disabled.
 */
static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	ecmd->autoneg = AUTONEG_DISABLE;
	ecmd->duplex = DUPLEX_FULL;
	ecmd->speed = SPEED_10000;
	return 0;
}
/* Get the Ring parameters from the pnob */
/*
 * ethtool -g handler.  There are no mini/jumbo rx rings on this
 * device, so those fields are reported as 0.  (The original code
 * self-assigned them — a no-op that relied on the ethtool core
 * having zeroed the struct; the explicit zeroes are equivalent
 * and make the intent clear.)
 */
static void
be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
	struct be_net_object *pnob = netdev_priv(netdev);

	/* maximum ring sizes supported by the hardware queues */
	ring->rx_max_pending = pnob->rx_q_len;
	ring->tx_max_pending = pnob->tx_q_len;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;

	/* current usage */
	ring->rx_pending = atomic_read(&pnob->rx_q_posted);
	ring->tx_pending = atomic_read(&pnob->tx_q_used);
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}
/*
 * ethtool -a handler: query the tx/rx pause-frame (flow control)
 * settings from the hardware function object.  On failure a default
 * of rx-pause-on / tx-pause-off is reported.
 */
static void
be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
{
	struct be_net_object *pnob = netdev_priv(netdev);
	bool rxfc, txfc;

	ecmd->autoneg = AUTONEG_ENABLE;
	if (be_eth_get_flow_control(&pnob->fn_obj, &txfc, &rxfc) !=
	    BE_SUCCESS) {
		dev_info(&netdev->dev, "Unable to get pause frame settings\n");
		/* query failed: report defaults */
		ecmd->rx_pause = 1;
		ecmd->tx_pause = 0;
		return;
	}
	ecmd->tx_pause = txfc ? 1 : 0;
	ecmd->rx_pause = rxfc ? 1 : 0;
}
/*
 * ethtool -A handler: program the tx/rx pause-frame (flow control)
 * settings.  ecmd->autoneg must be AUTONEG_ENABLE; anything else is
 * rejected with -EINVAL.
 */
static int
be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
{
	struct be_net_object *pnob = netdev_priv(netdev);
	bool txfc, rxfc;

	if (ecmd->autoneg != AUTONEG_ENABLE)
		return -EINVAL;

	txfc = ecmd->tx_pause ? true : false;
	rxfc = ecmd->rx_pause ? true : false;

	if (be_eth_set_flow_control(&pnob->fn_obj, txfc, rxfc) != BE_SUCCESS) {
		dev_info(&netdev->dev, "Unable to set pause frame settings\n");
		/* NOTE(review): -1 (== -EPERM) kept for compatibility with
		 * the original; a descriptive errno would be preferable. */
		return -1;
	}
	return 0;
}
/*
 * ethtool operations exported by this driver.  Generic ethtool_op_*
 * helpers handle the simple tx-checksum/SG/TSO flag accessors;
 * everything else is implemented in this file.
 */
struct ethtool_ops be_ethtool_ops = {
	.get_settings = be_get_settings,
	.get_drvinfo = be_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_coalesce = be_get_coalesce,
	.set_coalesce = be_set_coalesce,
	.get_ringparam = be_get_ringparam,
	.get_pauseparam = be_get_pauseparam,
	.set_pauseparam = be_set_pauseparam,
	.get_rx_csum = be_get_rx_csum,
	.set_rx_csum = be_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = ethtool_op_set_tso,
	.get_strings = be_get_strings,
	.get_stats_count = be_get_stats_count,
	.get_ethtool_stats = be_get_ethtool_stats,
};

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,863 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
#include <linux/if_vlan.h>
#include <linux/inet_lro.h>
#include "benet.h"
/* number of bytes of RX frame that are copied to skb->data */
#define BE_HDR_LEN 64
#define NETIF_RX(skb) netif_receive_skb(skb)
#define VLAN_ACCEL_RX(skb, pnob, vt) \
vlan_hwaccel_rx(skb, pnob->vlan_grp, vt)
/*
 * be_notify_cmpl() - ring the completion-queue doorbell.
 * @pnob:   NetObject owning the completion queue
 * @n:      number of completion entries just processed (popped)
 * @cq_id:  queue ID of the completion queue being acknowledged
 * @re_arm: 1 - rearm the completion ring to generate an event
 *          0 - don't rearm the completion ring
 *
 * Tells the BladeEngine how many entries the driver consumed from the
 * given completion queue by writing the count to the doorbell register.
 */
void be_notify_cmpl(struct be_net_object *pnob, int n, int cq_id, int re_arm)
{
	struct CQ_DB_AMAP cqdb;

	cqdb.dw[0] = 0;
	AMAP_SET_BITS_PTR(CQ_DB, num_popped, &cqdb, n);
	AMAP_SET_BITS_PTR(CQ_DB, rearm, &cqdb, re_arm);
	AMAP_SET_BITS_PTR(CQ_DB, qid, &cqdb, cq_id);
	PD_WRITE(&pnob->fn_obj, cq_db, cqdb.dw[0]);
}
/*
 * add_skb_frags() - append the remaining receive fragments of a
 * multi-fragment frame (indicated by BE) to @skb's frag list.
 * @pnob:   NetObject owning the rx queue
 * @skb:    skb already holding the first fragment of the frame
 * @nresid: number of bytes of the frame still to be attached
 * @fi:     rx-queue index of the fragment already consumed; the walk
 *          starts at the index after it
 */
static void
add_skb_frags(struct be_net_object *pnob, struct sk_buff *skb,
	      u32 nresid, u32 fi)
{
	struct be_adapter *adapter = pnob->adapter;
	u32 sk_frag_idx, n;
	struct be_rx_page_info *rx_page_info;
	u32 frag_sz = pnob->rx_buf_size;

	sk_frag_idx = skb_shinfo(skb)->nr_frags;
	while (nresid) {
		index_inc(&fi, pnob->rx_q_len);

		/* claim the rx-queue context slot for this fragment */
		rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
		pnob->rx_ctxt[fi] = NULL;
		/* unmap when this fragment owns the mapping: one page per
		 * fragment, or the second (non-zero-offset) half of a
		 * shared page */
		if ((rx_page_info->page_offset) ||
		    (pnob->rx_pg_shared == false)) {
			pci_unmap_page(adapter->pdev,
				       pci_unmap_addr(rx_page_info, bus),
				       frag_sz, PCI_DMA_FROMDEVICE);
		}

		/* this fragment carries up to frag_sz of the residue */
		n = min(nresid, frag_sz);
		skb_shinfo(skb)->frags[sk_frag_idx].page = rx_page_info->page;
		skb_shinfo(skb)->frags[sk_frag_idx].page_offset
		    = rx_page_info->page_offset;
		skb_shinfo(skb)->frags[sk_frag_idx].size = n;

		sk_frag_idx++;
		skb->len += n;
		skb->data_len += n;
		skb_shinfo(skb)->nr_frags++;

		nresid -= n;
		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
		atomic_dec(&pnob->rx_q_posted);
	}
}
/*
 * This function processes incoming nic packets over various Rx queues.
 * This function takes the adapter, the current Rx status descriptor
 * entry and the Rx completion queue ID as argument.
 *
 * Builds an sk_buff from the rx-queue page fragment(s) referenced by the
 * completion entry: up to BE_HDR_LEN header bytes are copied into the
 * linear area, the remainder is attached as page frags, then the skb is
 * handed to the stack (via VLAN acceleration for tagged frames).
 * Returns 0 on success or intentional drop, -ENOMEM when skb allocation
 * fails (all fragments of the frame are released in that case).
 */
static inline int process_nic_rx_completion(struct be_net_object *pnob,
					    struct ETH_RX_COMPL_AMAP *rxcp)
{
	struct be_adapter *adapter = pnob->adapter;
	struct sk_buff *skb;
	int udpcksm, tcpcksm;
	int n;
	u32 nresid, fi;
	u32 frag_sz = pnob->rx_buf_size;
	u8 *va;
	struct be_rx_page_info *rx_page_info;
	u32 numfrags, vtp, vtm, vlan_tag, pktsize;

	/* rx-queue index of the first fragment holding this frame */
	fi = AMAP_GET_BITS_PTR(ETH_RX_COMPL, fragndx, rxcp);
	BUG_ON(fi >= (int)pnob->rx_q_len);
	/* NOTE(review): fi is u32, so this check is always false (dead) */
	BUG_ON(fi < 0);

	rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
	BUG_ON(!rx_page_info->page);
	pnob->rx_ctxt[fi] = NULL;

	/*
	 * If one page is used per fragment or if this is the second half
	 * of the page, unmap the page here
	 */
	if ((rx_page_info->page_offset) || (pnob->rx_pg_shared == false)) {
		pci_unmap_page(adapter->pdev,
			       pci_unmap_addr(rx_page_info, bus), frag_sz,
			       PCI_DMA_FROMDEVICE);
	}

	atomic_dec(&pnob->rx_q_posted);
	udpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, udpcksm, rxcp);
	tcpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, tcpcksm, rxcp);
	pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp);
	/*
	 * get rid of RX flush completions first.
	 * (both checksum bits set with a 32-byte size marks a flush entry)
	 */
	if ((tcpcksm) && (udpcksm) && (pktsize == 32)) {
		put_page(rx_page_info->page);
		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
		return 0;
	}
	skb = netdev_alloc_skb(pnob->netdev, BE_HDR_LEN + NET_IP_ALIGN);
	if (skb == NULL) {
		dev_info(&pnob->netdev->dev, "alloc_skb() failed\n");
		put_page(rx_page_info->page);
		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
		goto free_frags;
	}
	skb_reserve(skb, NET_IP_ALIGN);
	skb->dev = pnob->netdev;

	/* bytes of the frame held in this first fragment */
	n = min(pktsize, frag_sz);

	va = page_address(rx_page_info->page) + rx_page_info->page_offset;
	prefetch(va);

	skb->len = n;
	skb->data_len = n;
	if (n <= BE_HDR_LEN) {
		/* small frame: copy it entirely into the linear area */
		memcpy(skb->data, va, n);
		put_page(rx_page_info->page);
		skb->data_len -= n;
		skb->tail += n;
	} else {
		/* Setup the SKB with page buffer information */
		skb_shinfo(skb)->frags[0].page = rx_page_info->page;
		skb_shinfo(skb)->nr_frags++;

		/* Copy the header into the skb_data */
		memcpy(skb->data, va, BE_HDR_LEN);
		skb_shinfo(skb)->frags[0].page_offset =
		    rx_page_info->page_offset + BE_HDR_LEN;
		skb_shinfo(skb)->frags[0].size = n - BE_HDR_LEN;
		skb->data_len -= BE_HDR_LEN;
		skb->tail += BE_HDR_LEN;
	}
	memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
	nresid = pktsize - n;

	skb->protocol = eth_type_trans(skb, pnob->netdev);

	if ((tcpcksm || udpcksm) && adapter->rx_csum)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
	/*
	 * if we have more bytes left, the frame has been
	 * given to us in multiple fragments. This happens
	 * with Jumbo frames. Add the remaining fragments to
	 * skb->frags[] array.
	 */
	if (nresid)
		add_skb_frags(pnob, skb, nresid, fi);

	/* update the true size of the skb. */
	skb->truesize = skb->len + sizeof(struct sk_buff);

	/*
	 * If a 802.3 frame or 802.2 LLC frame
	 * (i.e) contains length field in MAC Hdr
	 * and frame len is greater than 64 bytes
	 */
	if (((skb->protocol == ntohs(ETH_P_802_2)) ||
	     (skb->protocol == ntohs(ETH_P_802_3)))
	    && (pktsize > BE_HDR_LEN)) {
		/*
		 * If the length given in Mac Hdr is less than frame size
		 * Erroneous frame, Drop it
		 */
		if ((ntohs(*(u16 *) (va + 12)) + ETH_HLEN) < pktsize) {
			/* Increment Non Ether type II frames dropped */
			adapter->be_stat.bes_802_3_dropped_frames++;

			kfree_skb(skb);
			return 0;
		}
		/*
		 * else if the length given in Mac Hdr is greater than
		 * frame size, should not be seeing this sort of frames
		 * dump the pkt and pass to stack
		 */
		else if ((ntohs(*(u16 *) (va + 12)) + ETH_HLEN) > pktsize) {
			/* Increment Non Ether type II frames malformed */
			adapter->be_stat.bes_802_3_malformed_frames++;
		}
	}

	vtp = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtp, rxcp);
	vtm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtm, rxcp);
	if (vtp && vtm) {
		/* Vlan tag present in pkt and BE found
		 * that the tag matched an entry in VLAN table
		 */
		if (!pnob->vlan_grp || pnob->num_vlans == 0) {
			/* But we have no VLANs configured.
			 * This should never happen. Drop the packet.
			 */
			dev_info(&pnob->netdev->dev,
				 "BladeEngine: Unexpected vlan tagged packet\n");
			kfree_skb(skb);
			return 0;
		}
		/* pass the VLAN packet to stack */
		vlan_tag = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vlan_tag, rxcp);
		VLAN_ACCEL_RX(skb, pnob, be16_to_cpu(vlan_tag));

	} else {
		NETIF_RX(skb);
	}
	return 0;

free_frags:
	/* free all frags associated with the current rxcp */
	numfrags = AMAP_GET_BITS_PTR(ETH_RX_COMPL, numfrags, rxcp);
	while (numfrags-- > 1) {
		index_inc(&fi, pnob->rx_q_len);

		rx_page_info = (struct be_rx_page_info *)
		    pnob->rx_ctxt[fi];
		pnob->rx_ctxt[fi] = (void *)NULL;
		if (rx_page_info->page_offset || !pnob->rx_pg_shared) {
			pci_unmap_page(adapter->pdev,
				       pci_unmap_addr(rx_page_info, bus),
				       frag_sz, PCI_DMA_FROMDEVICE);
		}

		put_page(rx_page_info->page);
		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
		atomic_dec(&pnob->rx_q_posted);
	}
	return -ENOMEM;
}
/*
 * LRO-aware rx completion handler: instead of building an skb per frame,
 * the frame's page fragments are handed to the inet_lro manager so that
 * consecutive TCP segments can be aggregated.  Frames with the error bit
 * set, or completions arriving while coalescing is disabled, fall back
 * to process_nic_rx_completion().
 */
static void process_nic_rx_completion_lro(struct be_net_object *pnob,
					  struct ETH_RX_COMPL_AMAP *rxcp)
{
	struct be_adapter *adapter = pnob->adapter;
	struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
	unsigned int udpcksm, tcpcksm;
	u32 numfrags, vlanf, vtm, vlan_tag, nresid;
	u16 vlant;
	unsigned int fi, idx, n;
	struct be_rx_page_info *rx_page_info;
	u32 frag_sz = pnob->rx_buf_size, pktsize;
	bool rx_coal = (adapter->max_rx_coal <= 1) ? 0 : 1;
	u8 err, *va;
	__wsum csum = 0;

	if (AMAP_GET_BITS_PTR(ETH_RX_COMPL, ipsec, rxcp)) {
		/* Drop the pkt and move to the next completion.  */
		adapter->be_stat.bes_rx_misc_pkts++;
		return;
	}
	err = AMAP_GET_BITS_PTR(ETH_RX_COMPL, err, rxcp);
	if (err || !rx_coal) {
		/* We won't coalesce Rx pkts if the err bit set.
		 * take the path of normal completion processing */
		process_nic_rx_completion(pnob, rxcp);
		return;
	}

	fi = AMAP_GET_BITS_PTR(ETH_RX_COMPL, fragndx, rxcp);
	BUG_ON(fi >= (int)pnob->rx_q_len);
	/* NOTE(review): fi is unsigned, so this check is always false */
	BUG_ON(fi < 0);
	rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
	BUG_ON(!rx_page_info->page);
	pnob->rx_ctxt[fi] = (void *)NULL;

	/*  If one page is used per fragment or if this is the
	 * second half of the page, unmap the page here
	 */
	if (rx_page_info->page_offset || !pnob->rx_pg_shared) {
		pci_unmap_page(adapter->pdev,
			       pci_unmap_addr(rx_page_info, bus),
			       frag_sz, PCI_DMA_FROMDEVICE);
	}

	numfrags = AMAP_GET_BITS_PTR(ETH_RX_COMPL, numfrags, rxcp);
	udpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, udpcksm, rxcp);
	tcpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, tcpcksm, rxcp);
	vlan_tag = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vlan_tag, rxcp);
	vlant = be16_to_cpu(vlan_tag);
	vlanf = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtp, rxcp);
	vtm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtm, rxcp);
	pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp);

	atomic_dec(&pnob->rx_q_posted);

	if (tcpcksm && udpcksm && pktsize == 32) {
		/* flush completion entries */
		put_page(rx_page_info->page);
		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
		return;
	}
	/* Only one of udpcksum and tcpcksum can be set */
	BUG_ON(udpcksm && tcpcksm);

	/* jumbo frames could come in multiple fragments */
	BUG_ON(numfrags != ((pktsize + (frag_sz - 1)) / frag_sz));
	n = min(pktsize, frag_sz);
	nresid = pktsize - n;	/* will be useful for jumbo pkts */
	idx = 0;

	va = page_address(rx_page_info->page) + rx_page_info->page_offset;
	prefetch(va);
	rx_frags[idx].page = rx_page_info->page;
	rx_frags[idx].page_offset = (rx_page_info->page_offset);
	rx_frags[idx].size = n;
	memset(rx_page_info, 0, sizeof(struct be_rx_page_info));

	/* If we got multiple fragments, we have more data. */
	while (nresid) {
		idx++;
		index_inc(&fi, pnob->rx_q_len);

		rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
		pnob->rx_ctxt[fi] = (void *)NULL;
		if (rx_page_info->page_offset || !pnob->rx_pg_shared) {
			pci_unmap_page(adapter->pdev,
				       pci_unmap_addr(rx_page_info, bus),
				       frag_sz, PCI_DMA_FROMDEVICE);
		}

		n = min(nresid, frag_sz);
		rx_frags[idx].page = rx_page_info->page;
		rx_frags[idx].page_offset = (rx_page_info->page_offset);
		rx_frags[idx].size = n;

		nresid -= n;
		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
		atomic_dec(&pnob->rx_q_posted);
	}

	if (likely(!(vlanf && vtm))) {
		lro_receive_frags(&pnob->lro_mgr, rx_frags,
				  pktsize, pktsize,
				  (void *)(unsigned long)csum, csum);
	} else {
		/* Vlan tag present in pkt and BE found
		 * that the tag matched an entry in VLAN table
		 */
		if (unlikely(!pnob->vlan_grp || pnob->num_vlans == 0)) {
			/* But we have no VLANs configured.
			 * This should never happen. Drop the packet.
			 */
			/* NOTE(review): the pages collected in rx_frags[]
			 * appear to be neither released nor passed on in
			 * this path — possible page leak; confirm. */
			dev_info(&pnob->netdev->dev,
				 "BladeEngine: Unexpected vlan tagged packet\n");
			return;
		}
		/* pass the VLAN packet to stack */
		lro_vlan_hwaccel_receive_frags(&pnob->lro_mgr,
					       rx_frags, pktsize, pktsize,
					       pnob->vlan_grp, vlant,
					       (void *)(unsigned long)csum,
					       csum);
	}

	adapter->be_stat.bes_rx_coal++;
}
struct ETH_RX_COMPL_AMAP *be_get_rx_cmpl(struct be_net_object *pnob)
{
struct ETH_RX_COMPL_AMAP *rxcp = &pnob->rx_cq[pnob->rx_cq_tl];
u32 valid, ct;
valid = AMAP_GET_BITS_PTR(ETH_RX_COMPL, valid, rxcp);
if (valid == 0)
return NULL;
ct = AMAP_GET_BITS_PTR(ETH_RX_COMPL, ct, rxcp);
if (ct != 0) {
/* Invalid chute #. treat as error */
AMAP_SET_BITS_PTR(ETH_RX_COMPL, err, rxcp, 1);
}
be_adv_rxcq_tl(pnob);
AMAP_SET_BITS_PTR(ETH_RX_COMPL, valid, rxcp, 0);
return rxcp;
}
/*
 * Recompute bes_eth_rx_rate (in Mbits/sec) from the bytes received
 * since the last sample; updated at most once every two seconds.
 * time_after() is the idiomatic jiffies comparison and is safe across
 * jiffies wrap-around.
 */
static void update_rx_rate(struct be_adapter *adapter)
{
	/* update the rate once in two seconds */
	if (time_after(jiffies, adapter->eth_rx_jiffies + 2 * HZ)) {
		u32 r;

		/* elapsed is > 2*HZ here, so the divisor is at least 2 */
		r = adapter->eth_rx_bytes /
		    ((jiffies - adapter->eth_rx_jiffies) / (HZ));
		r = (r / 1000000);	/* MBytes/sec */
		adapter->be_stat.bes_eth_rx_rate = (r * 8); /* Mbits/sec */
		adapter->eth_rx_jiffies = jiffies;
		adapter->eth_rx_bytes = 0;
	}
}
static int process_rx_completions(struct be_net_object *pnob, int max_work)
{
struct be_adapter *adapter = pnob->adapter;
struct ETH_RX_COMPL_AMAP *rxcp;
u32 nc = 0;
unsigned int pktsize;
while (max_work && (rxcp = be_get_rx_cmpl(pnob))) {
prefetch(rxcp);
pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp);
process_nic_rx_completion_lro(pnob, rxcp);
adapter->eth_rx_bytes += pktsize;
update_rx_rate(adapter);
nc++;
max_work--;
adapter->be_stat.bes_rx_compl++;
}
if (likely(adapter->max_rx_coal > 1)) {
adapter->be_stat.bes_rx_flush++;
lro_flush_all(&pnob->lro_mgr);
}
/* Refill the queue */
if (atomic_read(&pnob->rx_q_posted) < 900)
be_post_eth_rx_buffs(pnob);
return nc;
}
static struct ETH_TX_COMPL_AMAP *be_get_tx_cmpl(struct be_net_object *pnob)
{
struct ETH_TX_COMPL_AMAP *txcp = &pnob->tx_cq[pnob->tx_cq_tl];
u32 valid;
valid = AMAP_GET_BITS_PTR(ETH_TX_COMPL, valid, txcp);
if (valid == 0)
return NULL;
AMAP_SET_BITS_PTR(ETH_TX_COMPL, valid, txcp, 0);
be_adv_txcq_tl(pnob);
return txcp;
}
/*
 * process_one_tx_compl() - reclaim all WRBs of one completed tx request.
 * @pnob:    NetObject owning the tx queue
 * @end_idx: index of the request's last WRB, as reported by the tx
 *           completion entry (cross-checked against the stored value)
 *
 * Walks the tx queue from the tail to the last WRB of the request,
 * unmapping each fragment's DMA mapping, freeing the skb stored at the
 * last WRB, and finally releasing the consumed WRB slots.
 */
void process_one_tx_compl(struct be_net_object *pnob, u32 end_idx)
{
	struct be_adapter *adapter = pnob->adapter;
	int cur_index, tx_wrbs_completed = 0;
	struct sk_buff *skb;
	u64 busaddr, pa, pa_lo, pa_hi;
	struct ETH_WRB_AMAP *wrb;
	u32 frag_len, last_index, j;

	last_index = tx_compl_lastwrb_idx_get(pnob);
	BUG_ON(last_index != end_idx);
	/* NOTE(review): clears the context at the current tail before the
	 * loop reads tx_ctxt[last_index]; if tail == last_index the skb
	 * lookup below would see NULL — confirm the tx path guarantees
	 * this cannot happen. */
	pnob->tx_ctxt[pnob->tx_q_tl] = NULL;
	do {
		cur_index = pnob->tx_q_tl;
		wrb = &pnob->tx_q[cur_index];
		pa_hi = AMAP_GET_BITS_PTR(ETH_WRB, frag_pa_hi, wrb);
		pa_lo = AMAP_GET_BITS_PTR(ETH_WRB, frag_pa_lo, wrb);
		frag_len = AMAP_GET_BITS_PTR(ETH_WRB, frag_len, wrb);
		busaddr = (pa_hi << 32) | pa_lo;
		if (busaddr != 0) {
			pa = le64_to_cpu(busaddr);
			pci_unmap_single(adapter->pdev, pa,
					 frag_len, PCI_DMA_TODEVICE);
		}
		if (cur_index == last_index) {
			/* the skb is stored only at the request's last WRB */
			skb = (struct sk_buff *)pnob->tx_ctxt[cur_index];
			BUG_ON(!skb);
			for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
				struct skb_frag_struct *frag;
				frag = &skb_shinfo(skb)->frags[j];
				/* NOTE(review): passes the struct page
				 * pointer cast to ulong where a DMA bus
				 * address is expected — looks wrong;
				 * confirm against the tx mapping code. */
				pci_unmap_page(adapter->pdev,
					       (ulong) frag->page, frag->size,
					       PCI_DMA_TODEVICE);
			}
			kfree_skb(skb);
			pnob->tx_ctxt[cur_index] = NULL;
		} else {
			/* intermediate WRBs must not carry a context */
			BUG_ON(pnob->tx_ctxt[cur_index]);
		}
		tx_wrbs_completed++;
		be_adv_txq_tl(pnob);
	} while (cur_index != last_index);
	atomic_sub(tx_wrbs_completed, &pnob->tx_q_used);
}
/* there is no need to take an SMP lock here since currently
 * we have only one instance of the tasklet that does completion
 * processing.
 */
/*
 * Drain the TX completion ring, reclaiming the WRBs of every completed
 * packet, then ring the completion doorbell (re-armed).  If the netdev
 * queue was stopped for lack of WRBs and at least half the ring is now
 * free, wake it.
 */
static void process_nic_tx_completions(struct be_net_object *pnob)
{
	struct be_adapter *adapter = pnob->adapter;
	struct ETH_TX_COMPL_AMAP *txcp;
	struct net_device *netdev = pnob->netdev;
	u32 end_idx, num_processed = 0;

	adapter->be_stat.bes_tx_events++;
	while ((txcp = be_get_tx_cmpl(pnob))) {
		end_idx = AMAP_GET_BITS_PTR(ETH_TX_COMPL, wrb_index, txcp);
		process_one_tx_compl(pnob, end_idx);
		num_processed++;
		adapter->be_stat.bes_tx_compl++;
	}
	/* acknowledge the consumed completions and re-arm the CQ */
	be_notify_cmpl(pnob, num_processed, pnob->tx_cq_id, 1);
	/*
	 * We got Tx completions and have usable WRBs.
	 * If the netdev's queue has been stopped
	 * because we had run out of WRBs, wake it now.
	 */
	spin_lock(&adapter->txq_lock);
	if (netif_queue_stopped(netdev)
	    && atomic_read(&pnob->tx_q_used) < pnob->tx_q_len / 2) {
		netif_wake_queue(netdev);
	}
	spin_unlock(&adapter->txq_lock);
}
/*
 * Move receive buffers from the prepared list rxbl into the hardware
 * RX descriptor ring, then ring the RQ doorbell once for the whole
 * batch.  Stops early if the ring is full (context slot occupied) or
 * after 255 buffers (the doorbell's numPosted field limit).  Returns
 * the number of buffers actually posted.
 */
static u32 post_rx_buffs(struct be_net_object *pnob, struct list_head *rxbl)
{
	u32 nposted = 0;
	struct ETH_RX_D_AMAP *rxd = NULL;
	struct be_recv_buffer *rxbp;
	void **rx_ctxp;
	struct RQ_DB_AMAP rqdb;

	rx_ctxp = pnob->rx_ctxt;
	while (!list_empty(rxbl) &&
	       (rx_ctxp[pnob->rx_q_hd] == NULL) && nposted < 255) {
		rxbp = list_first_entry(rxbl, struct be_recv_buffer, rxb_list);
		list_del(&rxbp->rxb_list);
		rxd = pnob->rx_q + pnob->rx_q_hd;
		/* write the buffer's 64-bit DMA address into the RX desc */
		AMAP_SET_BITS_PTR(ETH_RX_D, fragpa_lo, rxd, rxbp->rxb_pa_lo);
		AMAP_SET_BITS_PTR(ETH_RX_D, fragpa_hi, rxd, rxbp->rxb_pa_hi);
		rx_ctxp[pnob->rx_q_hd] = rxbp->rxb_ctxt;
		be_adv_rxq_hd(pnob);
		nposted++;
	}
	if (nposted) {
		/* Now press the door bell to notify BladeEngine. */
		rqdb.dw[0] = 0;
		AMAP_SET_BITS_PTR(RQ_DB, numPosted, &rqdb, nposted);
		AMAP_SET_BITS_PTR(RQ_DB, rq, &rqdb, pnob->rx_q_id);
		PD_WRITE(&pnob->fn_obj, erx_rq_db, rqdb.dw[0]);
	}
	atomic_add(nposted, &pnob->rx_q_posted);
	return nposted;
}
/*
 * Allocate and DMA-map up to 64 receive buffers and post them to the
 * RX ring.  When rx_pg_shared is set, each allocated page is split
 * between two consecutive buffers (second half uses get_page() +
 * page_offset); otherwise every buffer gets its own page.
 */
void be_post_eth_rx_buffs(struct be_net_object *pnob)
{
	struct be_adapter *adapter = pnob->adapter;
	u32 num_bufs, r;
	u64 busaddr = 0, tmp_pa;
	u32 max_bufs, pg_hd;
	u32 frag_size;
	struct be_recv_buffer *rxbp;
	struct list_head rxbl;
	struct be_rx_page_info *rx_page_info;
	struct page *page = NULL;
	u32 page_order = 0;
	gfp_t alloc_flags = GFP_ATOMIC;

	BUG_ON(!adapter);

	max_bufs = 64;		/* should be even # <= 255. */

	frag_size = pnob->rx_buf_size;
	page_order = get_order(frag_size);

	if (frag_size == 8192)
		alloc_flags |= (gfp_t) __GFP_COMP;
	/*
	 * Form a linked list of RECV_BUFFFER structure to be be posted.
	 * We will post even number of buffer so that pages can be
	 * shared.
	 */
	INIT_LIST_HEAD(&rxbl);

	/* stop early if the next page_info slot is still in use */
	for (num_bufs = 0; num_bufs < max_bufs &&
	     !pnob->rx_page_info[pnob->rx_pg_info_hd].page; ++num_bufs) {

		rxbp = &pnob->eth_rx_bufs[num_bufs];
		pg_hd = pnob->rx_pg_info_hd;
		rx_page_info = &pnob->rx_page_info[pg_hd];

		if (!page) {
			page = alloc_pages(alloc_flags, page_order);
			if (unlikely(page == NULL)) {
				adapter->be_stat.bes_ethrx_post_fail++;
				pnob->rxbuf_post_fail++;
				break;
			}
			pnob->rxbuf_post_fail = 0;
			busaddr = pci_map_page(adapter->pdev, page, 0,
					       frag_size, PCI_DMA_FROMDEVICE);
			rx_page_info->page_offset = 0;
			rx_page_info->page = page;
			/*
			 * If we are sharing a page among two skbs,
			 * alloc a new one on the next iteration
			 */
			if (pnob->rx_pg_shared == false)
				page = NULL;
		} else {
			/* second half of a shared page: bump the refcount
			 * and offset; busaddr still holds the page's
			 * mapping from the previous iteration */
			get_page(page);
			rx_page_info->page_offset += frag_size;
			rx_page_info->page = page;
			/*
			 * We are finished with the alloced page,
			 * Alloc a new one on the next iteration
			 */
			page = NULL;
		}
		rxbp->rxb_ctxt = (void *)rx_page_info;
		index_inc(&pnob->rx_pg_info_hd, pnob->rx_q_len);

		pci_unmap_addr_set(rx_page_info, bus, busaddr);
		tmp_pa = busaddr + rx_page_info->page_offset;
		rxbp->rxb_pa_lo = (tmp_pa & 0xFFFFFFFF);
		rxbp->rxb_pa_hi = (tmp_pa >> 32);
		rxbp->rxb_len = frag_size;
		list_add_tail(&rxbp->rxb_list, &rxbl);
	}			/* End of for */

	r = post_rx_buffs(pnob, &rxbl);
	BUG_ON(r != num_bufs);
	return;
}
/*
 * Interrupt service for network function. We just schedule the
 * tasklet which does all completion processing.
 */
irqreturn_t be_int(int irq, void *dev)
{
	struct net_device *netdev = dev;
	struct be_net_object *pnob = netdev_priv(netdev);
	struct be_adapter *adapter = pnob->adapter;
	u32 isr;

	isr = CSR_READ(&pnob->fn_obj, cev.isr1);
	if (unlikely(!isr))
		return IRQ_NONE;	/* not our interrupt */

	/* accumulate status for the tasklet; it clears bits as it goes */
	spin_lock(&adapter->int_lock);
	adapter->isr |= isr;
	spin_unlock(&adapter->int_lock);

	adapter->be_stat.bes_ints++;

	tasklet_schedule(&adapter->sts_handler);
	return IRQ_HANDLED;
}
/*
 * Poll function called by NAPI with a work budget.
 * We process as many UC. BC and MC receive completions
 * as the budget allows and return the actual number of
 * RX ststutses processed.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_net_object *pnob =
	    container_of(napi, struct be_net_object, napi);
	u32 work_done;

	pnob->adapter->be_stat.bes_polls++;
	work_done = process_rx_completions(pnob, budget);
	BUG_ON(work_done > budget);

	/* All consumed */
	if (work_done < budget) {
		netif_rx_complete(napi);
		/* enable intr */
		be_notify_cmpl(pnob, work_done, pnob->rx_cq_id, 1);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_notify_cmpl(pnob, work_done, pnob->rx_cq_id, 0);
	}
	return work_done;
}
/*
 * Return the next valid entry from the event queue and advance the
 * tail, or NULL if no event is pending.  The Valid bit is cleared
 * later by the caller (process_events).
 */
static struct EQ_ENTRY_AMAP *get_event(struct be_net_object *pnob)
{
	struct EQ_ENTRY_AMAP *eqp = &(pnob->event_q[pnob->event_q_tl]);
	if (!AMAP_GET_BITS_PTR(EQ_ENTRY, Valid, eqp))
		return NULL;
	be_adv_eq_tl(pnob);
	return eqp;
}
/*
 * Processes all valid events in the event ring associated with given
 * NetObject. Also, notifies BE the number of events processed.
 */
static inline u32 process_events(struct be_net_object *pnob)
{
	struct be_adapter *adapter = pnob->adapter;
	struct EQ_ENTRY_AMAP *eqp;
	u32 rid, num_events = 0;
	struct net_device *netdev = pnob->netdev;

	while ((eqp = get_event(pnob)) != NULL) {
		adapter->be_stat.bes_events++;
		rid = AMAP_GET_BITS_PTR(EQ_ENTRY, ResourceID, eqp);
		/* dispatch on which queue generated the event */
		if (rid == pnob->rx_cq_id) {
			adapter->be_stat.bes_rx_events++;
			netif_rx_schedule(&pnob->napi);	/* defer RX to NAPI */
		} else if (rid == pnob->tx_cq_id) {
			process_nic_tx_completions(pnob);
		} else if (rid == pnob->mcc_cq_id) {
			be_mcc_process_cq(&pnob->mcc_q_obj, 1);
		} else {
			dev_info(&netdev->dev,
				 "Invalid EQ ResourceID %d\n", rid);
		}
		/* scrub the entry so it is not seen again */
		AMAP_SET_BITS_PTR(EQ_ENTRY, Valid, eqp, 0);
		AMAP_SET_BITS_PTR(EQ_ENTRY, ResourceID, eqp, 0);
		num_events++;
	}
	return num_events;
}
/*
 * Adaptive interrupt coalescing: once per second, compute the
 * interrupts-per-second rate and nudge the event-queue delay (EQD)
 * up by 8 above the high watermark or down by 8 below the low
 * watermark, bounded by [min_eqd, max_eqd].  Only applied when
 * adaptive interrupt coalescing (enable_aic) is on.
 */
static void update_eqd(struct be_adapter *adapter, struct be_net_object *pnob)
{
	int status;
	struct be_eq_object *eq_objectp;

	/* update once a second */
	if ((jiffies - adapter->ips_jiffies) > 1 * (HZ)) {
		/* One second elapsed since last update */
		u32 r, new_eqd = -1;	/* -1 == "no change requested" */
		r = adapter->be_stat.bes_ints - adapter->be_stat.bes_prev_ints;
		r = r / ((jiffies - adapter->ips_jiffies) / (HZ));
		adapter->be_stat.bes_ips = r;
		adapter->ips_jiffies = jiffies;
		adapter->be_stat.bes_prev_ints = adapter->be_stat.bes_ints;
		if (r > IPS_HI_WM && adapter->cur_eqd < adapter->max_eqd)
			new_eqd = (adapter->cur_eqd + 8);
		if (r < IPS_LO_WM && adapter->cur_eqd > adapter->min_eqd)
			new_eqd = (adapter->cur_eqd - 8);
		if (adapter->enable_aic && new_eqd != -1) {
			eq_objectp = &pnob->event_q_obj;
			status = be_eq_modify_delay(&pnob->fn_obj, 1,
						    &eq_objectp, &new_eqd, NULL,
						    NULL, NULL);
			/* keep cur_eqd in sync only if the FW accepted it */
			if (status == BE_SUCCESS)
				adapter->cur_eqd = new_eqd;
		}
	}
}
/*
 This function notifies BladeEngine of how many events were processed
 from the event queue by ringing the corresponding door bell and
 optionally re-arms the event queue.
 n		- number of events processed
 re_arm		- 1 - re-arm the EQ, 0 - do not re-arm the EQ
*/
static void be_notify_event(struct be_net_object *pnob, int n, int re_arm)
{
	struct CQ_DB_AMAP eqdb;
	eqdb.dw[0] = 0;

	AMAP_SET_BITS_PTR(CQ_DB, qid, &eqdb, pnob->event_q_id);
	AMAP_SET_BITS_PTR(CQ_DB, rearm, &eqdb, re_arm);
	AMAP_SET_BITS_PTR(CQ_DB, event, &eqdb, 1);	/* event (not CQ) doorbell */
	AMAP_SET_BITS_PTR(CQ_DB, num_popped, &eqdb, n);
	/*
	 * Under some situations we see an interrupt and no valid
	 * EQ entry.  To keep going, we need to ring the DB even if
	 * numPOsted is 0.
	 */
	PD_WRITE(&pnob->fn_obj, cq_db, eqdb.dw[0]);
	return;
}
/*
 * Called from the tasklet scheduled by ISR. All real interrupt processing
 * is done here.
 */
void be_process_intr(unsigned long context)
{
	struct be_adapter *adapter = (struct be_adapter *)context;
	struct be_net_object *pnob = adapter->net_obj;
	u32 isr, n;
	ulong flags = 0;

	isr = adapter->isr;

	/*
	 * we create only one NIC event queue in Linux. Event is
	 * expected only in the first event queue
	 */
	BUG_ON(isr & 0xfffffffe);
	if ((isr & 1) == 0)
		return;		/* not our interrupt */
	n = process_events(pnob);
	/*
	 * Clear the event bit. adapter->isr is  set by
	 * hard interrupt.  Prevent race with lock.
	 */
	spin_lock_irqsave(&adapter->int_lock, flags);
	adapter->isr &= ~1;
	spin_unlock_irqrestore(&adapter->int_lock, flags);
	be_notify_event(pnob, n, 1);	/* ack events and re-arm the EQ */
	/*
	 * If previous allocation attempts had failed and
	 * BE has used up all posted buffers, post RX buffers here
	 */
	if (pnob->rxbuf_post_fail && atomic_read(&pnob->rx_q_posted) == 0)
		be_post_eth_rx_buffs(pnob);
	update_eqd(adapter, pnob);
	return;
}

View File

@ -0,0 +1,705 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* be_netif.c
*
* This file contains various entry points of drivers seen by tcp/ip stack.
*/
#include <linux/if_vlan.h>
#include <linux/in.h>
#include "benet.h"
#include <linux/ip.h>
#include <linux/inet_lro.h>
/* Strings to print Link properties */
/* Indexed by the speed code taken from BE_LINK_STATUS (0 = invalid). */
static const char *link_speed[] = {
	"Invalid link Speed Value",
	"10 Mbps",
	"100 Mbps",
	"1 Gbps",
	"10 Gbps"
};

/* Indexed by the duplex code taken from BE_LINK_STATUS (0 = invalid). */
static const char *link_duplex[] = {
	"Invalid Duplex Value",
	"Half Duplex",
	"Full Duplex"
};

/* Suffix printed after the speed/duplex for the active port. */
static const char *link_state[] = {
	"",
	"(active)"
};
/*
 * Print the link speed, duplex and active-port state of both MAC
 * ports to the kernel log.  Out-of-range speed (>= 5) and duplex
 * (>= 3) codes are clamped to index 0, the "Invalid ..." string.
 */
void be_print_link_info(struct BE_LINK_STATUS *lnk_status)
{
	u16 si, di, ai;

	/* Port 0 */
	if (lnk_status->mac0_speed && lnk_status->mac0_duplex) {
		/* Port is up and running */
		si = (lnk_status->mac0_speed < 5) ? lnk_status->mac0_speed : 0;
		di = (lnk_status->mac0_duplex < 3) ?
		    lnk_status->mac0_duplex : 0;
		ai = (lnk_status->active_port == 0) ? 1 : 0;
		printk(KERN_INFO "PortNo. 0: Speed - %s %s %s\n",
		       link_speed[si], link_duplex[di], link_state[ai]);
	} else
		printk(KERN_INFO "PortNo. 0: Down\n");

	/* Port 1 */
	if (lnk_status->mac1_speed && lnk_status->mac1_duplex) {
		/* Port is up and running */
		si = (lnk_status->mac1_speed < 5) ? lnk_status->mac1_speed : 0;
		di = (lnk_status->mac1_duplex < 3) ?
		    lnk_status->mac1_duplex : 0;
		/*
		 * Bug fix: port 1 is the active port when active_port == 1;
		 * the original compared against 0 (copy-paste from port 0),
		 * so "(active)" was printed for the wrong port.
		 */
		ai = (lnk_status->active_port == 1) ? 1 : 0;
		printk(KERN_INFO "PortNo. 1: Speed - %s %s %s\n",
		       link_speed[si], link_duplex[di], link_state[ai]);
	} else
		printk(KERN_INFO "PortNo. 1: Down\n");

	return;
}
/*
 * LRO callback (net_lro_mgr->get_frag_header): locate the MAC, IP and
 * TCP headers inside the first page fragment of a received frame.
 * Returns 0 on success; -1 if the frame is not IPv4 TCP (optionally
 * behind a single 802.1Q tag), which makes LRO skip aggregation.
 */
static int
be_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
		   void **ip_hdr, void **tcpudp_hdr,
		   u64 *hdr_flags, void *priv)
{
	struct ethhdr *eh;
	struct vlan_ethhdr *veh;
	struct iphdr *iph;
	u8 *va = page_address(frag->page) + frag->page_offset;
	unsigned long ll_hlen;

	/* find the mac header, abort if not IPv4 */
	prefetch(va);
	eh = (struct ethhdr *)va;
	*mac_hdr = eh;
	ll_hlen = ETH_HLEN;
	if (eh->h_proto != htons(ETH_P_IP)) {
		if (eh->h_proto == htons(ETH_P_8021Q)) {
			/* VLAN-tagged: link-layer header is 4 bytes longer */
			veh = (struct vlan_ethhdr *)va;
			if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
				return -1;
			ll_hlen += VLAN_HLEN;
		} else {
			return -1;
		}
	}
	*hdr_flags = LRO_IPV4;
	iph = (struct iphdr *)(va + ll_hlen);
	*ip_hdr = iph;
	if (iph->protocol != IPPROTO_TCP)
		return -1;
	*hdr_flags |= LRO_TCP;
	/* TCP header follows the (variable-length) IP header */
	*tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);
	return 0;
}
/*
 * net_device open entry point: configure the LRO manager, bring the
 * link state up if either physical port reports link, enable
 * interrupts and arm the RX completion queue.  Fails with -EAGAIN if
 * the device has not finished initialization.
 */
static int benet_open(struct net_device *netdev)
{
	struct be_net_object *pnob = netdev_priv(netdev);
	struct be_adapter *adapter = pnob->adapter;
	struct net_lro_mgr *lro_mgr;

	if (adapter->dev_state < BE_DEV_STATE_INIT)
		return -EAGAIN;

	lro_mgr = &pnob->lro_mgr;
	lro_mgr->dev = netdev;

	lro_mgr->features = LRO_F_NAPI;
	lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
	lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
	lro_mgr->max_desc = BE_MAX_LRO_DESCRIPTORS;
	lro_mgr->lro_arr = pnob->lro_desc;
	lro_mgr->get_frag_header = be_get_frag_header;
	lro_mgr->max_aggr = adapter->max_rx_coal;
	lro_mgr->frag_align_pad = 2;	/* keeps IP header 4-byte aligned */
	if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
		lro_mgr->max_aggr = MAX_SKB_FRAGS;

	/* NOTE(review): max_rx_coal is overwritten *after* being copied
	 * into lro_mgr->max_aggr above — confirm the intended ordering. */
	adapter->max_rx_coal = BE_LRO_MAX_PKTS;

	be_update_link_status(adapter);

	/*
	 * Set carrier on only if Physical Link up
	 * Either of the port link status up signifies this
	 */
	if ((adapter->port0_link_sts == BE_PORT_LINK_UP) ||
	    (adapter->port1_link_sts == BE_PORT_LINK_UP)) {
		netif_start_queue(netdev);
		netif_carrier_on(netdev);
	}

	adapter->dev_state = BE_DEV_STATE_OPEN;
	napi_enable(&pnob->napi);
	be_enable_intr(pnob);
	be_enable_eq_intr(pnob);
	/*
	 * RX completion queue may be in dis-armed state. Arm it.
	 */
	be_notify_cmpl(pnob, 0, pnob->rx_cq_id, 1);
	return 0;
}
/*
 * net_device stop entry point: quiesce transmit, wait for in-flight
 * TX completions, drop carrier and disable interrupts and NAPI.
 * Device state returns to BE_DEV_STATE_INIT so open can re-enter.
 */
static int benet_close(struct net_device *netdev)
{
	struct be_net_object *pnob = netdev_priv(netdev);
	struct be_adapter *adapter = pnob->adapter;

	netif_stop_queue(netdev);
	synchronize_irq(netdev->irq);	/* let a running ISR finish */

	be_wait_nic_tx_cmplx_cmpl(pnob);
	adapter->dev_state = BE_DEV_STATE_INIT;
	netif_carrier_off(netdev);

	adapter->port0_link_sts = BE_PORT_LINK_DOWN;
	adapter->port1_link_sts = BE_PORT_LINK_DOWN;

	be_disable_intr(pnob);
	be_disable_eq_intr(pnob);
	napi_disable(&pnob->napi);
	return 0;
}
/*
 * Setting a Mac Address for BE
 * Takes netdev and a void pointer as arguments.
 * The pointer holds the new addres to be used.
 */
static int benet_set_mac_addr(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct be_net_object *pnob = netdev_priv(netdev);

	/* NOTE(review): no is_valid_ether_addr() check on the new
	 * address — confirm whether invalid addresses should be
	 * rejected with -EADDRNOTAVAIL here. */
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	be_rxf_mac_address_read_write(&pnob->fn_obj, 0, 0, false, true, false,
				      netdev->dev_addr, NULL, NULL);
	/*
	 * Since we are doing Active-Passive failover, both
	 * ports should have matching MAC addresses everytime.
	 */
	be_rxf_mac_address_read_write(&pnob->fn_obj, 1, 0, false, true, false,
				      netdev->dev_addr, NULL, NULL);
	return 0;
}
/*
 * Timeout handler for the statistics fetch: if the MCC completion
 * callback (be_get_stat_cb) has not fired within the timer period,
 * release the waiter in benet_get_stats so it does not block forever.
 * get_stat_flag guarantees the semaphore is upped exactly once.
 */
void be_get_stats_timer_handler(unsigned long context)
{
	struct be_timer_ctxt *ctxt = (struct be_timer_ctxt *)context;

	if (atomic_read(&ctxt->get_stat_flag)) {
		atomic_dec(&ctxt->get_stat_flag);
		up((void *)ctxt->get_stat_sem_addr);
	}
	del_timer(&ctxt->get_stats_timer);
	return;
}
/*
 * MCC completion callback for the statistics query issued by
 * benet_get_stats: cancel the timeout timer and release the waiter.
 */
void be_get_stat_cb(void *context, int status,
		    struct MCC_WRB_AMAP *optional_wrb)
{
	struct be_timer_ctxt *ctxt = (struct be_timer_ctxt *)context;
	/*
	 * just up the semaphore if the get_stat_flag
	 * reads 1. so that the waiter can continue.
	 * If it is 0, then it was handled by the timer handler.
	 */
	del_timer(&ctxt->get_stats_timer);
	if (atomic_read(&ctxt->get_stat_flag)) {
		atomic_dec(&ctxt->get_stat_flag);
		up((void *)ctxt->get_stat_sem_addr);
	}
}
/*
 * net_device get_stats entry point: issue a firmware statistics query
 * over the MCC ring and block (bounded by a 2-second timer) until the
 * completion callback fires, then fold the per-port firmware counters
 * into the cached net_device_stats.  When the interface is not open,
 * the previously read values are returned unchanged.
 */
struct net_device_stats *benet_get_stats(struct net_device *dev)
{
	struct be_net_object *pnob = netdev_priv(dev);
	struct be_adapter *adapter = pnob->adapter;
	u64 pa;
	struct be_timer_ctxt *ctxt = &adapter->timer_ctxt;

	if (adapter->dev_state != BE_DEV_STATE_OPEN) {
		/* Return previously read stats */
		return &(adapter->benet_stats);
	}
	/* Get Physical Addr */
	pa = pci_map_single(adapter->pdev, adapter->eth_statsp,
			    sizeof(struct FWCMD_ETH_GET_STATISTICS),
			    PCI_DMA_FROMDEVICE);
	ctxt->get_stat_sem_addr = (unsigned long)&adapter->get_eth_stat_sem;
	atomic_inc(&ctxt->get_stat_flag);	/* claimed by cb or timer */

	be_rxf_query_eth_statistics(&pnob->fn_obj, adapter->eth_statsp,
				    cpu_to_le64(pa), be_get_stat_cb, ctxt,
				    NULL);

	/* arm the timeout so we never block forever if the FW stalls */
	ctxt->get_stats_timer.data = (unsigned long)ctxt;
	mod_timer(&ctxt->get_stats_timer, (jiffies + (HZ * 2)));
	down((void *)ctxt->get_stat_sem_addr);	/* callback will unblock us */

	/* Adding port0 and port1 stats. */
	adapter->benet_stats.rx_packets =
	    adapter->eth_statsp->params.response.p0recvdtotalframes +
	    adapter->eth_statsp->params.response.p1recvdtotalframes;
	adapter->benet_stats.tx_packets =
	    adapter->eth_statsp->params.response.p0xmitunicastframes +
	    adapter->eth_statsp->params.response.p1xmitunicastframes;
	adapter->benet_stats.tx_bytes =
	    adapter->eth_statsp->params.response.p0xmitbyteslsd +
	    adapter->eth_statsp->params.response.p1xmitbyteslsd;
	adapter->benet_stats.rx_errors =
	    adapter->eth_statsp->params.response.p0crcerrors +
	    adapter->eth_statsp->params.response.p1crcerrors;
	adapter->benet_stats.rx_errors +=
	    adapter->eth_statsp->params.response.p0alignmentsymerrs +
	    adapter->eth_statsp->params.response.p1alignmentsymerrs;
	adapter->benet_stats.rx_errors +=
	    adapter->eth_statsp->params.response.p0inrangelenerrors +
	    adapter->eth_statsp->params.response.p1inrangelenerrors;
	adapter->benet_stats.rx_bytes =
	    adapter->eth_statsp->params.response.p0recvdtotalbytesLSD +
	    adapter->eth_statsp->params.response.p1recvdtotalbytesLSD;
	adapter->benet_stats.rx_crc_errors =
	    adapter->eth_statsp->params.response.p0crcerrors +
	    adapter->eth_statsp->params.response.p1crcerrors;

	adapter->benet_stats.tx_packets +=
	    adapter->eth_statsp->params.response.p0xmitmulticastframes +
	    adapter->eth_statsp->params.response.p1xmitmulticastframes;
	adapter->benet_stats.tx_packets +=
	    adapter->eth_statsp->params.response.p0xmitbroadcastframes +
	    adapter->eth_statsp->params.response.p1xmitbroadcastframes;
	adapter->benet_stats.tx_errors = 0;

	adapter->benet_stats.multicast =
	    adapter->eth_statsp->params.response.p0xmitmulticastframes +
	    adapter->eth_statsp->params.response.p1xmitmulticastframes;

	adapter->benet_stats.rx_fifo_errors =
	    adapter->eth_statsp->params.response.p0rxfifooverflowdropped +
	    adapter->eth_statsp->params.response.p1rxfifooverflowdropped;
	adapter->benet_stats.rx_frame_errors =
	    adapter->eth_statsp->params.response.p0alignmentsymerrs +
	    adapter->eth_statsp->params.response.p1alignmentsymerrs;
	adapter->benet_stats.rx_length_errors =
	    adapter->eth_statsp->params.response.p0inrangelenerrors +
	    adapter->eth_statsp->params.response.p1inrangelenerrors;
	adapter->benet_stats.rx_length_errors +=
	    adapter->eth_statsp->params.response.p0outrangeerrors +
	    adapter->eth_statsp->params.response.p1outrangeerrors;
	adapter->benet_stats.rx_length_errors +=
	    adapter->eth_statsp->params.response.p0frametoolongerrors +
	    adapter->eth_statsp->params.response.p1frametoolongerrors;

	pci_unmap_single(adapter->pdev, (ulong) adapter->eth_statsp,
			 sizeof(struct FWCMD_ETH_GET_STATISTICS),
			 PCI_DMA_FROMDEVICE);
	return &(adapter->benet_stats);
}
/*
 * Ring the TX doorbell to announce nposted newly queued WRBs to the
 * ASIC.  The doorbell's numPosted field holds at most
 * CSR_ETH_MAX_SQPOSTS, so larger batches take multiple writes.
 */
static void be_start_tx(struct be_net_object *pnob, u32 nposted)
{
#define CSR_ETH_MAX_SQPOSTS 255
	struct SQ_DB_AMAP sqdb;

	sqdb.dw[0] = 0;

	AMAP_SET_BITS_PTR(SQ_DB, cid, &sqdb, pnob->tx_q_id);
	while (nposted) {
		if (nposted > CSR_ETH_MAX_SQPOSTS) {
			AMAP_SET_BITS_PTR(SQ_DB, numPosted, &sqdb,
					  CSR_ETH_MAX_SQPOSTS);
			nposted -= CSR_ETH_MAX_SQPOSTS;
		} else {
			AMAP_SET_BITS_PTR(SQ_DB, numPosted, &sqdb, nposted);
			nposted = 0;
		}
		PD_WRITE(&pnob->fn_obj, etx_sq_db, sqdb.dw[0]);
	}

	return;
}
/*
 * Recompute the transmit data rate (in Mbit/s) from the byte count
 * accumulated since the last sample.  The rate is refreshed at most
 * once every two seconds and stored in be_stat.bes_eth_tx_rate.
 */
static void update_tx_rate(struct be_adapter *adapter)
{
	unsigned long elapsed = jiffies - adapter->eth_tx_jiffies;
	u32 rate;

	/* sample window: only update once two seconds have elapsed */
	if (elapsed <= 2 * HZ)
		return;

	rate = adapter->eth_tx_bytes / (elapsed / HZ);	/* bytes/sec */
	rate /= 1000000;				/* MB/sec */
	adapter->be_stat.bes_eth_tx_rate = rate * 8;	/* Mbit/sec */

	/* restart the sampling window */
	adapter->eth_tx_jiffies = jiffies;
	adapter->eth_tx_bytes = 0;
}
static int wrb_cnt_in_skb(struct sk_buff *skb)
{
int cnt = 0;
while (skb) {
if (skb->len > skb->data_len)
cnt++;
cnt += skb_shinfo(skb)->nr_frags;
skb = skb_shinfo(skb)->frag_list;
}
BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
return cnt;
}
/*
 * Write one fragment descriptor into a WRB: the 64-bit DMA address
 * split into hi/lo halves, plus the fragment length.
 */
static void wrb_fill(struct ETH_WRB_AMAP *wrb, u64 addr, int len)
{
	AMAP_SET_BITS_PTR(ETH_WRB, frag_pa_hi, wrb, addr >> 32);
	AMAP_SET_BITS_PTR(ETH_WRB, frag_pa_lo, wrb, addr & 0xFFFFFFFF);
	AMAP_SET_BITS_PTR(ETH_WRB, frag_len, wrb, len);
}
/*
 * Fill the offload flag dwords (dw[2..3]) of a packet's first WRB from
 * the skb: CRC insertion always, then either LSO (mss from gso_size)
 * or TCP/UDP checksum offload, plus VLAN tag insertion when a vlan
 * group is registered and the skb carries a tag.
 */
static void wrb_fill_extra(struct ETH_WRB_AMAP *wrb, struct sk_buff *skb,
			   struct be_net_object *pnob)
{
	wrb->dw[2] = 0;
	wrb->dw[3] = 0;
	AMAP_SET_BITS_PTR(ETH_WRB, crc, wrb, 1);
	if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
		/* large-segment offload: hardware segments at gso_size */
		AMAP_SET_BITS_PTR(ETH_WRB, lso, wrb, 1);
		AMAP_SET_BITS_PTR(ETH_WRB, lso_mss, wrb,
				  skb_shinfo(skb)->gso_size);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* checksum offload requested by the stack */
		u8 proto = ((struct iphdr *)ip_hdr(skb))->protocol;
		if (proto == IPPROTO_TCP)
			AMAP_SET_BITS_PTR(ETH_WRB, tcpcs, wrb, 1);
		else if (proto == IPPROTO_UDP)
			AMAP_SET_BITS_PTR(ETH_WRB, udpcs, wrb, 1);
	}
	if (pnob->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS_PTR(ETH_WRB, vlan, wrb, 1);
		AMAP_SET_BITS_PTR(ETH_WRB, vlan_tag, wrb, vlan_tx_tag_get(skb));
	}
}
/*
 * Propagate the offload flag dwords (dw[2..3]) set up by
 * wrb_fill_extra() from a packet's first WRB to a subsequent WRB.
 */
static inline void wrb_copy_extra(struct ETH_WRB_AMAP *to,
				  struct ETH_WRB_AMAP *from)
{
	to->dw[2] = from->dw[2];
	to->dw[3] = from->dw[3];
}
/* Returns the actual count of wrbs used including a possible dummy */
/*
 * DMA-map every fragment of the skb (linear area plus page frags of
 * each buffer in the frag_list chain) and write one WRB per fragment
 * into the TX ring.  A zero-length dummy WRB is appended when the
 * count is odd so packets always occupy an even number of WRBs.  The
 * total mapped byte count is accumulated into *copied, and the last
 * WRB is flagged complete/last.
 */
static int copy_skb_to_txq(struct be_net_object *pnob, struct sk_buff *skb,
			   u32 wrb_cnt, u32 *copied)
{
	u64 busaddr;
	struct ETH_WRB_AMAP *wrb = NULL, *first = NULL;
	u32 i;
	bool dummy = true;
	struct pci_dev *pdev = pnob->adapter->pdev;

	/* round up to an even WRB count with a trailing dummy */
	if (wrb_cnt & 1)
		wrb_cnt++;
	else
		dummy = false;

	atomic_add(wrb_cnt, &pnob->tx_q_used);

	while (skb) {
		if (skb->len > skb->data_len) {
			int len = skb->len - skb->data_len;
			busaddr = pci_map_single(pdev, skb->data, len,
						 PCI_DMA_TODEVICE);
			busaddr = cpu_to_le64(busaddr);
			wrb = &pnob->tx_q[pnob->tx_q_hd];
			if (first == NULL) {
				/* first WRB carries the offload flags */
				wrb_fill_extra(wrb, skb, pnob);
				first = wrb;
			} else {
				wrb_copy_extra(wrb, first);
			}
			wrb_fill(wrb, busaddr, len);
			be_adv_txq_hd(pnob);
			*copied += len;
		}

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			struct skb_frag_struct *frag =
			    &skb_shinfo(skb)->frags[i];
			busaddr = pci_map_page(pdev, frag->page,
					       frag->page_offset, frag->size,
					       PCI_DMA_TODEVICE);
			busaddr = cpu_to_le64(busaddr);
			wrb = &pnob->tx_q[pnob->tx_q_hd];
			if (first == NULL) {
				wrb_fill_extra(wrb, skb, pnob);
				first = wrb;
			} else {
				wrb_copy_extra(wrb, first);
			}
			wrb_fill(wrb, busaddr, frag->size);
			be_adv_txq_hd(pnob);
			*copied += frag->size;
		}
		skb = skb_shinfo(skb)->frag_list;
	}

	if (dummy) {
		wrb = &pnob->tx_q[pnob->tx_q_hd];
		BUG_ON(first == NULL);
		wrb_copy_extra(wrb, first);
		wrb_fill(wrb, 0, 0);	/* zero-length padding WRB */
		be_adv_txq_hd(pnob);
	}
	/* mark the final WRB so hardware reports one completion per pkt */
	AMAP_SET_BITS_PTR(ETH_WRB, complete, wrb, 1);
	AMAP_SET_BITS_PTR(ETH_WRB, last, wrb, 1);
	return wrb_cnt;
}
/* For each skb transmitted, tx_ctxt stores the num of wrbs in the
 * start index and skb pointer in the end index
 */
/*
 * Record bookkeeping for a queued packet: the WRB count goes into the
 * context slot of its first WRB, the skb pointer into the slot of its
 * last WRB.  process_one_tx_compl() reads both back on completion.
 */
static inline void be_tx_wrb_info_remember(struct be_net_object *pnob,
					   struct sk_buff *skb, int wrb_cnt,
					   u32 start)
{
	*(u32 *) (&pnob->tx_ctxt[start]) = wrb_cnt;
	index_adv(&start, wrb_cnt - 1, pnob->tx_q_len);
	pnob->tx_ctxt[start] = skb;
}
/*
 * net_device hard_start_xmit entry point: reserve WRB space (or stop
 * the queue and return NETDEV_TX_BUSY when the ring is nearly full),
 * copy the skb's fragments into the TX ring, and ring the doorbell.
 */
static int benet_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_net_object *pnob = netdev_priv(netdev);
	struct be_adapter *adapter = pnob->adapter;
	u32 wrb_cnt, copied = 0;
	u32 start = pnob->tx_q_hd;

	adapter->be_stat.bes_tx_reqs++;

	wrb_cnt = wrb_cnt_in_skb(skb);
	spin_lock_bh(&adapter->txq_lock);
	/* keep 2 slots of headroom; the ring must never fill completely */
	if ((pnob->tx_q_len - 2 - atomic_read(&pnob->tx_q_used)) <= wrb_cnt) {
		netif_stop_queue(pnob->netdev);
		spin_unlock_bh(&adapter->txq_lock);
		adapter->be_stat.bes_tx_fails++;
		return NETDEV_TX_BUSY;
	}
	spin_unlock_bh(&adapter->txq_lock);

	/* may return wrb_cnt + 1 when a dummy padding WRB was added */
	wrb_cnt = copy_skb_to_txq(pnob, skb, wrb_cnt, &copied);
	be_tx_wrb_info_remember(pnob, skb, wrb_cnt, start);

	be_start_tx(pnob, wrb_cnt);

	adapter->eth_tx_bytes += copied;
	adapter->be_stat.bes_tx_wrbs += wrb_cnt;
	update_tx_rate(adapter);
	netdev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
/*
* This is the driver entry point to change the mtu of the device
* Returns 0 for success and errno for failure.
*/
static int benet_change_mtu(struct net_device *netdev, int new_mtu)
{
/*
* BE supports jumbo frame size upto 9000 bytes including the link layer
* header. Considering the different variants of frame formats possible
* like VLAN, SNAP/LLC, the maximum possible value for MTU is 8974 bytes
*/
if (new_mtu < (ETH_ZLEN + ETH_FCS_LEN) || (new_mtu > BE_MAX_MTU)) {
dev_info(&netdev->dev, "Invalid MTU requested. "
"Must be between %d and %d bytes\n",
(ETH_ZLEN + ETH_FCS_LEN), BE_MAX_MTU);
return -EINVAL;
}
dev_info(&netdev->dev, "MTU changed from %d to %d\n",
netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
return 0;
}
/*
 * This is the driver entry point to register a vlan with the device
 */
/*
 * Stores the stack's vlan_group and resets the configured-VLAN count.
 * Event-queue interrupts are briefly disabled around the update so the
 * completion path does not observe a half-updated pair.
 */
static void benet_vlan_register(struct net_device *netdev,
				struct vlan_group *grp)
{
	struct be_net_object *pnob = netdev_priv(netdev);

	be_disable_eq_intr(pnob);
	pnob->vlan_grp = grp;
	pnob->num_vlans = 0;
	be_enable_eq_intr(pnob);
}
/*
 * This is the driver entry point to add a vlan vlan_id
 * with the device netdev
 */
static void benet_vlan_add_vid(struct net_device *netdev, u16 vlan_id)
{
	struct be_net_object *pnob = netdev_priv(netdev);

	/* NOTE(review): rejecting at (BE_NUM_VLAN_SUPPORTED - 1) leaves
	 * the last slot unused — confirm whether the intended limit is
	 * num_vlans == BE_NUM_VLAN_SUPPORTED. */
	if (pnob->num_vlans == (BE_NUM_VLAN_SUPPORTED - 1)) {
		/* no  way to return an error */
		dev_info(&netdev->dev,
			 "BladeEngine: Cannot configure more than %d Vlans\n",
			 BE_NUM_VLAN_SUPPORTED);
		return;
	}
	/* The new vlan tag will be in the slot indicated by num_vlans. */
	pnob->vlan_tag[pnob->num_vlans++] = vlan_id;
	be_rxf_vlan_config(&pnob->fn_obj, false, pnob->num_vlans,
			   pnob->vlan_tag, NULL, NULL, NULL);
}
/*
 * This is the driver entry point to remove a vlan vlan_id
 * with the device netdev
 */
static void benet_vlan_rem_vid(struct net_device *netdev, u16 vlan_id)
{
	struct be_net_object *pnob = netdev_priv(netdev);
	u32 i;

	/*
	 * In Blade Engine, we support 32 vlan tag filters across both ports.
	 * To program a vlan tag, the RXF_RTPR_CSR register is used.
	 * Each 32-bit value of RXF_RTDR_CSR can address 2 vlan tag entries.
	 * The Vlan table is of depth 16. thus we support 32 tags.
	 */
	/* Find the slot holding vlan_id; nothing to do if never added.
	 * (Cleanup: removed the unused local `value = vlan_id |
	 * VLAN_VALID_BIT` that was computed and never referenced.) */
	for (i = 0; i < BE_NUM_VLAN_SUPPORTED; i++) {
		if (pnob->vlan_tag[i] == vlan_id)
			break;
	}
	if (i == BE_NUM_VLAN_SUPPORTED)
		return;
	/* Now compact the vlan tag array by removing hole created. */
	while ((i + 1) < BE_NUM_VLAN_SUPPORTED) {
		pnob->vlan_tag[i] = pnob->vlan_tag[i + 1];
		i++;
	}
	/* i is now the last index; clear the duplicated final slot */
	if ((i + 1) == BE_NUM_VLAN_SUPPORTED)
		pnob->vlan_tag[i] = (u16) 0x0;
	pnob->num_vlans--;
	/* re-program the hardware filter with the compacted table */
	be_rxf_vlan_config(&pnob->fn_obj, false, pnob->num_vlans,
			   pnob->vlan_tag, NULL, NULL, NULL);
}
/*
 * This function is called to program multicast
 * address in the multicast filter of the ASIC.
 */
static void be_set_multicast_filter(struct net_device *netdev)
{
	struct be_net_object *pnob = netdev_priv(netdev);
	struct dev_mc_list *mc_ptr;
	u8 mac_addr[32][ETH_ALEN];
	int i;

	if (netdev->flags & IFF_ALLMULTI) {
		/* set BE in Multicast promiscuous */
		be_rxf_multicast_config(&pnob->fn_obj, true, 0, NULL, NULL,
					NULL, NULL);
		return;
	}

	for (mc_ptr = netdev->mc_list, i = 0; mc_ptr;
	     mc_ptr = mc_ptr->next, i++) {
		/*
		 * Bug fix: mac_addr[] (and the hardware exact-match
		 * filter) holds at most 32 entries, but the original
		 * loop copied every mc_list entry and could overflow
		 * the on-stack array.  When the list does not fit,
		 * fall back to multicast-promiscuous mode instead.
		 */
		if (i == 32) {
			be_rxf_multicast_config(&pnob->fn_obj, true, 0,
						NULL, NULL, NULL, NULL);
			return;
		}
		memcpy(&mac_addr[i][0], mc_ptr->dmi_addr, ETH_ALEN);
	}

	/* reset the promiscuous mode also. */
	be_rxf_multicast_config(&pnob->fn_obj, false, i,
				&mac_addr[0][0], NULL, NULL, NULL);
}
/*
 * Driver entry point to set the multicast list for device netdev.
 * Selects between full promiscuous mode (IFF_PROMISC) and normal
 * operation, where promiscuous mode is cleared and the exact
 * multicast filter is (re)programmed.
 */
static void benet_set_multicast_list(struct net_device *netdev)
{
	struct be_net_object *pnob = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		/* accept everything: enable promiscuous on the function */
		be_rxf_promiscuous(&pnob->fn_obj, 1, 1, NULL, NULL, NULL);
		return;
	}

	/* leave promiscuous mode, then program the multicast filter */
	be_rxf_promiscuous(&pnob->fn_obj, 0, 0, NULL, NULL, NULL);
	be_set_multicast_filter(netdev);
}
/*
 * One-time netdev setup called at probe time: wire up the driver's
 * net_device entry points, advertise offload features (SG, TSO,
 * checksum, VLAN accel), and attach the ethtool operations.
 */
int benet_init(struct net_device *netdev)
{
	struct be_net_object *pnob = netdev_priv(netdev);
	struct be_adapter *adapter = pnob->adapter;

	ether_setup(netdev);

	netdev->open = &benet_open;
	netdev->stop = &benet_close;
	netdev->hard_start_xmit = &benet_xmit;

	netdev->get_stats = &benet_get_stats;

	netdev->set_multicast_list = &benet_set_multicast_list;

	netdev->change_mtu = &benet_change_mtu;
	netdev->set_mac_address = &benet_set_mac_addr;

	netdev->vlan_rx_register = benet_vlan_register;
	netdev->vlan_rx_add_vid = benet_vlan_add_vid;
	netdev->vlan_rx_kill_vid = benet_vlan_rem_vid;

	netdev->features =
	    NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
	    NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM;

	netdev->flags |= IFF_MULTICAST;

	/* If device is DAC Capable, set the HIGHDMA flag for netdevice. */
	if (adapter->dma_64bit_cap)
		netdev->features |= NETIF_F_HIGHDMA;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
	return 0;
}

View File

@ -0,0 +1,429 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
#ifndef _BENET_H_
#define _BENET_H_
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/inet_lro.h>
#include "hwlib.h"
#define _SA_MODULE_NAME "net-driver"
#define VLAN_VALID_BIT 0x8000
#define BE_NUM_VLAN_SUPPORTED 32
#define BE_PORT_LINK_DOWN 0000
#define BE_PORT_LINK_UP 0001
#define BE_MAX_TX_FRAG_COUNT (30)
/* Flag bits for send operation */
#define IPCS (1 << 0) /* Enable IP checksum offload */
#define UDPCS (1 << 1) /* Enable UDP checksum offload */
#define TCPCS (1 << 2) /* Enable TCP checksum offload */
#define LSO (1 << 3) /* Enable Large Segment offload */
#define ETHVLAN (1 << 4) /* Enable VLAN insert */
#define ETHEVENT (1 << 5) /* Generate event on completion */
#define ETHCOMPLETE (1 << 6) /* Generate completion when done */
#define IPSEC (1 << 7) /* Enable IPSEC */
#define FORWARD (1 << 8) /* Send the packet in forwarding path */
#define FIN (1 << 9) /* Issue FIN segment */
#define BE_MAX_MTU 8974
#define BE_MAX_LRO_DESCRIPTORS 8
#define BE_LRO_MAX_PKTS 64
#define BE_MAX_FRAGS_PER_FRAME 6
extern const char be_drvr_ver[];
extern char be_fw_ver[];
extern char be_driver_name[];
extern struct ethtool_ops be_ethtool_ops;
#define BE_DEV_STATE_NONE 0
#define BE_DEV_STATE_INIT 1
#define BE_DEV_STATE_OPEN 2
#define BE_DEV_STATE_SUSPEND 3
/* This structure is used to describe physical fragments to use
 * for DMAing data from NIC.
 */
struct be_recv_buffer {
	struct list_head rxb_list;	/* for maintaining a linked list */
	void *rxb_va;		/* buffer virtual address */
	u32 rxb_pa_lo;		/* low part of physical address */
	u32 rxb_pa_hi;		/* high part of physical address */
	u32 rxb_len;		/* length of recv buffer */
	void *rxb_ctxt;		/* context for OSM driver to use */
};
/*
 * fragment list to describe scattered data.
 */
struct be_tx_frag_list {
	u32 txb_len;		/* Size of this fragment */
	u32 txb_pa_lo;		/* Lower 32 bits of 64 bit physical addr */
	u32 txb_pa_hi;		/* Higher 32 bits of 64 bit physical addr */
};
/* Per-buffer tracking for a (possibly shared) RX page and its DMA
 * mapping; filled in by be_post_eth_rx_buffs(). */
struct be_rx_page_info {
	struct page *page;	/* backing page (refcounted when shared) */
	dma_addr_t bus;		/* DMA address from pci_map_page() */
	u16 page_offset;	/* offset of this buffer within the page */
};
/*
 * This structure is the main tracking structure for a NIC interface.
 * It groups the MCC (mailbox/command), TX, RX and event rings together
 * with their completion queues and per-entry context arrays.
 */
struct be_net_object {
	/* MCC Ring - used to send fwcmds to embedded ARM processor */
	struct MCC_WRB_AMAP *mcc_q;	/* VA of the start of the ring */
	u32 mcc_q_len;			/* # of WRB entries in this ring */
	u32 mcc_q_size;
	u32 mcc_q_hd;			/* MCC ring head */
	u8 mcc_q_created;		/* flag to help cleanup */
	struct be_mcc_object mcc_q_obj;	/* BECLIB's MCC ring Object */
	dma_addr_t mcc_q_bus;		/* DMA'ble bus address */

	/* MCC Completion Ring - FW responses to fwcmds sent from MCC ring */
	struct MCC_CQ_ENTRY_AMAP *mcc_cq;	/* VA of the start of the ring */
	u32 mcc_cq_len;			/* # of compl. entries in this ring */
	u32 mcc_cq_size;
	u32 mcc_cq_tl;			/* compl. ring tail */
	u8 mcc_cq_created;		/* flag to help cleanup */
	struct be_cq_object mcc_cq_obj;	/* BECLIB's MCC compl. ring object */
	u32 mcc_cq_id;			/* MCC ring ID */
	dma_addr_t mcc_cq_bus;		/* DMA'ble bus address */

	struct ring_desc mb_rd;		/* RD for MCC_MAIL_BOX */
	void *mb_ptr;			/* mailbox ptr to be freed */
	dma_addr_t mb_bus;		/* DMA'ble bus address */
	u32 mb_size;

	/* BEClib uses an array of context objects to track outstanding
	 * requests to the MCC.  We need allocate the same number of
	 * context entries as the number of entries in the MCC WRB ring
	 */
	u32 mcc_wrb_ctxt_size;
	void *mcc_wrb_ctxt;		/* pointer to the context area */
	u32 mcc_wrb_ctxtLen;		/* Number of entries in the context */

	/*
	 * NIC send request ring - used for xmitting raw ether frames.
	 */
	struct ETH_WRB_AMAP *tx_q;	/* VA of the start of the ring */
	u32 tx_q_len;			/* # of entries in the send ring */
	u32 tx_q_size;
	u32 tx_q_hd;			/* Head index.  Next req. goes here */
	u32 tx_q_tl;			/* Tail index; oldest outstanding req. */
	u8 tx_q_created;		/* flag to help cleanup */
	struct be_ethsq_object tx_q_obj;	/* BECLIB's send Q handle */
	dma_addr_t tx_q_bus;		/* DMA'ble bus address */
	u32 tx_q_id;			/* send queue ring ID */
	u32 tx_q_port;			/* 0 no binding, 1 port A, 2 port B */
	atomic_t tx_q_used;		/* # of WRBs used */
	/* ptr to an array in which we store context info for each send req. */
	void **tx_ctxt;

	/*
	 * NIC Send compl. ring - completion status for all NIC frames xmitted.
	 */
	struct ETH_TX_COMPL_AMAP *tx_cq;	/* VA of start of the ring */
	u32 txcq_len;			/* # of entries in the ring */
	u32 tx_cq_size;
	/*
	 * index into compl ring where the host expects next completion entry
	 */
	u32 tx_cq_tl;
	u32 tx_cq_id;			/* completion queue id */
	u8 tx_cq_created;		/* flag to help cleanup */
	struct be_cq_object tx_cq_obj;
	dma_addr_t tx_cq_bus;		/* DMA'ble bus address */

	/*
	 * Event Queue - all completion entries post events here.
	 */
	struct EQ_ENTRY_AMAP *event_q;	/* VA of start of event queue */
	u32 event_q_len;		/* # of entries */
	u32 event_q_size;
	u32 event_q_tl;			/* Tail of the event queue */
	u32 event_q_id;			/* Event queue ID */
	u8 event_q_created;		/* flag to help cleanup */
	struct be_eq_object event_q_obj;	/* Queue handle */
	dma_addr_t event_q_bus;		/* DMA'ble bus address */

	/*
	 * NIC receive queue - Data buffers to be used for receiving unicast,
	 * broadcast and multi-cast frames are posted here.
	 */
	struct ETH_RX_D_AMAP *rx_q;	/* VA of start of the queue */
	u32 rx_q_len;			/* # of entries */
	u32 rx_q_size;
	u32 rx_q_hd;			/* Head of the queue */
	atomic_t rx_q_posted;		/* number of posted buffers */
	u32 rx_q_id;			/* queue ID */
	u8 rx_q_created;		/* flag to help cleanup */
	struct be_ethrq_object rx_q_obj;	/* NIC RX queue handle */
	dma_addr_t rx_q_bus;		/* DMA'ble bus address */

	/*
	 * Pointer to an array of opaque context objects for use by OSM driver
	 */
	void **rx_ctxt;

	/*
	 * NIC unicast RX completion queue - all unicast ether frame completion
	 * statuses from BE come here.
	 */
	struct ETH_RX_COMPL_AMAP *rx_cq;	/* VA of start of the queue */
	u32 rx_cq_len;			/* # of entries */
	u32 rx_cq_size;
	u32 rx_cq_tl;			/* Tail of the queue */
	u32 rx_cq_id;			/* queue ID */
	u8 rx_cq_created;		/* flag to help cleanup */
	struct be_cq_object rx_cq_obj;	/* queue handle */
	dma_addr_t rx_cq_bus;		/* DMA'ble bus address */

	struct be_function_object fn_obj;	/* function object */
	bool fn_obj_created;
	u32 rx_buf_size;		/* Size of the RX buffers */

	struct net_device *netdev;
	struct be_recv_buffer eth_rx_bufs[256];	/* to pass Rx buffer
						   addresses */
	struct be_adapter *adapter;	/* Pointer to OSM adapter */
	u32 devno;			/* OSM, network dev no. */
	u32 use_port;			/* Current active port */
	struct be_rx_page_info *rx_page_info;	/* Array of Rx buf pages */
	u32 rx_pg_info_hd;		/* Head of queue */
	int rxbuf_post_fail;		/* RxBuff posting fail count */
	bool rx_pg_shared;	/* Is an allocated page shared as two frags ? */
	struct vlan_group *vlan_grp;
	u32 num_vlans;			/* Number of vlans in BE's filter */
	u16 vlan_tag[BE_NUM_VLAN_SUPPORTED];	/* vlans currently configured */
	struct napi_struct napi;
	struct net_lro_mgr lro_mgr;
	struct net_lro_desc lro_desc[BE_MAX_LRO_DESCRIPTORS];
};
#define NET_FH(np) (&(np)->fn_obj)
/*
 * BE driver statistics (software counters kept by the driver,
 * distinct from the hardware ETH statistics fwcmd).
 */
struct be_drvr_stat {
	u32 bes_tx_reqs;	/* number of TX requests initiated */
	u32 bes_tx_fails;	/* number of TX requests that failed */
	u32 bes_fwd_reqs;	/* number of send reqs through forwarding i/f */
	u32 bes_tx_wrbs;	/* number of tx WRBs used */
	u32 bes_ints;		/* number of interrupts */
	u32 bes_polls;		/* number of times NAPI called poll function */
	u32 bes_events;		/* total event entries processed */
	u32 bes_tx_events;	/* number of tx completion events */
	u32 bes_rx_events;	/* number of ucast rx completion events */
	u32 bes_tx_compl;	/* number of tx completion entries processed */
	u32 bes_rx_compl;	/* number of rx completion entries
				   processed */
	u32 bes_ethrx_post_fail;	/* number of ethrx buffer alloc
					   failures */
	/*
	 * number of non ether type II frames dropped where
	 * frame len > length field of Mac Hdr
	 */
	u32 bes_802_3_dropped_frames;
	/*
	 * number of non ether type II frames malformed where
	 * in frame len < length field of Mac Hdr
	 */
	u32 bes_802_3_malformed_frames;
	u32 bes_ips;		/* interrupts / sec */
	u32 bes_prev_ints;	/* bes_ints at last IPS calculation */
	u16 bes_eth_tx_rate;	/* ETH TX rate - Mb/sec */
	u16 bes_eth_rx_rate;	/* ETH RX rate - Mb/sec */
	u32 bes_rx_coal;	/* Num pkts coalesced */
	u32 bes_rx_flush;	/* Num times coalesced */
	u32 bes_link_change_physical;	/* Num of times physical link changed */
	u32 bes_link_change_virtual;	/* Num of times virtual link changed */
	u32 bes_rx_misc_pkts;	/* Misc pkts received */
};
/* Maximum interrupt delay (in microseconds) allowed */
#define MAX_EQD 120
/*
 * Timer context to prevent a system shutdown from hanging forever if the
 * hardware stops responding while a statistics request is outstanding.
 */
struct be_timer_ctxt {
	atomic_t get_stat_flag;		/* nonzero while a stats request is pending */
	struct timer_list get_stats_timer;	/* watchdog for the stats request */
	/* address of the semaphore the waiter blocks on (presumably
	 * be_adapter.get_eth_stat_sem) -- NOTE(review): confirm in timer handler */
	unsigned long get_stat_sem_addr;
} ;
/* This structure is the main BladeEngine driver context. */
struct be_adapter {
	struct net_device *netdevp;
	struct be_drvr_stat be_stat;
	struct net_device_stats benet_stats;

	/* PCI BAR mapped addresses */
	u8 __iomem *csr_va;	/* CSR */
	u8 __iomem *db_va;	/* Door Bell */
	u8 __iomem *pci_va;	/* PCI Config */

	struct tasklet_struct sts_handler;
	struct timer_list cq_timer;
	spinlock_t int_lock;	/* to protect the isr field in adapter */

	struct FWCMD_ETH_GET_STATISTICS *eth_statsp;
	/*
	 * This will enable the use of ethtool to enable or disable
	 * Checksum on Rx pkts to be obeyed or disobeyed.
	 * If this is true = 1, then whatever is the checksum on the
	 * Received pkt as per BE, it will be given to the stack.
	 * Else the stack will recalculate it.
	 */
	bool rx_csum;
	/*
	 * This will enable the use of ethtool to enable or disable
	 * Coalescing of Rx pkts to be obeyed or disobeyed.
	 * If this is greater than 0 and less than 16 then coalescing
	 * is enabled else it is disabled
	 */
	u32 max_rx_coal;
	struct pci_dev *pdev;	/* Pointer to OS's PCI device */

	spinlock_t txq_lock;	/* to stop/wake queue based on tx_q_used */

	u32 isr;		/* copy of Intr status reg. */

	u32 port0_link_sts;	/* Port 0 link status */
	u32 port1_link_sts;	/* Port 1 link status */
	struct BE_LINK_STATUS *be_link_sts;

	/* pointer to the first netobject of this adapter */
	struct be_net_object *net_obj;

	/* Flags to indicate what to clean up */
	bool tasklet_started;
	bool isr_registered;
	/*
	 * adaptive interrupt coalescing (AIC) related
	 */
	bool enable_aic;	/* 1 if AIC is enabled */
	u16 min_eqd;		/* minimum EQ delay in usec */
	u16 max_eqd;		/* maximum EQ delay in usec */
	u16 cur_eqd;		/* current EQ delay in usec */
	/*
	 * book keeping for interrupt / sec and TX/RX rate calculation
	 */
	ulong ips_jiffies;	/* jiffies at last IPS calc */
	u32 eth_tx_bytes;
	ulong eth_tx_jiffies;
	u32 eth_rx_bytes;
	ulong eth_rx_jiffies;

	struct semaphore get_eth_stat_sem;

	/* timer ctxt to prevent shutdown hanging due to un-responsive BE */
	struct be_timer_ctxt timer_ctxt;

#define BE_MAX_MSIX_VECTORS	32
#define BE_MAX_REQ_MSIX_VECTORS	1 /* only one EQ in Linux driver */
	struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
	bool msix_enabled;
	bool dma_64bit_cap;	/* the Device DAC capable or not */
	u8 dev_state;		/* The current state of the device */
	u8 dev_pm_state;	/* The State of device before going to suspend */
};
/*
* Every second we look at the ints/sec and adjust eq_delay
* between adapter->min_eqd and adapter->max_eqd to keep the ints/sec between
* IPS_HI_WM and IPS_LO_WM.
*/
#define IPS_HI_WM 18000
#define IPS_LO_WM 8000
/* Advance a ring index by val, wrapping at limit (must be a power of two). */
static inline void index_adv(u32 *index, u32 val, u32 limit)
{
	u32 mask = limit - 1;

	/* mask-based wrap-around is only valid for power-of-two rings */
	BUG_ON(limit & mask);
	*index = (*index + val) & mask;
}
/* Advance a ring index by one, wrapping at limit (must be a power of two). */
static inline void index_inc(u32 *index, u32 limit)
{
	u32 mask = limit - 1;

	/* mask-based wrap-around is only valid for power-of-two rings */
	BUG_ON(limit & mask);
	*index = (*index + 1) & mask;
}
/* Advance the event queue tail index by one (wraps at event_q_len). */
static inline void be_adv_eq_tl(struct be_net_object *pnob)
{
	index_inc(&pnob->event_q_tl, pnob->event_q_len);
}
/* Advance the TX queue head index by one (wraps at tx_q_len). */
static inline void be_adv_txq_hd(struct be_net_object *pnob)
{
	index_inc(&pnob->tx_q_hd, pnob->tx_q_len);
}
/* Advance the TX queue tail index by one (wraps at tx_q_len). */
static inline void be_adv_txq_tl(struct be_net_object *pnob)
{
	index_inc(&pnob->tx_q_tl, pnob->tx_q_len);
}
/* Advance the TX completion queue tail index by one (wraps at txcq_len). */
static inline void be_adv_txcq_tl(struct be_net_object *pnob)
{
	index_inc(&pnob->tx_cq_tl, pnob->txcq_len);
}
/* Advance the RX queue head index by one (wraps at rx_q_len). */
static inline void be_adv_rxq_hd(struct be_net_object *pnob)
{
	index_inc(&pnob->rx_q_hd, pnob->rx_q_len);
}
/* Advance the RX completion queue tail index by one (wraps at rx_cq_len). */
static inline void be_adv_rxcq_tl(struct be_net_object *pnob)
{
	index_inc(&pnob->rx_cq_tl, pnob->rx_cq_len);
}
/*
 * Return the ring index of the last WRB of the request at the TX queue tail.
 * The void * slot in tx_ctxt[tx_q_tl] is reinterpreted here as a u32 --
 * presumably the number of WRBs that request occupies, so the last WRB is
 * tail + count - 1, masked by the (power-of-two) ring size.
 * NOTE(review): confirm against the TX post path that fills tx_ctxt.
 */
static inline u32 tx_compl_lastwrb_idx_get(struct be_net_object *pnob)
{
	return (pnob->tx_q_tl + *(u32 *)&pnob->tx_ctxt[pnob->tx_q_tl] - 1)
		    & (pnob->tx_q_len - 1);
}
int benet_init(struct net_device *);
int be_ethtool_ioctl(struct net_device *, struct ifreq *);
struct net_device_stats *benet_get_stats(struct net_device *);
void be_process_intr(unsigned long context);
irqreturn_t be_int(int irq, void *dev);
void be_post_eth_rx_buffs(struct be_net_object *);
void be_get_stat_cb(void *, int, struct MCC_WRB_AMAP *);
void be_get_stats_timer_handler(unsigned long);
void be_wait_nic_tx_cmplx_cmpl(struct be_net_object *);
void be_print_link_info(struct BE_LINK_STATUS *);
void be_update_link_status(struct be_adapter *);
void be_init_procfs(struct be_adapter *);
void be_cleanup_procfs(struct be_adapter *);
int be_poll(struct napi_struct *, int);
struct ETH_RX_COMPL_AMAP *be_get_rx_cmpl(struct be_net_object *);
void be_notify_cmpl(struct be_net_object *, int, int, int);
void be_enable_intr(struct be_net_object *);
void be_enable_eq_intr(struct be_net_object *);
void be_disable_intr(struct be_net_object *);
void be_disable_eq_intr(struct be_net_object *);
int be_set_uc_mac_adr(struct be_net_object *, u8, u8, u8,
u8 *, mcc_wrb_cqe_callback, void *);
int be_get_flow_ctl(struct be_function_object *pFnObj, bool *, bool *);
void process_one_tx_compl(struct be_net_object *pnob, u32 end_idx);
#endif /* _BENET_H_ */

View File

@ -0,0 +1,103 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
#ifndef _BESTATUS_H_
#define _BESTATUS_H_
#define BE_SUCCESS (0x00000000L)
/*
* MessageId: BE_PENDING
 * The BladeEngine Driver call succeeded; the operation is pending.
*/
#define BE_PENDING (0x20070001L)
#define BE_STATUS_PENDING (BE_PENDING)
/*
* MessageId: BE_NOT_OK
* An error occurred.
*/
#define BE_NOT_OK (0xE0070002L)
/*
* MessageId: BE_STATUS_SYSTEM_RESOURCES
* Insufficient host system resources exist to complete the API.
*/
#define BE_STATUS_SYSTEM_RESOURCES (0xE0070003L)
/*
* MessageId: BE_STATUS_CHIP_RESOURCES
* Insufficient chip resources exist to complete the API.
*/
#define BE_STATUS_CHIP_RESOURCES (0xE0070004L)
/*
* MessageId: BE_STATUS_NO_RESOURCE
* Insufficient resources to complete request.
*/
#define BE_STATUS_NO_RESOURCE (0xE0070005L)
/*
* MessageId: BE_STATUS_BUSY
* Resource is currently busy.
*/
#define BE_STATUS_BUSY (0xE0070006L)
/*
* MessageId: BE_STATUS_INVALID_PARAMETER
* Invalid Parameter in request.
*/
#define BE_STATUS_INVALID_PARAMETER (0xE0000007L)
/*
* MessageId: BE_STATUS_NOT_SUPPORTED
* Requested operation is not supported.
*/
#define BE_STATUS_NOT_SUPPORTED (0xE000000DL)
/*
* ***************************************************************************
* E T H E R N E T S T A T U S
* ***************************************************************************
*/
/*
* MessageId: BE_ETH_TX_ERROR
* The Ethernet device driver failed to transmit a packet.
*/
#define BE_ETH_TX_ERROR (0xE0070101L)
/*
* ***************************************************************************
* S H A R E D S T A T U S
* ***************************************************************************
*/
/*
 * MessageId: BE_STATUS_INVALID_VERSION
* The device driver is not compatible with this version of the VBD.
*/
#define BE_STATUS_INVALID_VERSION (0xE0070402L)
/*
* MessageId: BE_STATUS_DOMAIN_DENIED
* The operation failed to complete due to insufficient access
* rights for the requesting domain.
*/
#define BE_STATUS_DOMAIN_DENIED (0xE0070403L)
/*
* MessageId: BE_STATUS_TCP_NOT_STARTED
* The embedded TCP/IP stack has not been started.
*/
#define BE_STATUS_TCP_NOT_STARTED (0xE0070409L)
/*
* MessageId: BE_STATUS_NO_MCC_WRB
* No free MCC WRB are available for posting the request.
*/
#define BE_STATUS_NO_MCC_WRB (0xE0070414L)
#endif /* _BESTATUS_H_ */

243
drivers/staging/benet/cev.h Normal file
View File

@ -0,0 +1,243 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __cev_amap_h__
#define __cev_amap_h__
#include "ep.h"
/*
* Host Interrupt Status Register 0. The first of four application
* interrupt status registers. This register contains the interrupts
* for Event Queues EQ0 through EQ31.
*/
struct BE_CEV_ISR0_CSR_AMAP {
u8 interrupt0; /* DWORD 0 */
u8 interrupt1; /* DWORD 0 */
u8 interrupt2; /* DWORD 0 */
u8 interrupt3; /* DWORD 0 */
u8 interrupt4; /* DWORD 0 */
u8 interrupt5; /* DWORD 0 */
u8 interrupt6; /* DWORD 0 */
u8 interrupt7; /* DWORD 0 */
u8 interrupt8; /* DWORD 0 */
u8 interrupt9; /* DWORD 0 */
u8 interrupt10; /* DWORD 0 */
u8 interrupt11; /* DWORD 0 */
u8 interrupt12; /* DWORD 0 */
u8 interrupt13; /* DWORD 0 */
u8 interrupt14; /* DWORD 0 */
u8 interrupt15; /* DWORD 0 */
u8 interrupt16; /* DWORD 0 */
u8 interrupt17; /* DWORD 0 */
u8 interrupt18; /* DWORD 0 */
u8 interrupt19; /* DWORD 0 */
u8 interrupt20; /* DWORD 0 */
u8 interrupt21; /* DWORD 0 */
u8 interrupt22; /* DWORD 0 */
u8 interrupt23; /* DWORD 0 */
u8 interrupt24; /* DWORD 0 */
u8 interrupt25; /* DWORD 0 */
u8 interrupt26; /* DWORD 0 */
u8 interrupt27; /* DWORD 0 */
u8 interrupt28; /* DWORD 0 */
u8 interrupt29; /* DWORD 0 */
u8 interrupt30; /* DWORD 0 */
u8 interrupt31; /* DWORD 0 */
} __packed;
struct CEV_ISR0_CSR_AMAP {
u32 dw[1];
};
/*
* Host Interrupt Status Register 1. The second of four application
* interrupt status registers. This register contains the interrupts
* for Event Queues EQ32 through EQ63.
*/
struct BE_CEV_ISR1_CSR_AMAP {
u8 interrupt32; /* DWORD 0 */
u8 interrupt33; /* DWORD 0 */
u8 interrupt34; /* DWORD 0 */
u8 interrupt35; /* DWORD 0 */
u8 interrupt36; /* DWORD 0 */
u8 interrupt37; /* DWORD 0 */
u8 interrupt38; /* DWORD 0 */
u8 interrupt39; /* DWORD 0 */
u8 interrupt40; /* DWORD 0 */
u8 interrupt41; /* DWORD 0 */
u8 interrupt42; /* DWORD 0 */
u8 interrupt43; /* DWORD 0 */
u8 interrupt44; /* DWORD 0 */
u8 interrupt45; /* DWORD 0 */
u8 interrupt46; /* DWORD 0 */
u8 interrupt47; /* DWORD 0 */
u8 interrupt48; /* DWORD 0 */
u8 interrupt49; /* DWORD 0 */
u8 interrupt50; /* DWORD 0 */
u8 interrupt51; /* DWORD 0 */
u8 interrupt52; /* DWORD 0 */
u8 interrupt53; /* DWORD 0 */
u8 interrupt54; /* DWORD 0 */
u8 interrupt55; /* DWORD 0 */
u8 interrupt56; /* DWORD 0 */
u8 interrupt57; /* DWORD 0 */
u8 interrupt58; /* DWORD 0 */
u8 interrupt59; /* DWORD 0 */
u8 interrupt60; /* DWORD 0 */
u8 interrupt61; /* DWORD 0 */
u8 interrupt62; /* DWORD 0 */
u8 interrupt63; /* DWORD 0 */
} __packed;
struct CEV_ISR1_CSR_AMAP {
u32 dw[1];
};
/*
* Host Interrupt Status Register 2. The third of four application
* interrupt status registers. This register contains the interrupts
* for Event Queues EQ64 through EQ95.
*/
struct BE_CEV_ISR2_CSR_AMAP {
u8 interrupt64; /* DWORD 0 */
u8 interrupt65; /* DWORD 0 */
u8 interrupt66; /* DWORD 0 */
u8 interrupt67; /* DWORD 0 */
u8 interrupt68; /* DWORD 0 */
u8 interrupt69; /* DWORD 0 */
u8 interrupt70; /* DWORD 0 */
u8 interrupt71; /* DWORD 0 */
u8 interrupt72; /* DWORD 0 */
u8 interrupt73; /* DWORD 0 */
u8 interrupt74; /* DWORD 0 */
u8 interrupt75; /* DWORD 0 */
u8 interrupt76; /* DWORD 0 */
u8 interrupt77; /* DWORD 0 */
u8 interrupt78; /* DWORD 0 */
u8 interrupt79; /* DWORD 0 */
u8 interrupt80; /* DWORD 0 */
u8 interrupt81; /* DWORD 0 */
u8 interrupt82; /* DWORD 0 */
u8 interrupt83; /* DWORD 0 */
u8 interrupt84; /* DWORD 0 */
u8 interrupt85; /* DWORD 0 */
u8 interrupt86; /* DWORD 0 */
u8 interrupt87; /* DWORD 0 */
u8 interrupt88; /* DWORD 0 */
u8 interrupt89; /* DWORD 0 */
u8 interrupt90; /* DWORD 0 */
u8 interrupt91; /* DWORD 0 */
u8 interrupt92; /* DWORD 0 */
u8 interrupt93; /* DWORD 0 */
u8 interrupt94; /* DWORD 0 */
u8 interrupt95; /* DWORD 0 */
} __packed;
struct CEV_ISR2_CSR_AMAP {
u32 dw[1];
};
/*
* Host Interrupt Status Register 3. The fourth of four application
* interrupt status registers. This register contains the interrupts
* for Event Queues EQ96 through EQ127.
*/
struct BE_CEV_ISR3_CSR_AMAP {
u8 interrupt96; /* DWORD 0 */
u8 interrupt97; /* DWORD 0 */
u8 interrupt98; /* DWORD 0 */
u8 interrupt99; /* DWORD 0 */
u8 interrupt100; /* DWORD 0 */
u8 interrupt101; /* DWORD 0 */
u8 interrupt102; /* DWORD 0 */
u8 interrupt103; /* DWORD 0 */
u8 interrupt104; /* DWORD 0 */
u8 interrupt105; /* DWORD 0 */
u8 interrupt106; /* DWORD 0 */
u8 interrupt107; /* DWORD 0 */
u8 interrupt108; /* DWORD 0 */
u8 interrupt109; /* DWORD 0 */
u8 interrupt110; /* DWORD 0 */
u8 interrupt111; /* DWORD 0 */
u8 interrupt112; /* DWORD 0 */
u8 interrupt113; /* DWORD 0 */
u8 interrupt114; /* DWORD 0 */
u8 interrupt115; /* DWORD 0 */
u8 interrupt116; /* DWORD 0 */
u8 interrupt117; /* DWORD 0 */
u8 interrupt118; /* DWORD 0 */
u8 interrupt119; /* DWORD 0 */
u8 interrupt120; /* DWORD 0 */
u8 interrupt121; /* DWORD 0 */
u8 interrupt122; /* DWORD 0 */
u8 interrupt123; /* DWORD 0 */
u8 interrupt124; /* DWORD 0 */
u8 interrupt125; /* DWORD 0 */
u8 interrupt126; /* DWORD 0 */
u8 interrupt127; /* DWORD 0 */
} __packed;
struct CEV_ISR3_CSR_AMAP {
u32 dw[1];
};
/* Completions and Events block Registers. */
struct BE_CEV_CSRMAP_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
u8 rsvd1[32]; /* DWORD 1 */
u8 rsvd2[32]; /* DWORD 2 */
u8 rsvd3[32]; /* DWORD 3 */
struct BE_CEV_ISR0_CSR_AMAP isr0;
struct BE_CEV_ISR1_CSR_AMAP isr1;
struct BE_CEV_ISR2_CSR_AMAP isr2;
struct BE_CEV_ISR3_CSR_AMAP isr3;
u8 rsvd4[32]; /* DWORD 8 */
u8 rsvd5[32]; /* DWORD 9 */
u8 rsvd6[32]; /* DWORD 10 */
u8 rsvd7[32]; /* DWORD 11 */
u8 rsvd8[32]; /* DWORD 12 */
u8 rsvd9[32]; /* DWORD 13 */
u8 rsvd10[32]; /* DWORD 14 */
u8 rsvd11[32]; /* DWORD 15 */
u8 rsvd12[32]; /* DWORD 16 */
u8 rsvd13[32]; /* DWORD 17 */
u8 rsvd14[32]; /* DWORD 18 */
u8 rsvd15[32]; /* DWORD 19 */
u8 rsvd16[32]; /* DWORD 20 */
u8 rsvd17[32]; /* DWORD 21 */
u8 rsvd18[32]; /* DWORD 22 */
u8 rsvd19[32]; /* DWORD 23 */
u8 rsvd20[32]; /* DWORD 24 */
u8 rsvd21[32]; /* DWORD 25 */
u8 rsvd22[32]; /* DWORD 26 */
u8 rsvd23[32]; /* DWORD 27 */
u8 rsvd24[32]; /* DWORD 28 */
u8 rsvd25[32]; /* DWORD 29 */
u8 rsvd26[32]; /* DWORD 30 */
u8 rsvd27[32]; /* DWORD 31 */
u8 rsvd28[32]; /* DWORD 32 */
u8 rsvd29[32]; /* DWORD 33 */
u8 rsvd30[192]; /* DWORD 34 */
u8 rsvd31[192]; /* DWORD 40 */
u8 rsvd32[160]; /* DWORD 46 */
u8 rsvd33[160]; /* DWORD 51 */
u8 rsvd34[160]; /* DWORD 56 */
u8 rsvd35[96]; /* DWORD 61 */
u8 rsvd36[192][32]; /* DWORD 64 */
} __packed;
struct CEV_CSRMAP_AMAP {
u32 dw[256];
};
#endif /* __cev_amap_h__ */

211
drivers/staging/benet/cq.c Normal file
View File

@ -0,0 +1,211 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
#include "hwlib.h"
#include "bestatus.h"
/*
* Completion Queue Objects
*/
/*
*============================================================================
* P U B L I C R O U T I N E S
*============================================================================
*/
/*
This routine creates a completion queue based on the client completion
queue configuration information.
FunctionObject - Handle to a function object
    CqBaseVa            - Base VA for the CQ ring
NumEntries - CEV_CQ_CNT_* values
solEventEnable - 0 = All CQEs can generate Events if CQ is eventable
1 = only CQEs with solicited bit set are eventable
eventable - Eventable CQ, generates interrupts.
    nodelay             - 1 = Force interrupt, relevant if CQ eventable.
Interrupt is asserted immediately after EQE
write is confirmed, regardless of EQ Timer
or watermark settings.
wme - Enable watermark based coalescing
wmThresh - High watermark(CQ fullness at which event
or interrupt should be asserted). These are the
CEV_WATERMARK encoded values.
EqObject - EQ Handle to assign to this CQ
ppCqObject - Internal CQ Handle returned.
    Returns BE_SUCCESS if successful, otherwise a useful error code is
returned.
IRQL < DISPATCH_LEVEL
*/
int be_cq_create(struct be_function_object *pfob,
	struct ring_desc *rd, u32 length, bool solicited_eventable,
	bool no_delay, u32 wm_thresh,
	struct be_eq_object *eq_object, struct be_cq_object *cq_object)
{
	int status = BE_SUCCESS;
	u32 num_entries_encoding;
	u32 num_entries = length / sizeof(struct MCC_CQ_ENTRY_AMAP);
	struct FWCMD_COMMON_CQ_CREATE *fwcmd = NULL;
	struct MCC_WRB_AMAP *wrb = NULL;
	u32 n;
	unsigned long irql;

	ASSERT(rd);
	ASSERT(cq_object);
	ASSERT(length % sizeof(struct MCC_CQ_ENTRY_AMAP) == 0);

	/* Only ring sizes the hardware has a count encoding for are valid. */
	switch (num_entries) {
	case 256:
		num_entries_encoding = CEV_CQ_CNT_256;
		break;
	case 512:
		num_entries_encoding = CEV_CQ_CNT_512;
		break;
	case 1024:
		num_entries_encoding = CEV_CQ_CNT_1024;
		break;
	default:
		ASSERT(0);
		return BE_STATUS_INVALID_PARAMETER;
	}

	/*
	 * All CQ entries are the same size.  Use the iSCSI version
	 * as a test for the proper rd length.
	 */
	memset(cq_object, 0, sizeof(*cq_object));

	atomic_set(&cq_object->ref_count, 0);
	cq_object->parent_function = pfob;
	cq_object->eq_object = eq_object;
	cq_object->num_entries = num_entries;
	/* save for MCC cq processing */
	cq_object->va = rd->va;

	/* map into UT. */
	length = num_entries * sizeof(struct MCC_CQ_ENTRY_AMAP);

	/* MCC WRB rings are shared; serialized by the function's post_lock. */
	spin_lock_irqsave(&pfob->post_lock, irql);

	wrb = be_function_peek_mcc_wrb(pfob);
	if (!wrb) {
		ASSERT(wrb);
		/* NOTE(review): message says "EQ" but this is CQ creation. */
		TRACE(DL_ERR, "No free MCC WRBs in create EQ.");
		status = BE_STATUS_NO_MCC_WRB;
		goto Error;
	}
	/* Prepares an embedded fwcmd, including request/response sizes. */
	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_CQ_CREATE);

	fwcmd->params.request.num_pages = PAGES_SPANNED(OFFSET_IN_PAGE(rd->va),
									length);

	AMAP_SET_BITS_PTR(CQ_CONTEXT, valid, &fwcmd->params.request.context, 1);
	n = pfob->pci_function_number;
	AMAP_SET_BITS_PTR(CQ_CONTEXT, Func, &fwcmd->params.request.context, n);

	/* CQ generates events only when it is attached to an EQ. */
	n = (eq_object != NULL);
	AMAP_SET_BITS_PTR(CQ_CONTEXT, Eventable,
				&fwcmd->params.request.context, n);
	AMAP_SET_BITS_PTR(CQ_CONTEXT, Armed, &fwcmd->params.request.context, 1);

	n = eq_object ? eq_object->eq_id : 0;
	AMAP_SET_BITS_PTR(CQ_CONTEXT, EQID, &fwcmd->params.request.context, n);
	AMAP_SET_BITS_PTR(CQ_CONTEXT, Count,
			&fwcmd->params.request.context, num_entries_encoding);

	n = 0; /* Protection Domain is always 0 in Linux driver */
	AMAP_SET_BITS_PTR(CQ_CONTEXT, PD, &fwcmd->params.request.context, n);
	AMAP_SET_BITS_PTR(CQ_CONTEXT, NoDelay,
				&fwcmd->params.request.context, no_delay);
	AMAP_SET_BITS_PTR(CQ_CONTEXT, SolEvent,
			&fwcmd->params.request.context, solicited_eventable);

	/* wm_thresh == 0xFFFFFFFF means watermarking disabled. */
	n = (wm_thresh != 0xFFFFFFFF);
	AMAP_SET_BITS_PTR(CQ_CONTEXT, WME, &fwcmd->params.request.context, n);

	n = (n ? wm_thresh : 0);
	AMAP_SET_BITS_PTR(CQ_CONTEXT, Watermark,
				&fwcmd->params.request.context, n);
	/* Create a page list for the FWCMD. */
	be_rd_to_pa_list(rd, fwcmd->params.request.pages,
			  ARRAY_SIZE(fwcmd->params.request.pages));

	/* Post the f/w command */
	status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
						NULL, NULL, fwcmd, NULL);
	if (status != BE_SUCCESS) {
		TRACE(DL_ERR, "MCC to create CQ failed.");
		goto Error;
	}
	/* Remember the CQ id. */
	cq_object->cq_id = fwcmd->params.response.cq_id;

	/* insert this cq into eq_object reference */
	if (eq_object) {
		atomic_inc(&eq_object->ref_count);
		list_add_tail(&cq_object->cqlist_for_eq,
					&eq_object->cq_list_head);
	}

Error:
	spin_unlock_irqrestore(&pfob->post_lock, irql);

	/* Kick any fwcmds that were queued while the WRB ring was busy. */
	if (pfob->pend_queue_driving && pfob->mcc) {
		pfob->pend_queue_driving = 0;
		be_drive_mcc_wrb_queue(pfob->mcc);
	}
	return status;
}
/*
    Dereferences the given object. Once the object's reference count drops to
zero, the object is destroyed and all resources that are held by this object
are released. The on-chip context is also destroyed along with the queue
ID, and any mappings made into the UT.
cq_object - CQ handle returned from cq_object_create.
returns the current reference count on the object
IRQL: IRQL < DISPATCH_LEVEL
*/
int be_cq_destroy(struct be_cq_object *cq_object)
{
	int status = 0;

	/* Nothing should reference this CQ at this point. */
	ASSERT(atomic_read(&cq_object->ref_count) == 0);

	/* Send fwcmd to destroy the CQ. */
	status = be_function_ring_destroy(cq_object->parent_function,
			cq_object->cq_id, FWCMD_RING_TYPE_CQ,
					NULL, NULL, NULL, NULL);
	ASSERT(status == 0);

	/* Remove reference if this is an eventable CQ. */
	if (cq_object->eq_object) {
		atomic_dec(&cq_object->eq_object->ref_count);
		list_del(&cq_object->cqlist_for_eq);
	}
	/*
	 * NOTE(review): BE_SUCCESS is returned unconditionally; a ring-destroy
	 * failure is only caught by the ASSERT above (no-op in release builds).
	 */
	return BE_SUCCESS;
}

View File

@ -0,0 +1,71 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __descriptors_amap_h__
#define __descriptors_amap_h__
/*
* --- IPC_NODE_ID_ENUM ---
* IPC processor id values
*/
#define TPOST_NODE_ID (0) /* TPOST ID */
#define TPRE_NODE_ID (1) /* TPRE ID */
#define TXULP0_NODE_ID (2) /* TXULP0 ID */
#define TXULP1_NODE_ID (3) /* TXULP1 ID */
#define TXULP2_NODE_ID (4) /* TXULP2 ID */
#define RXULP0_NODE_ID (5) /* RXULP0 ID */
#define RXULP1_NODE_ID (6) /* RXULP1 ID */
#define RXULP2_NODE_ID (7) /* RXULP2 ID */
#define MPU_NODE_ID (15) /* MPU ID */
/*
* --- MAC_ID_ENUM ---
* Meaning of the mac_id field in rxpp_eth_d
*/
#define PORT0_HOST_MAC0 (0) /* PD 0, Port 0, host networking, MAC 0. */
#define PORT0_HOST_MAC1 (1) /* PD 0, Port 0, host networking, MAC 1. */
#define PORT0_STORAGE_MAC0 (2) /* PD 0, Port 0, host storage, MAC 0. */
#define PORT0_STORAGE_MAC1 (3) /* PD 0, Port 0, host storage, MAC 1. */
#define PORT1_HOST_MAC0 (4) /* PD 0, Port 1 host networking, MAC 0. */
#define PORT1_HOST_MAC1 (5) /* PD 0, Port 1 host networking, MAC 1. */
#define PORT1_STORAGE_MAC0 (6) /* PD 0, Port 1 host storage, MAC 0. */
#define PORT1_STORAGE_MAC1 (7) /* PD 0, Port 1 host storage, MAC 1. */
#define FIRST_VM_MAC (8) /* PD 1 MAC. Protection domains have IDs */
/* from 0x8-0x26, one per PD. */
#define LAST_VM_MAC (38) /* PD 31 MAC. */
#define MGMT_MAC (39) /* Management port MAC. */
#define MARBLE_MAC0 (59) /* Used for flushing function 0 receive */
/*
* queues before re-using a torn-down
* receive ring. the DA =
* 00-00-00-00-00-00, and the MSB of the
* SA = 00
*/
#define MARBLE_MAC1 (60) /* Used for flushing function 1 receive */
/*
* queues before re-using a torn-down
* receive ring. the DA =
* 00-00-00-00-00-00, and the MSB of the
* SA != 00
*/
#define NULL_MAC (61) /* Promiscuous mode, indicates no match */
#define MCAST_MAC (62) /* Multicast match. */
#define BCAST_MATCH (63) /* Broadcast match. */
#endif /* __descriptors_amap_h__ */

View File

@ -0,0 +1,179 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __doorbells_amap_h__
#define __doorbells_amap_h__
/* The TX/RDMA send queue doorbell. */
struct BE_SQ_DB_AMAP {
u8 cid[11]; /* DWORD 0 */
u8 rsvd0[5]; /* DWORD 0 */
u8 numPosted[14]; /* DWORD 0 */
u8 rsvd1[2]; /* DWORD 0 */
} __packed;
struct SQ_DB_AMAP {
u32 dw[1];
};
/* The receive queue doorbell. */
struct BE_RQ_DB_AMAP {
u8 rq[10]; /* DWORD 0 */
u8 rsvd0[13]; /* DWORD 0 */
u8 Invalidate; /* DWORD 0 */
u8 numPosted[8]; /* DWORD 0 */
} __packed;
struct RQ_DB_AMAP {
u32 dw[1];
};
/*
* The CQ/EQ doorbell. Software MUST set reserved fields in this
* descriptor to zero, otherwise (CEV) hardware will not execute the
* doorbell (flagging a bad_db_qid error instead).
*/
struct BE_CQ_DB_AMAP {
u8 qid[10]; /* DWORD 0 */
u8 rsvd0[4]; /* DWORD 0 */
u8 rearm; /* DWORD 0 */
u8 event; /* DWORD 0 */
u8 num_popped[13]; /* DWORD 0 */
u8 rsvd1[3]; /* DWORD 0 */
} __packed;
struct CQ_DB_AMAP {
u32 dw[1];
};
struct BE_TPM_RQ_DB_AMAP {
u8 qid[10]; /* DWORD 0 */
u8 rsvd0[6]; /* DWORD 0 */
u8 numPosted[11]; /* DWORD 0 */
u8 mss_cnt[5]; /* DWORD 0 */
} __packed;
struct TPM_RQ_DB_AMAP {
u32 dw[1];
};
/*
* Post WRB Queue Doorbell Register used by the host Storage stack
* to notify the controller of a posted Work Request Block
*/
struct BE_WRB_POST_DB_AMAP {
u8 wrb_cid[10]; /* DWORD 0 */
u8 rsvd0[6]; /* DWORD 0 */
u8 wrb_index[8]; /* DWORD 0 */
u8 numberPosted[8]; /* DWORD 0 */
} __packed;
struct WRB_POST_DB_AMAP {
u32 dw[1];
};
/*
* Update Default PDU Queue Doorbell Register used to communicate
* to the controller that the driver has stopped processing the queue
* and where in the queue it stopped, this is
* a CQ Entry Type. Used by storage driver.
*/
struct BE_DEFAULT_PDU_DB_AMAP {
u8 qid[10]; /* DWORD 0 */
u8 rsvd0[4]; /* DWORD 0 */
u8 rearm; /* DWORD 0 */
u8 event; /* DWORD 0 */
u8 cqproc[14]; /* DWORD 0 */
u8 rsvd1[2]; /* DWORD 0 */
} __packed;
struct DEFAULT_PDU_DB_AMAP {
u32 dw[1];
};
/* Management Command and Controller default fragment ring */
/*
 * MCC ring doorbell bit layout (AMAP format: u8 array length == field width
 * in bits; one "DWORD n" group == 32 bits).
 */
struct BE_MCC_DB_AMAP {
u8 rid[11]; /* DWORD 0 */
u8 rsvd0[5]; /* DWORD 0 */
u8 numPosted[14]; /* DWORD 0 */
u8 rsvd1[2]; /* DWORD 0 */
} __packed;
/* Raw one-dword register image corresponding to BE_MCC_DB_AMAP. */
struct MCC_DB_AMAP {
u32 dw[1];
};
/*
* Used for bootstrapping the Host interface. This register is
* used for driver communication with the MPU when no MCC Rings exist.
* The software must write this register twice to post any MCC
* command. First, it writes the register with hi=1 and the upper bits of
* the physical address for the MCC_MAILBOX structure. Software must poll
* the ready bit until this is acknowledged. Then, software writes the
* register with hi=0 with the lower bits in the address. It must
* poll the ready bit until the MCC command is complete. Upon completion,
* the MCC_MAILBOX will contain a valid completion queue entry.
*/
/*
 * Bootstrap mailbox doorbell bit layout (AMAP format: u8 array length ==
 * field width in bits; one "DWORD n" group == 32 bits).  ready and hi are
 * 1-bit flags; address carries 30 bits of the MCC_MAILBOX physical address
 * (high half when hi=1, low half when hi=0 -- see the comment above).
 */
struct BE_MPU_MAILBOX_DB_AMAP {
u8 ready; /* DWORD 0 */
u8 hi; /* DWORD 0 */
u8 address[30]; /* DWORD 0 */
} __packed;
/* Raw one-dword register image corresponding to BE_MPU_MAILBOX_DB_AMAP. */
struct MPU_MAILBOX_DB_AMAP {
u32 dw[1];
};
/*
* This is the protection domain doorbell register map. Note that
* while this map shows doorbells for all Blade Engine supported
* protocols, not all of these may be valid in a given function or
* protection domain. It is the responsibility of the application
* accessing the doorbells to know which are valid. Each doorbell
* occupies 32 bytes of space, but unless otherwise specified,
* only the first 4 bytes should be written. There are 32 instances
* of these doorbells for the host and 31 virtual machines respectively.
* The host and VMs will only map the doorbell pages belonging to its
* protection domain. It will not be able to touch the doorbells for
* another VM. The doorbells are the only registers directly accessible
* by a virtual machine. Similarly, there are 511 additional
* doorbells for RDMA protection domains. PD 0 for RDMA shares
* the same physical protection domain doorbell page as ETH/iSCSI.
*
*/
/*
 * Per-protection-domain doorbell page layout (AMAP format: u8 array lengths
 * are widths in BITS).  rsvd0[512] = 16 reserved dwords; each doorbell is
 * one dword followed by u8 rsvdN[7][32] = 7 dwords of pad, giving the
 * 32-byte-per-doorbell spacing described in the comment above.  The whole
 * map is dw[1024] = 4KB, i.e. one page per protection domain.
 */
struct BE_PROTECTION_DOMAIN_DBMAP_AMAP {
u8 rsvd0[512]; /* DWORD 0 */
struct BE_SQ_DB_AMAP rdma_sq_db;
u8 rsvd1[7][32]; /* DWORD 17 */
struct BE_WRB_POST_DB_AMAP iscsi_wrb_post_db;
u8 rsvd2[7][32]; /* DWORD 25 */
struct BE_SQ_DB_AMAP etx_sq_db;
u8 rsvd3[7][32]; /* DWORD 33 */
struct BE_RQ_DB_AMAP rdma_rq_db;
u8 rsvd4[7][32]; /* DWORD 41 */
struct BE_DEFAULT_PDU_DB_AMAP iscsi_default_pdu_db;
u8 rsvd5[7][32]; /* DWORD 49 */
struct BE_TPM_RQ_DB_AMAP tpm_rq_db;
u8 rsvd6[7][32]; /* DWORD 57 */
struct BE_RQ_DB_AMAP erx_rq_db;
u8 rsvd7[7][32]; /* DWORD 65 */
struct BE_CQ_DB_AMAP cq_db;
u8 rsvd8[7][32]; /* DWORD 73 */
struct BE_MCC_DB_AMAP mpu_mcc_db;
u8 rsvd9[7][32]; /* DWORD 81 */
struct BE_MPU_MAILBOX_DB_AMAP mcc_bootstrap_db;
u8 rsvd10[935][32]; /* DWORD 89 */
} __packed;
/* Raw 1024-dword (one page) image of the protection domain doorbell map. */
struct PROTECTION_DOMAIN_DBMAP_AMAP {
u32 dw[1024];
};
#endif /* __doorbells_amap_h__ */

View File

@ -0,0 +1,66 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __ep_amap_h__
#define __ep_amap_h__
/* General Control and Status Register. */
/*
 * Embedded-processor control/status register bit layout (AMAP format:
 * u8 array length == field width in bits; the group totals 32 bits).
 * The m*_RxPbuf, ff_en and CPU_reset members are 1-bit flags.
 */
struct BE_EP_CONTROL_CSR_AMAP {
u8 m0_RxPbuf; /* DWORD 0 */
u8 m1_RxPbuf; /* DWORD 0 */
u8 m2_RxPbuf; /* DWORD 0 */
u8 ff_en; /* DWORD 0 */
u8 rsvd0[27]; /* DWORD 0 */
u8 CPU_reset; /* DWORD 0 */
} __packed;
/* Raw one-dword register image corresponding to BE_EP_CONTROL_CSR_AMAP. */
struct EP_CONTROL_CSR_AMAP {
u32 dw[1];
};
/* Semaphore Register. */
/*
 * Semaphore register bit layout: a single full-width 32-bit value
 * (AMAP format: array length is the width in bits).
 */
struct BE_EP_SEMAPHORE_CSR_AMAP {
u8 value[32]; /* DWORD 0 */
} __packed;
/* Raw one-dword register image corresponding to BE_EP_SEMAPHORE_CSR_AMAP. */
struct EP_SEMAPHORE_CSR_AMAP {
u32 dw[1];
};
/* Embedded Processor Specific Registers. */
/*
 * Embedded-processor CSR block layout (AMAP format: u8 array lengths are
 * widths in BITS; rsvd5[8][128] spans 32 dwords).  Control register at
 * DWORD 0, semaphore at DWORD 43; 64 dwords total (see EP_CSRMAP_AMAP).
 */
struct BE_EP_CSRMAP_AMAP {
struct BE_EP_CONTROL_CSR_AMAP ep_control;
u8 rsvd0[32]; /* DWORD 1 */
u8 rsvd1[32]; /* DWORD 2 */
u8 rsvd2[32]; /* DWORD 3 */
u8 rsvd3[32]; /* DWORD 4 */
u8 rsvd4[32]; /* DWORD 5 */
u8 rsvd5[8][128]; /* DWORD 6 */
u8 rsvd6[32]; /* DWORD 38 */
u8 rsvd7[32]; /* DWORD 39 */
u8 rsvd8[32]; /* DWORD 40 */
u8 rsvd9[32]; /* DWORD 41 */
u8 rsvd10[32]; /* DWORD 42 */
struct BE_EP_SEMAPHORE_CSR_AMAP ep_semaphore;
u8 rsvd11[32]; /* DWORD 44 */
u8 rsvd12[19][32]; /* DWORD 45 */
} __packed;
/* Raw 64-dword image of the embedded-processor CSR block. */
struct EP_CSRMAP_AMAP {
u32 dw[64];
};
#endif /* __ep_amap_h__ */

299
drivers/staging/benet/eq.c Normal file
View File

@ -0,0 +1,299 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
#include "hwlib.h"
#include "bestatus.h"
/*
This routine creates an event queue based on the client completion
queue configuration information.
FunctionObject - Handle to a function object
EqBaseVa - Base VA for a the EQ ring
SizeEncoding - The encoded size for the EQ entries. This value is
either CEV_EQ_SIZE_4 or CEV_EQ_SIZE_16
NumEntries - CEV_CQ_CNT_* values.
Watermark - Enables watermark based coalescing. This parameter
must be of the type CEV_WMARK_* if watermarks
are enabled. If watermarks are to be disabled
this value should be -1.
TimerDelay - If a timer delay is enabled this value should be the
time of the delay in 8 microsecond units. If
delays are not used this parameter should be
set to -1.
ppEqObject - Internal EQ Handle returned.
Returns BE_SUCCESS if successful, otherwise a useful error code
is returned.
IRQL < DISPATCH_LEVEL
*/
/*
 * Build and post a COMMON_EQ_CREATE FWCMD for an event queue backed by the
 * ring described by @rd, initializing @eq_object on success (its eq_id is
 * assigned by the MPU).  Parameter semantics are described in the comment
 * block above.  Returns BE_SUCCESS or a BE_STATUS_* error code.
 */
int
be_eq_create(struct be_function_object *pfob,
struct ring_desc *rd, u32 eqe_size, u32 num_entries,
u32 watermark, /* CEV_WMARK_* or -1 */
u32 timer_delay, /* in 8us units, or -1 */
struct be_eq_object *eq_object)
{
int status = BE_SUCCESS;
u32 num_entries_encoding, eqe_size_encoding, length;
struct FWCMD_COMMON_EQ_CREATE *fwcmd = NULL;
struct MCC_WRB_AMAP *wrb = NULL;
u32 n;
unsigned long irql;
ASSERT(rd);
ASSERT(eq_object);
/* Map the entry count onto its CEV_EQ_CNT_* hardware encoding. */
switch (num_entries) {
case 256:
num_entries_encoding = CEV_EQ_CNT_256;
break;
case 512:
num_entries_encoding = CEV_EQ_CNT_512;
break;
case 1024:
num_entries_encoding = CEV_EQ_CNT_1024;
break;
case 2048:
num_entries_encoding = CEV_EQ_CNT_2048;
break;
case 4096:
num_entries_encoding = CEV_EQ_CNT_4096;
break;
default:
ASSERT(0);
return BE_STATUS_INVALID_PARAMETER;
}
/* Map the EQ entry size (in bytes) onto its CEV_EQ_SIZE_* encoding. */
switch (eqe_size) {
case 4:
eqe_size_encoding = CEV_EQ_SIZE_4;
break;
case 16:
eqe_size_encoding = CEV_EQ_SIZE_16;
break;
default:
ASSERT(0);
return BE_STATUS_INVALID_PARAMETER;
}
/*
 * Reject unsupported size/count combinations: 4-byte EQEs need at
 * least 1024 entries and 16-byte EQEs cannot use 4096.  All of the
 * validation above runs before post_lock is taken, so these early
 * returns do not leak the lock.
 */
if ((eqe_size == 4 && num_entries < 1024) ||
(eqe_size == 16 && num_entries == 4096)) {
TRACE(DL_ERR, "Bad EQ size. eqe_size:%d num_entries:%d",
eqe_size, num_entries);
ASSERT(0);
return BE_STATUS_INVALID_PARAMETER;
}
/* Start from a clean object; eq_id stays invalid until the FWCMD completes. */
memset(eq_object, 0, sizeof(*eq_object));
atomic_set(&eq_object->ref_count, 0);
eq_object->parent_function = pfob;
eq_object->eq_id = 0xFFFFFFFF;
INIT_LIST_HEAD(&eq_object->cq_list_head);
length = num_entries * eqe_size;
spin_lock_irqsave(&pfob->post_lock, irql);
wrb = be_function_peek_mcc_wrb(pfob);
if (!wrb) {
ASSERT(wrb);
TRACE(DL_ERR, "No free MCC WRBs in create EQ.");
status = BE_STATUS_NO_MCC_WRB;
goto Error;
}
/* Prepares an embedded fwcmd, including request/response sizes. */
fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_EQ_CREATE);
fwcmd->params.request.num_pages = PAGES_SPANNED(OFFSET_IN_PAGE(rd->va),
length);
n = pfob->pci_function_number;
AMAP_SET_BITS_PTR(EQ_CONTEXT, Func, &fwcmd->params.request.context, n);
AMAP_SET_BITS_PTR(EQ_CONTEXT, valid, &fwcmd->params.request.context, 1);
AMAP_SET_BITS_PTR(EQ_CONTEXT, Size,
&fwcmd->params.request.context, eqe_size_encoding);
n = 0; /* Protection Domain is always 0 in Linux driver */
AMAP_SET_BITS_PTR(EQ_CONTEXT, PD, &fwcmd->params.request.context, n);
/* Let the caller ARM the EQ with the doorbell. */
AMAP_SET_BITS_PTR(EQ_CONTEXT, Armed, &fwcmd->params.request.context, 0);
AMAP_SET_BITS_PTR(EQ_CONTEXT, Count, &fwcmd->params.request.context,
num_entries_encoding);
n = pfob->pci_function_number * 32;
AMAP_SET_BITS_PTR(EQ_CONTEXT, EventVect,
&fwcmd->params.request.context, n);
/* Optional watermark-based interrupt coalescing. */
if (watermark != -1) {
AMAP_SET_BITS_PTR(EQ_CONTEXT, WME,
&fwcmd->params.request.context, 1);
AMAP_SET_BITS_PTR(EQ_CONTEXT, Watermark,
&fwcmd->params.request.context, watermark);
ASSERT(watermark <= CEV_WMARK_240);
} else
AMAP_SET_BITS_PTR(EQ_CONTEXT, WME,
&fwcmd->params.request.context, 0);
/* Optional timer-based coalescing, in 8us units, clamped to 250. */
if (timer_delay != -1) {
AMAP_SET_BITS_PTR(EQ_CONTEXT, TMR,
&fwcmd->params.request.context, 1);
ASSERT(timer_delay <= 250); /* max value according to EAS */
timer_delay = min(timer_delay, (u32)250);
AMAP_SET_BITS_PTR(EQ_CONTEXT, Delay,
&fwcmd->params.request.context, timer_delay);
} else {
AMAP_SET_BITS_PTR(EQ_CONTEXT, TMR,
&fwcmd->params.request.context, 0);
}
/* Create a page list for the FWCMD. */
be_rd_to_pa_list(rd, fwcmd->params.request.pages,
ARRAY_SIZE(fwcmd->params.request.pages));
/* Synchronous post: no completion callback is supplied. */
status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
NULL, NULL, fwcmd, NULL);
if (status != BE_SUCCESS) {
TRACE(DL_ERR, "MCC to create EQ failed.");
goto Error;
}
/* Get the EQ id. The MPU allocates the IDs. */
eq_object->eq_id = fwcmd->params.response.eq_id;
Error:
spin_unlock_irqrestore(&pfob->post_lock, irql);
/* Drive any FWCMDs that were queued while post_lock was held. */
if (pfob->pend_queue_driving && pfob->mcc) {
pfob->pend_queue_driving = 0;
be_drive_mcc_wrb_queue(pfob->mcc);
}
return status;
}
/*
Deferences the given object. Once the object's reference count drops to
zero, the object is destroyed and all resources that are held by this
object are released. The on-chip context is also destroyed along with
the queue ID, and any mappings made into the UT.
eq_object - EQ handle returned from eq_object_create.
Returns BE_SUCCESS if successful, otherwise a useful error code
is returned.
IRQL: IRQL < DISPATCH_LEVEL
*/
/*
 * Destroy an EQ created by be_eq_create.  The caller must have dropped all
 * references and detached every CQ first (both checked by ASSERT).  Note
 * the ring-destroy status is only ASSERTed; BE_SUCCESS is returned
 * unconditionally.
 */
int be_eq_destroy(struct be_eq_object *eq_object)
{
int status = 0;
ASSERT(atomic_read(&eq_object->ref_count) == 0);
/* no CQs should reference this EQ now */
ASSERT(list_empty(&eq_object->cq_list_head));
/* Send fwcmd to destroy the EQ. */
status = be_function_ring_destroy(eq_object->parent_function,
eq_object->eq_id, FWCMD_RING_TYPE_EQ,
NULL, NULL, NULL, NULL);
ASSERT(status == 0);
return BE_SUCCESS;
}
/*
*---------------------------------------------------------------------------
* Function: be_eq_modify_delay
* Changes the EQ delay for a group of EQs.
* num_eq - The number of EQs in the eq_array to adjust.
* This also is the number of delay values in
* the eq_delay_array.
* eq_array - Array of struct be_eq_object pointers to adjust.
* eq_delay_array - Array of "num_eq" timer delays in units
* of microseconds. The be_eq_query_delay_range
* fwcmd returns the resolution and range of
* legal EQ delays.
* cb -
* cb_context -
* q_ctxt - Optional. Pointer to a previously allocated
* struct. If the MCC WRB ring is full, this
* structure is used to queue the operation. It
* will be posted to the MCC ring when space
* becomes available. All queued commands will
* be posted to the ring in the order they are
* received. It is always valid to pass a pointer to
* a generic be_generic_q_cntxt. However,
* the specific context structs
* are generally smaller than the generic struct.
* return pend_status - BE_SUCCESS (0) on success.
* BE_PENDING (positive value) if the FWCMD
* completion is pending. Negative error code on failure.
*-------------------------------------------------------------------------
*/
/*
 * Issue a COMMON_MODIFY_EQ_DELAY FWCMD adjusting the interrupt delay of
 * @num_eq event queues.  Parameter semantics are described in the comment
 * block above; may return BE_PENDING when the request is queued.
 */
int
be_eq_modify_delay(struct be_function_object *pfob,
u32 num_eq, struct be_eq_object **eq_array,
u32 *eq_delay_array, mcc_wrb_cqe_callback cb,
void *cb_context, struct be_eq_modify_delay_q_ctxt *q_ctxt)
{
struct FWCMD_COMMON_MODIFY_EQ_DELAY *fwcmd = NULL;
struct MCC_WRB_AMAP *wrb = NULL;
int status = 0;
struct be_generic_q_ctxt *gen_ctxt = NULL;
u32 i;
unsigned long irql;
spin_lock_irqsave(&pfob->post_lock, irql);
wrb = be_function_peek_mcc_wrb(pfob);
if (!wrb) {
/*
 * No free WRB: if the caller supplied both a queue context
 * and a callback, build the WRB inside the context so the
 * request can be queued; otherwise fail immediately.
 */
if (q_ctxt && cb) {
wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
gen_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
gen_ctxt->context.bytes = sizeof(*q_ctxt);
} else {
status = BE_STATUS_NO_MCC_WRB;
goto Error;
}
}
/* Prepares an embedded fwcmd, including request/response sizes. */
fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_MODIFY_EQ_DELAY);
ASSERT(num_eq > 0);
ASSERT(num_eq <= ARRAY_SIZE(fwcmd->params.request.delay));
fwcmd->params.request.num_eq = num_eq;
/* One (eq_id, delay) pair per EQ being adjusted. */
for (i = 0; i < num_eq; i++) {
fwcmd->params.request.delay[i].eq_id = eq_array[i]->eq_id;
fwcmd->params.request.delay[i].delay_in_microseconds =
eq_delay_array[i];
}
/* Post the f/w command */
status = be_function_post_mcc_wrb(pfob, wrb, gen_ctxt,
cb, cb_context, NULL, NULL, fwcmd, NULL);
Error:
spin_unlock_irqrestore(&pfob->post_lock, irql);
/* Drive any FWCMDs that were queued while post_lock was held. */
if (pfob->pend_queue_driving && pfob->mcc) {
pfob->pend_queue_driving = 0;
be_drive_mcc_wrb_queue(pfob->mcc);
}
return status;
}

1273
drivers/staging/benet/eth.c Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,55 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __etx_context_amap_h__
#define __etx_context_amap_h__
/* ETX ring context structure. */
/*
 * Ethernet TX ring on-chip context bit layout (AMAP format: u8 array length
 * == field width in bits; each "DWORD n" group totals 32 bits; 16 dwords
 * total -- see ETX_CONTEXT_AMAP).  pd_id_not_valid and func are 1-bit
 * fields.
 */
struct BE_ETX_CONTEXT_AMAP {
u8 tx_cidx[11]; /* DWORD 0 */
u8 rsvd0[5]; /* DWORD 0 */
u8 rsvd1[16]; /* DWORD 0 */
u8 tx_pidx[11]; /* DWORD 1 */
u8 rsvd2; /* DWORD 1 */
u8 tx_ring_size[4]; /* DWORD 1 */
u8 pd_id[5]; /* DWORD 1 */
u8 pd_id_not_valid; /* DWORD 1 */
u8 cq_id_send[10]; /* DWORD 1 */
u8 rsvd3[32]; /* DWORD 2 */
u8 rsvd4[32]; /* DWORD 3 */
u8 cur_bytes[32]; /* DWORD 4 */
u8 max_bytes[32]; /* DWORD 5 */
u8 time_stamp[32]; /* DWORD 6 */
u8 rsvd5[11]; /* DWORD 7 */
u8 func; /* DWORD 7 */
u8 rsvd6[20]; /* DWORD 7 */
u8 cur_txd_count[32]; /* DWORD 8 */
u8 max_txd_count[32]; /* DWORD 9 */
u8 rsvd7[32]; /* DWORD 10 */
u8 rsvd8[32]; /* DWORD 11 */
u8 rsvd9[32]; /* DWORD 12 */
u8 rsvd10[32]; /* DWORD 13 */
u8 rsvd11[32]; /* DWORD 14 */
u8 rsvd12[32]; /* DWORD 15 */
} __packed;
/* Raw 16-dword image of the ETX ring context. */
struct ETX_CONTEXT_AMAP {
u32 dw[16];
};
#endif /* __etx_context_amap_h__ */

View File

@ -0,0 +1,565 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
#include "hwlib.h"
#include "bestatus.h"
/*
 * Synchronously issue a COMMON_FIRMWARE_CONFIG FWCMD and have the MCC layer
 * copy the response payload into @config.  Returns BE_SUCCESS or a
 * BE_STATUS_* error code.
 */
int
be_function_internal_query_firmware_config(struct be_function_object *pfob,
struct BE_FIRMWARE_CONFIG *config)
{
struct FWCMD_COMMON_FIRMWARE_CONFIG *fwcmd = NULL;
struct MCC_WRB_AMAP *wrb = NULL;
int status = 0;
unsigned long irql;
struct be_mcc_wrb_response_copy rc;
spin_lock_irqsave(&pfob->post_lock, irql);
wrb = be_function_peek_mcc_wrb(pfob);
if (!wrb) {
TRACE(DL_ERR, "MCC wrb peek failed.");
status = BE_STATUS_NO_MCC_WRB;
goto error;
}
/* Prepares an embedded fwcmd, including request/response sizes. */
fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_FIRMWARE_CONFIG);
/* Describe where the response payload should be copied on completion. */
rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_FIRMWARE_CONFIG,
params.response);
rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_FIRMWARE_CONFIG,
params.response);
rc.va = config;
/* Post the f/w command */
status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL,
NULL, NULL, NULL, fwcmd, &rc);
error:
spin_unlock_irqrestore(&pfob->post_lock, irql);
/* Drive any FWCMDs that were queued while post_lock was held. */
if (pfob->pend_queue_driving && pfob->mcc) {
pfob->pend_queue_driving = 0;
be_drive_mcc_wrb_queue(pfob->mcc);
}
return status;
}
/*
This allocates and initializes a function object based on the information
provided by upper layer drivers.
Returns BE_SUCCESS on success and an appropriate int on failure.
A function object represents a single BladeEngine (logical) PCI function.
That is a function object either represents
the networking side of BladeEngine or the iSCSI side of BladeEngine.
This routine will also detect and create an appropriate PD object for the
PCI function as needed.
*/
/*
 * Initialize a function object for one BladeEngine logical PCI function
 * (iSCSI, network, or ARM -- see the comment block above).  Drives POST,
 * sets up the bootstrap mailbox, and caches the firmware config.  On any
 * failure the object is zeroed before returning the error.
 */
int
be_function_object_create(u8 __iomem *csr_va, u8 __iomem *db_va,
u8 __iomem *pci_va, u32 function_type,
struct ring_desc *mailbox, struct be_function_object *pfob)
{
int status;
ASSERT(pfob); /* not a magic assert */
ASSERT(function_type <= 2);
TRACE(DL_INFO, "Create function object. type:%s object:0x%p",
(function_type == BE_FUNCTION_TYPE_ISCSI ? "iSCSI" :
(function_type == BE_FUNCTION_TYPE_NETWORK ? "Network" :
"Arm")), pfob);
memset(pfob, 0, sizeof(*pfob));
pfob->type = function_type;
pfob->csr_va = csr_va;
pfob->db_va = db_va;
pfob->pci_va = pci_va;
spin_lock_init(&pfob->cq_lock);
spin_lock_init(&pfob->post_lock);
spin_lock_init(&pfob->mcc_context_lock);
/* NOTE(review): hard-coded rather than read from config space -- confirm. */
pfob->pci_function_number = 1;
pfob->emulate = false;
TRACE(DL_NOTE, "Non-emulation mode");
status = be_drive_POST(pfob);
if (status != BE_SUCCESS) {
TRACE(DL_ERR, "BladeEngine POST failed.");
goto error;
}
/* Initialize the mailbox */
status = be_mpu_init_mailbox(pfob, mailbox);
if (status != BE_SUCCESS) {
TRACE(DL_ERR, "Failed to initialize mailbox.");
goto error;
}
/*
 * Cache the firmware config for ASSERTs in hwclib and later
 * driver queries.
 */
status = be_function_internal_query_firmware_config(pfob,
&pfob->fw_config);
if (status != BE_SUCCESS) {
TRACE(DL_ERR, "Failed to query firmware config.");
goto error;
}
/* The success path also falls through here; the guard below is a no-op then. */
error:
if (status != BE_SUCCESS) {
/* No cleanup necessary */
TRACE(DL_ERR, "Failed to create function.");
memset(pfob, 0, sizeof(*pfob));
}
return status;
}
/*
This routine drops the reference count on a given function object. Once
the reference count falls to zero, the function object is destroyed and all
resources held are freed.
FunctionObject - The function object to drop the reference to.
*/
/*
 * Release a function object.  The MCC ring must already have been torn
 * down (be_function_cleanup switches back to mailbox mode first); there is
 * nothing else to free, so this always reports success.
 */
int be_function_object_destroy(struct be_function_object *pfob)
{
	TRACE(DL_INFO, "Destroy pfob. Object:0x%p", pfob);

	/* The MCC ring must be gone before the object may be destroyed. */
	ASSERT(pfob->mcc == NULL);

	return BE_SUCCESS;
}
/*
 * Quiesce a function object prior to destroying it: disable multicast and
 * VLAN filtering, tear down the MCC ring (reverting to mailbox mode),
 * drain latent CEV interrupt status, then destroy the object.  Returns the
 * status of the final object destroy.
 *
 * Fix: the nested interrupt-drain if/else below was unbraced, a classic
 * dangling-else hazard; braces added with behavior unchanged.
 */
int be_function_cleanup(struct be_function_object *pfob)
{
int status = 0;
u32 isr;
u32 host_intr;
struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP ctrl;
if (pfob->type == BE_FUNCTION_TYPE_NETWORK) {
status = be_rxf_multicast_config(pfob, false, 0,
NULL, NULL, NULL, NULL);
ASSERT(status == BE_SUCCESS);
}
/* VLAN */
status = be_rxf_vlan_config(pfob, false, 0, NULL, NULL, NULL, NULL);
ASSERT(status == BE_SUCCESS);
/*
 * MCC Queue -- Switches to mailbox mode. May want to destroy
 * all but the MCC CQ before this call if polling CQ is much better
 * performance than polling mailbox register.
 */
if (pfob->mcc)
status = be_mcc_ring_destroy(pfob->mcc);
/*
 * If interrupts are disabled, clear any CEV interrupt assertions that
 * fired after we stopped processing EQs.
 */
ctrl.dw[0] = PCICFG1_READ(pfob, host_timer_int_ctrl);
host_intr = AMAP_GET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR,
hostintr, ctrl.dw);
if (!host_intr) {
/*
 * NOTE(review): the value read into isr is discarded;
 * presumably the CSR read itself clears the assertion --
 * confirm against the CEV register documentation.
 */
if (pfob->type == BE_FUNCTION_TYPE_NETWORK)
isr = CSR_READ(pfob, cev.isr1);
else
isr = CSR_READ(pfob, cev.isr0);
} else {
/* This should never happen... */
TRACE(DL_ERR, "function_cleanup called with interrupt enabled");
}
/* Function object destroy */
status = be_function_object_destroy(pfob);
ASSERT(status == BE_SUCCESS);
return status;
}
/*
 * Prepare a WRB carrying an *embedded* FWCMD: the request header and
 * payload live inside the WRB itself.  Returns a pointer to the FWCMD
 * request header, located at the WRB's payload offset.
 */
void *
be_function_prepare_embedded_fwcmd(struct be_function_object *pfob,
struct MCC_WRB_AMAP *wrb, u32 payld_len, u32 request_length,
u32 response_length, u32 opcode, u32 subsystem)
{
struct FWCMD_REQUEST_HEADER *header = NULL;
u32 n;
ASSERT(wrb);
/* AMAP offsets are in bits; /8 converts to the payload's byte offset. */
n = offsetof(struct BE_MCC_WRB_AMAP, payload)/8;
AMAP_SET_BITS_PTR(MCC_WRB, embedded, wrb, 1);
/*
 * NOTE(review): payload_length is capped at n (the payload byte
 * offset) -- presumably that equals the embedded payload capacity;
 * confirm against the BE_MCC_WRB_AMAP layout.
 */
AMAP_SET_BITS_PTR(MCC_WRB, payload_length, wrb, min(payld_len, n));
header = (struct FWCMD_REQUEST_HEADER *)((u8 *)wrb + n);
header->timeout = 0;
header->domain = 0;
/* One buffer serves both directions, so size it for the larger. */
header->request_length = max(request_length, response_length);
header->opcode = opcode;
header->subsystem = subsystem;
return header;
}
/*
 * Prepare a WRB carrying a *non-embedded* FWCMD: the command lives in an
 * external buffer (@fwcmd_va/@fwcmd_pa) referenced by a one-entry SGL in
 * the WRB.  Returns the FWCMD request header (== @fwcmd_va).
 */
void *
be_function_prepare_nonembedded_fwcmd(struct be_function_object *pfob,
struct MCC_WRB_AMAP *wrb,
void *fwcmd_va, u64 fwcmd_pa,
u32 payld_len,
u32 request_length,
u32 response_length,
u32 opcode, u32 subsystem)
{
struct FWCMD_REQUEST_HEADER *header = NULL;
u32 n;
struct MCC_WRB_PAYLOAD_AMAP *plp;
ASSERT(wrb);
ASSERT(fwcmd_va);
header = (struct FWCMD_REQUEST_HEADER *) fwcmd_va;
AMAP_SET_BITS_PTR(MCC_WRB, embedded, wrb, 0);
AMAP_SET_BITS_PTR(MCC_WRB, payload_length, wrb, payld_len);
/*
 * Assume one fragment. The caller may override the SGL by
 * rewriting the 0th length and adding more entries. They
 * will also need to update the sge_count.
 */
AMAP_SET_BITS_PTR(MCC_WRB, sge_count, wrb, 1);
/* AMAP offsets are in bits; /8 converts to the payload's byte offset. */
n = offsetof(struct BE_MCC_WRB_AMAP, payload)/8;
plp = (struct MCC_WRB_PAYLOAD_AMAP *)((u8 *)wrb + n);
AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].length, plp, payld_len);
AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].pa_lo, plp, (u32)fwcmd_pa);
AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].pa_hi, plp,
upper_32_bits(fwcmd_pa));
header->timeout = 0;
header->domain = 0;
/* One buffer serves both directions, so size it for the larger. */
header->request_length = max(request_length, response_length);
header->opcode = opcode;
header->subsystem = subsystem;
return header;
}
struct MCC_WRB_AMAP *
be_function_peek_mcc_wrb(struct be_function_object *pfob)
{
struct MCC_WRB_AMAP *wrb = NULL;
u32 offset;
if (pfob->mcc)
wrb = _be_mpu_peek_ring_wrb(pfob->mcc, false);
else {
offset = offsetof(struct BE_MCC_MAILBOX_AMAP, wrb)/8;
wrb = (struct MCC_WRB_AMAP *) ((u8 *) pfob->mailbox.va +
offset);
}
if (wrb)
memset(wrb, 0, sizeof(struct MCC_WRB_AMAP));
return wrb;
}
#if defined(BE_DEBUG)
/*
 * Debug-build helper: record the opcode/subsystem of the FWCMD being posted
 * into @wrb_context so the completion path can cross-check them.  Compiles
 * to nothing in non-debug builds (see the stub macro in the #else branch).
 */
void be_function_debug_print_wrb(struct be_function_object *pfob,
struct MCC_WRB_AMAP *wrb, void *optional_fwcmd_va,
struct be_mcc_wrb_context *wrb_context)
{
struct FWCMD_REQUEST_HEADER *header = NULL;
u8 embedded;
u32 n;
embedded = AMAP_GET_BITS_PTR(MCC_WRB, embedded, wrb);
if (embedded) {
/* Embedded FWCMD: header lives inside the WRB at the payload offset. */
n = offsetof(struct BE_MCC_WRB_AMAP, payload)/8;
header = (struct FWCMD_REQUEST_HEADER *)((u8 *)wrb + n);
} else {
/* Non-embedded FWCMD: header is in the caller-supplied buffer. */
header = (struct FWCMD_REQUEST_HEADER *) optional_fwcmd_va;
}
/* Save the completed count before posting for a debug assert. */
if (header) {
wrb_context->opcode = header->opcode;
wrb_context->subsystem = header->subsystem;
} else {
wrb_context->opcode = 0;
wrb_context->subsystem = 0;
}
}
#else
#define be_function_debug_print_wrb(a_, b_, c_, d_)
#endif
/*
 * Post an MCC WRB to the hardware.  If @q_ctxt is non-NULL the request is
 * queued in the caller-provided context instead of being posted now.
 * Otherwise a WRB context is allocated to track callbacks and the optional
 * response copy (@rc), and the WRB is posted to the MCC ring or, when no
 * ring exists yet, to the bootstrap mailbox (which completes synchronously).
 * Caller must hold pfob->post_lock.
 */
int
be_function_post_mcc_wrb(struct be_function_object *pfob,
struct MCC_WRB_AMAP *wrb,
struct be_generic_q_ctxt *q_ctxt,
mcc_wrb_cqe_callback cb, void *cb_context,
mcc_wrb_cqe_callback internal_cb,
void *internal_cb_context, void *optional_fwcmd_va,
struct be_mcc_wrb_response_copy *rc)
{
int status;
struct be_mcc_wrb_context *wrb_context = NULL;
u64 *p;
if (q_ctxt) {
/* Initialize context. */
q_ctxt->context.internal_cb = internal_cb;
q_ctxt->context.internal_cb_context = internal_cb_context;
q_ctxt->context.cb = cb;
q_ctxt->context.cb_context = cb_context;
if (rc) {
q_ctxt->context.copy.length = rc->length;
q_ctxt->context.copy.fwcmd_offset = rc->fwcmd_offset;
q_ctxt->context.copy.va = rc->va;
} else
q_ctxt->context.copy.length = 0;
q_ctxt->context.optional_fwcmd_va = optional_fwcmd_va;
/* Queue this request */
status = be_function_queue_mcc_wrb(pfob, q_ctxt);
goto Error;
}
/*
 * Allocate a WRB context struct to hold the callback pointers,
 * status, etc. This is required if commands complete out of order.
 */
wrb_context = _be_mcc_allocate_wrb_context(pfob);
if (!wrb_context) {
TRACE(DL_WARN, "Failed to allocate MCC WRB context.");
status = BE_STATUS_SYSTEM_RESOURCES;
goto Error;
}
/* Initialize context. */
memset(wrb_context, 0, sizeof(*wrb_context));
wrb_context->internal_cb = internal_cb;
wrb_context->internal_cb_context = internal_cb_context;
wrb_context->cb = cb;
wrb_context->cb_context = cb_context;
if (rc) {
wrb_context->copy.length = rc->length;
wrb_context->copy.fwcmd_offset = rc->fwcmd_offset;
wrb_context->copy.va = rc->va;
} else
wrb_context->copy.length = 0;
wrb_context->wrb = wrb;
/*
 * Copy the context pointer into the WRB opaque tag field.
 * Verify assumption of 64-bit tag with a compile time assert.
 */
p = (u64 *) ((u8 *)wrb + offsetof(struct BE_MCC_WRB_AMAP, tag)/8);
*p = (u64)(size_t)wrb_context;
/* Print info about this FWCMD for debug builds. */
be_function_debug_print_wrb(pfob, wrb, optional_fwcmd_va, wrb_context);
/*
 * issue the WRB to the MPU as appropriate
 */
if (pfob->mcc) {
/*
 * we're in WRB mode, pass to the mcc layer
 */
status = _be_mpu_post_wrb_ring(pfob->mcc, wrb, wrb_context);
} else {
/*
 * we're in mailbox mode
 */
status = _be_mpu_post_wrb_mailbox(pfob, wrb, wrb_context);
/* mailbox mode always completes synchronously */
ASSERT(status != BE_STATUS_PENDING);
}
Error:
return status;
}
/*
 * Issue a COMMON_RING_DESTROY FWCMD for the ring identified by @id and
 * @ring_type (FWCMD_RING_TYPE_*).  Optional callbacks fire on completion;
 * may return BE_PENDING when a callback is supplied.
 */
int
be_function_ring_destroy(struct be_function_object *pfob,
u32 id, u32 ring_type, mcc_wrb_cqe_callback cb,
void *cb_context, mcc_wrb_cqe_callback internal_cb,
void *internal_cb_context)
{
struct FWCMD_COMMON_RING_DESTROY *fwcmd = NULL;
struct MCC_WRB_AMAP *wrb = NULL;
int status = 0;
unsigned long irql;
spin_lock_irqsave(&pfob->post_lock, irql);
TRACE(DL_INFO, "Destroy ring id:%d type:%d", id, ring_type);
wrb = be_function_peek_mcc_wrb(pfob);
if (!wrb) {
ASSERT(wrb);
TRACE(DL_ERR, "No free MCC WRBs in destroy ring.");
status = BE_STATUS_NO_MCC_WRB;
goto Error;
}
/* Prepares an embedded fwcmd, including request/response sizes. */
fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_RING_DESTROY);
fwcmd->params.request.id = id;
fwcmd->params.request.ring_type = ring_type;
/* Post the f/w command */
status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb, cb_context,
internal_cb, internal_cb_context, fwcmd, NULL);
if (status != BE_SUCCESS && status != BE_PENDING) {
TRACE(DL_ERR, "Ring destroy fwcmd failed. id:%d ring_type:%d",
id, ring_type);
goto Error;
}
Error:
spin_unlock_irqrestore(&pfob->post_lock, irql);
/* Drive any FWCMDs that were queued while post_lock was held. */
if (pfob->pend_queue_driving && pfob->mcc) {
pfob->pend_queue_driving = 0;
be_drive_mcc_wrb_queue(pfob->mcc);
}
return status;
}
/*
 * Fill @pa_list with per-page physical addresses for the ring described by
 * @rd, writing at most @max_num entries; pages beyond @max_num are silently
 * dropped.
 */
void
be_rd_to_pa_list(struct ring_desc *rd, struct PHYS_ADDR *pa_list, u32 max_num)
{
u32 num_pages = PAGES_SPANNED(rd->va, rd->length);
u32 i = 0;
u64 pa = rd->pa;
__le64 lepa;
ASSERT(pa_list);
ASSERT(pa);
for (i = 0; i < min(num_pages, max_num); i++) {
lepa = cpu_to_le64(pa);
/*
 * NOTE(review): lo/hi are split from the little-endian image
 * of pa, so on big-endian hosts each half holds byte-swapped
 * data.  Presumably PHYS_ADDR is defined to expect exactly
 * this layout for the hardware -- confirm.
 */
pa_list[i].lo = (u32)lepa;
pa_list[i].hi = upper_32_bits(lepa);
pa += PAGE_SIZE;
}
}
/*-----------------------------------------------------------------------------
* Function: be_function_get_fw_version
* Retrieves the firmware version on the adapter. If the callback is
* NULL this call executes synchronously. If the callback is not NULL,
* the returned status will be BE_PENDING if the command was issued
* successfully.
* pfob -
* fwv - Pointer to response buffer if callback is NULL.
* cb - Callback function invoked when the FWCMD completes.
* cb_context - Passed to the callback function.
* return pend_status - BE_SUCCESS (0) on success.
* BE_PENDING (positive value) if the FWCMD
* completion is pending. Negative error code on failure.
*---------------------------------------------------------------------------
*/
/*
 * Issue a COMMON_GET_FW_VERSION FWCMD.  Synchronous when @cb is NULL (the
 * response is copied into @fwv); otherwise may return BE_PENDING and invoke
 * @cb on completion.  At least one of @cb / @fwv must be supplied.
 */
int
be_function_get_fw_version(struct be_function_object *pfob,
struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD *fwv,
mcc_wrb_cqe_callback cb, void *cb_context)
{
int status = BE_SUCCESS;
struct MCC_WRB_AMAP *wrb = NULL;
struct FWCMD_COMMON_GET_FW_VERSION *fwcmd = NULL;
unsigned long irql;
struct be_mcc_wrb_response_copy rc;
spin_lock_irqsave(&pfob->post_lock, irql);
wrb = be_function_peek_mcc_wrb(pfob);
if (!wrb) {
TRACE(DL_ERR, "MCC wrb peek failed.");
status = BE_STATUS_NO_MCC_WRB;
goto Error;
}
/* Argument validation (peek has no side effect to undo on this path). */
if (!cb && !fwv) {
TRACE(DL_ERR, "callback and response buffer NULL!");
status = BE_NOT_OK;
goto Error;
}
/* Prepares an embedded fwcmd, including request/response sizes. */
fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_GET_FW_VERSION);
/* Describe where the response payload should be copied on completion. */
rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_GET_FW_VERSION,
params.response);
rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_GET_FW_VERSION,
params.response);
rc.va = fwv;
/* Post the f/w command */
status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb,
cb_context, NULL, NULL, fwcmd, &rc);
Error:
spin_unlock_irqrestore(&pfob->post_lock, irql);
/* Drive any FWCMDs that were queued while post_lock was held. */
if (pfob->pend_queue_driving && pfob->mcc) {
pfob->pend_queue_driving = 0;
be_drive_mcc_wrb_queue(pfob->mcc);
}
return status;
}
/*
 * Append a deferred FWCMD request to the MCC backlog.  Returns BE_PENDING
 * once queued, or BE_NOT_OK when no MCC ring exists (mailbox mode cannot
 * defer work).
 */
int
be_function_queue_mcc_wrb(struct be_function_object *pfob,
struct be_generic_q_ctxt *q_ctxt)
{
	ASSERT(q_ctxt);

	/* Only ring mode maintains a backlog of deferred requests. */
	if (!pfob->mcc)
		return BE_NOT_OK;

	pfob->mcc->backlog_length++;
	list_add_tail(&q_ctxt->context.list, &pfob->mcc->backlog);
	return BE_PENDING;
}

View File

@ -0,0 +1,222 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __fwcmd_common_amap_h__
#define __fwcmd_common_amap_h__
#include "host_struct.h"
/* Enumerations shared by FWCMDs (values are fixed by the firmware ABI). */
/* --- PHY_LINK_DUPLEX_ENUM --- */
#define PHY_LINK_DUPLEX_NONE (0)
#define PHY_LINK_DUPLEX_HALF (1)
#define PHY_LINK_DUPLEX_FULL (2)
/* --- PHY_LINK_SPEED_ENUM --- */
#define PHY_LINK_SPEED_ZERO (0) /* No link. */
#define PHY_LINK_SPEED_10MBPS (1) /* 10 Mbps */
#define PHY_LINK_SPEED_100MBPS (2) /* 100 Mbps */
#define PHY_LINK_SPEED_1GBPS (3) /* 1 Gbps */
#define PHY_LINK_SPEED_10GBPS (4) /* 10 Gbps */
/* --- PHY_LINK_FAULT_ENUM --- */
#define PHY_LINK_FAULT_NONE (0) /* No fault status
available or detected */
#define PHY_LINK_FAULT_LOCAL (1) /* Local fault detected */
#define PHY_LINK_FAULT_REMOTE (2) /* Remote fault detected */
/* --- BE_ULP_MASK --- */
#define BE_ULP0_MASK (1)
#define BE_ULP1_MASK (2)
#define BE_ULP2_MASK (4)
/* --- NTWK_ACTIVE_PORT --- */
#define NTWK_PORT_A (0) /* Port A is currently active */
#define NTWK_PORT_B (1) /* Port B is currently active */
#define NTWK_NO_ACTIVE_PORT (15) /* Both ports have lost link */
/* --- NTWK_LINK_TYPE --- */
#define NTWK_LINK_TYPE_PHYSICAL (0) /* link up/down event
applies to BladeEngine's
Physical Ports
*/
#define NTWK_LINK_TYPE_VIRTUAL (1) /* Virtual link up/down event
reported by BladeExchange.
This applies only when the
VLD feature is enabled
*/
/*
 * --- FWCMD_MAC_TYPE_ENUM ---
 * This enum defines the types of MAC addresses in the RXF MAC Address Table.
 */
#define MAC_ADDRESS_TYPE_STORAGE (0) /* Storage MAC Address */
#define MAC_ADDRESS_TYPE_NETWORK (1) /* Network MAC Address */
#define MAC_ADDRESS_TYPE_PD (2) /* Protection Domain MAC Addr */
#define MAC_ADDRESS_TYPE_MANAGEMENT (3) /* Management MAC Address */
/* --- FWCMD_RING_TYPE_ENUM --- */
#define FWCMD_RING_TYPE_ETH_RX (1) /* Ring created with */
/* FWCMD_COMMON_ETH_RX_CREATE. */
#define FWCMD_RING_TYPE_ETH_TX (2) /* Ring created with */
/* FWCMD_COMMON_ETH_TX_CREATE. */
#define FWCMD_RING_TYPE_ISCSI_WRBQ (3) /* Ring created with */
/* FWCMD_COMMON_ISCSI_WRBQ_CREATE. */
#define FWCMD_RING_TYPE_ISCSI_DEFQ (4) /* Ring created with */
/* FWCMD_COMMON_ISCSI_DEFQ_CREATE. */
#define FWCMD_RING_TYPE_TPM_WRBQ (5) /* Ring created with */
/* FWCMD_COMMON_TPM_WRBQ_CREATE. */
#define FWCMD_RING_TYPE_TPM_DEFQ (6) /* Ring created with */
/* FWCMD_COMMONTPM_TDEFQ_CREATE. */
#define FWCMD_RING_TYPE_TPM_RQ (7) /* Ring created with */
/* FWCMD_COMMON_TPM_RQ_CREATE. */
#define FWCMD_RING_TYPE_MCC (8) /* Ring created with */
/* FWCMD_COMMON_MCC_CREATE. */
#define FWCMD_RING_TYPE_CQ (9) /* Ring created with */
/* FWCMD_COMMON_CQ_CREATE. */
#define FWCMD_RING_TYPE_EQ (10) /* Ring created with */
/* FWCMD_COMMON_EQ_CREATE. */
#define FWCMD_RING_TYPE_QP (11) /* Ring created with */
/* FWCMD_RDMA_QP_CREATE. */
/* --- ETH_TX_RING_TYPE_ENUM --- */
#define ETH_TX_RING_TYPE_FORWARDING (1) /* Ethernet ring for
forwarding packets */
#define ETH_TX_RING_TYPE_STANDARD (2) /* Ethernet ring for sending
network packets. */
#define ETH_TX_RING_TYPE_BOUND (3) /* Ethernet ring bound to the
port specified in the command
header.port_number field.
Rings of this type are
NOT subject to the
failover logic implemented
in the BladeEngine.
*/
/* --- FWCMD_COMMON_QOS_TYPE_ENUM --- */
#define QOS_BITS_NIC (1) /* max_bits_per_second_NIC */
/* field is valid. */
#define QOS_PKTS_NIC (2) /* max_packets_per_second_NIC */
/* field is valid. */
#define QOS_IOPS_ISCSI (4) /* max_ios_per_second_iSCSI */
/*field is valid. */
#define QOS_VLAN_TAG (8) /* domain_VLAN_tag field
is valid. */
#define QOS_FABRIC_ID (16) /* fabric_domain_ID field
is valid. */
#define QOS_OEM_PARAMS (32) /* qos_params_oem field
is valid. */
#define QOS_TPUT_ISCSI (64) /* max_bytes_per_second_iSCSI
field is valid. */
/*
 * --- FAILOVER_CONFIG_ENUM ---
 * Failover configuration setting used in FWCMD_COMMON_FORCE_FAILOVER
 */
#define FAILOVER_CONFIG_NO_CHANGE (0) /* No change to automatic */
/* port failover setting. */
#define FAILOVER_CONFIG_ON (1) /* Automatic port failover
on link down is enabled. */
#define FAILOVER_CONFIG_OFF (2) /* Automatic port failover
on link down is disabled. */
/*
* --- FAILOVER_PORT_ENUM ---
* Failover port setting used in FWCMD_COMMON_FORCE_FAILOVER
*/
#define FAILOVER_PORT_A (0) /* Selects port A. */
#define FAILOVER_PORT_B (1) /* Selects port B. */
#define FAILOVER_PORT_NONE (15) /* No port change requested. */
/*
* --- MGMT_FLASHROM_OPCODE ---
* Flash ROM operation code
*/
#define MGMT_FLASHROM_OPCODE_FLASH (1) /* Commit downloaded data
to Flash ROM */
#define MGMT_FLASHROM_OPCODE_SAVE (2) /* Save downloaded data to
ARM's DDR - do not flash */
#define MGMT_FLASHROM_OPCODE_CLEAR (3) /* Erase specified component
from FlashROM */
#define MGMT_FLASHROM_OPCODE_REPORT (4) /* Read specified component
from Flash ROM */
#define MGMT_FLASHROM_OPCODE_IMAGE_INFO (5) /* Returns size of a
component */
/*
* --- MGMT_FLASHROM_OPTYPE ---
* Flash ROM operation type
*/
#define MGMT_FLASHROM_OPTYPE_CODE_FIRMWARE (0) /* Includes ARM firmware,
IPSec (optional) and EP
firmware */
#define MGMT_FLASHROM_OPTYPE_CODE_REDBOOT (1)
#define MGMT_FLASHROM_OPTYPE_CODE_BIOS (2)
#define MGMT_FLASHROM_OPTYPE_CODE_PXE_BIOS (3)
#define MGMT_FLASHROM_OPTYPE_CODE_CTRLS (4)
#define MGMT_FLASHROM_OPTYPE_CFG_IPSEC (5)
#define MGMT_FLASHROM_OPTYPE_CFG_INI (6)
#define MGMT_FLASHROM_OPTYPE_ROM_OFFSET_SPECIFIED (7)
/*
* --- FLASHROM_TYPE ---
* Flash ROM manufacturers supported in the f/w
*/
#define INTEL (0)
#define SPANSION (1)
#define MICRON (2)
/* --- DDR_CAS_TYPE --- */
#define CAS_3 (0)
#define CAS_4 (1)
#define CAS_5 (2)
/* --- DDR_SIZE_TYPE --- */
#define SIZE_256MB (0)
#define SIZE_512MB (1)
/* --- DDR_MODE_TYPE --- */
#define DDR_NO_ECC (0)
#define DDR_ECC (1)
/* --- INTERFACE_10GB_TYPE --- */
#define CX4_TYPE (0)
#define XFP_TYPE (1)
/* --- BE_CHIP_MAX_MTU --- */
#define CHIP_MAX_MTU (9000)
/* --- XAUI_STATE_ENUM --- */
#define XAUI_STATE_ENABLE (0) /* This MUST be the default
value for all requests
which set/change
equalization parameter. */
#define XAUI_STATE_DISABLE (255) /* The XAUI for both ports
may be disabled for EMI
tests. There is no
provision for turning off
individual ports.
*/
/* --- BE_ASIC_REVISION --- */
#define BE_ASIC_REV_A0 (1)
#define BE_ASIC_REV_A1 (2)
#endif /* __fwcmd_common_amap_h__ */

View File

@ -0,0 +1,717 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __fwcmd_common_bmap_h__
#define __fwcmd_common_bmap_h__
#include "fwcmd_types_bmap.h"
#include "fwcmd_hdr_bmap.h"
#if defined(__BIG_ENDIAN)
/* Physical Address.  64-bit DMA address split into lo/hi dwords, also
 * addressable as a raw two-dword array via the anonymous union. */
struct PHYS_ADDR {
	union {
		struct {
			u32 lo;	/* DWORD 0 */
			u32 hi;	/* DWORD 1 */
		} __packed;	/* unnamed struct */
		u32 dw[2];	/* dword union */
	};	/* unnamed union */
} __packed ;
#else
/* Physical Address. */
struct PHYS_ADDR {
	union {
		struct {
			u32 lo;	/* DWORD 0 */
			u32 hi;	/* DWORD 1 */
		} __packed;	/* unnamed struct */
		u32 dw[2];	/* dword union */
	};	/* unnamed union */
} __packed ;
/*
 * Per-port link state returned by FWCMD_COMMON_NTWK_LINK_STATUS_QUERY
 * (see LINK_STATUS_QUERY_PARAMS below).  Field values use the
 * PHY_LINK_DUPLEX/SPEED/FAULT and NTWK_ACTIVE_PORT constants.
 *
 * NOTE(review): in this view BE_LINK_STATUS is defined only in the
 * little-endian (#else) branch, yet it is referenced unconditionally
 * below — a __BIG_ENDIAN build would not compile.  Confirm against the
 * full file that a big-endian layout is defined in the first branch.
 */
struct BE_LINK_STATUS {
	u8 mac0_duplex;
	u8 mac0_speed;
	u8 mac1_duplex;
	u8 mac1_speed;
	u8 mgmt_mac_duplex;
	u8 mgmt_mac_speed;
	u8 active_port;
	u8 rsvd0;
	u8 mac0_fault;
	u8 mac1_fault;
	u16 rsvd1;
} __packed;
#endif
/* Empty request payload (reserved dword only). */
struct FWCMD_COMMON_ANON_170_REQUEST {
	u32 rsvd0;
} __packed;
/* Request/response overlay for the link status query. */
union LINK_STATUS_QUERY_PARAMS {
	struct BE_LINK_STATUS response;
	struct FWCMD_COMMON_ANON_170_REQUEST request;
} __packed;
/*
 * Queries the link status for all ports. The valid values below
 * DO NOT indicate that a particular duplex or speed is supported by
 * BladeEngine. These enumerations simply list all possible duplexes
 * and speeds for any port. Consult BladeEngine product documentation
 * for the supported parameters.
 */
struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY {
	union FWCMD_HEADER header;
	union LINK_STATUS_QUERY_PARAMS params;
} __packed;
/* MAC query request: type is a MAC_ADDRESS_TYPE_* value. */
struct FWCMD_COMMON_ANON_171_REQUEST {
	u8 type;
	u8 port;
	u8 mac1;
	u8 permanent;
} __packed;
struct FWCMD_COMMON_ANON_172_RESPONSE {
	struct MAC_ADDRESS_FORMAT mac;
} __packed;
union NTWK_MAC_QUERY_PARAMS {
	struct FWCMD_COMMON_ANON_171_REQUEST request;
	struct FWCMD_COMMON_ANON_172_RESPONSE response;
} __packed;
/* Queries one MAC address. */
struct FWCMD_COMMON_NTWK_MAC_QUERY {
	union FWCMD_HEADER header;
	union NTWK_MAC_QUERY_PARAMS params;
} __packed;
/* MAC set request: type is a MAC_ADDRESS_TYPE_* value. */
struct MAC_SET_PARAMS_IN {
	u8 type;
	u8 port;
	u8 mac1;
	u8 invalidate;
	struct MAC_ADDRESS_FORMAT mac;
} __packed;
struct MAC_SET_PARAMS_OUT {
	u32 rsvd0;
} __packed;
union MAC_SET_PARAMS {
	struct MAC_SET_PARAMS_IN request;
	struct MAC_SET_PARAMS_OUT response;
} __packed;
/* Sets a MAC address. */
struct FWCMD_COMMON_NTWK_MAC_SET {
	union FWCMD_HEADER header;
	union MAC_SET_PARAMS params;
} __packed;
/* MAC address list. */
struct NTWK_MULTICAST_MAC_LIST {
	u8 byte[6];
} __packed;
struct FWCMD_COMMON_NTWK_MULTICAST_SET_REQUEST_PAYLOAD {
	u16 num_mac;
	u8 promiscuous;
	u8 rsvd0;
	struct NTWK_MULTICAST_MAC_LIST mac[32];
} __packed;
struct FWCMD_COMMON_ANON_174_RESPONSE {
	u32 rsvd0;
} __packed;
union FWCMD_COMMON_ANON_173_PARAMS {
	struct FWCMD_COMMON_NTWK_MULTICAST_SET_REQUEST_PAYLOAD request;
	struct FWCMD_COMMON_ANON_174_RESPONSE response;
} __packed;
/*
 * Sets multicast address hash. The MPU will merge the MAC address lists
 * from all clients, including the networking and storage functions.
 * This command may fail if the final merged list of MAC addresses exceeds
 * 32 entries.
 */
struct FWCMD_COMMON_NTWK_MULTICAST_SET {
	union FWCMD_HEADER header;
	union FWCMD_COMMON_ANON_173_PARAMS params;
} __packed;
/* VLAN filter request: up to 32 tags, or promiscuous to accept all. */
struct FWCMD_COMMON_NTWK_VLAN_CONFIG_REQUEST_PAYLOAD {
	u16 num_vlan;
	u8 promiscuous;
	u8 rsvd0;
	u16 vlan_tag[32];
} __packed;
struct FWCMD_COMMON_ANON_176_RESPONSE {
	u32 rsvd0;
} __packed;
union FWCMD_COMMON_ANON_175_PARAMS {
	struct FWCMD_COMMON_NTWK_VLAN_CONFIG_REQUEST_PAYLOAD request;
	struct FWCMD_COMMON_ANON_176_RESPONSE response;
} __packed;
/*
 * Sets VLAN tag filter. The MPU will merge the VLAN tag list from all
 * clients, including the networking and storage functions. This command
 * may fail if the final vlan_tag array (from all functions) is longer
 * than 32 entries.
 */
struct FWCMD_COMMON_NTWK_VLAN_CONFIG {
	union FWCMD_HEADER header;
	union FWCMD_COMMON_ANON_175_PARAMS params;
} __packed;
/* Ring destroy request: ring_type is a FWCMD_RING_TYPE_* value. */
struct RING_DESTROY_REQUEST {
	u16 ring_type;
	u16 id;
	u8 bypass_flush;
	u8 rsvd0;
	u16 rsvd1;
} __packed;
struct FWCMD_COMMON_ANON_190_RESPONSE {
	u32 rsvd0;
} __packed;
union FWCMD_COMMON_ANON_189_PARAMS {
	struct RING_DESTROY_REQUEST request;
	struct FWCMD_COMMON_ANON_190_RESPONSE response;
} __packed;
/*
 * Command for destroying any ring. The connection(s) using the ring should
 * be quiesced before destroying the ring.
 */
struct FWCMD_COMMON_RING_DESTROY {
	union FWCMD_HEADER header;
	union FWCMD_COMMON_ANON_189_PARAMS params;
} __packed;
/* CQ create request: page list plus hardware CQ context. */
struct FWCMD_COMMON_ANON_192_REQUEST {
	u16 num_pages;
	u16 rsvd0;
	struct CQ_CONTEXT_AMAP context;
	struct PHYS_ADDR pages[4];
} __packed ;
struct FWCMD_COMMON_ANON_193_RESPONSE {
	u16 cq_id;
} __packed ;
union FWCMD_COMMON_ANON_191_PARAMS {
	struct FWCMD_COMMON_ANON_192_REQUEST request;
	struct FWCMD_COMMON_ANON_193_RESPONSE response;
} __packed ;
/*
 * Command for creating a completion queue. A Completion Queue must span
 * at least 1 page and at most 4 pages. Each completion queue entry
 * is 16 bytes regardless of CQ entry format. Thus the ring must be
 * at least 256 entries deep (corresponding to 1 page) and can be at
 * most 1024 entries deep (corresponding to 4 pages). The number of
 * pages posted must contain the CQ ring size as encoded in the context.
 *
 */
struct FWCMD_COMMON_CQ_CREATE {
	union FWCMD_HEADER header;
	union FWCMD_COMMON_ANON_191_PARAMS params;
} __packed ;
/* EQ create request: page list plus hardware EQ context. */
struct FWCMD_COMMON_ANON_198_REQUEST {
	u16 num_pages;
	u16 rsvd0;
	struct EQ_CONTEXT_AMAP context;
	struct PHYS_ADDR pages[8];
} __packed ;
struct FWCMD_COMMON_ANON_199_RESPONSE {
	u16 eq_id;
} __packed ;
union FWCMD_COMMON_ANON_197_PARAMS {
	struct FWCMD_COMMON_ANON_198_REQUEST request;
	struct FWCMD_COMMON_ANON_199_RESPONSE response;
} __packed ;
/*
 * Command for creating a event queue. An Event Queue must span at least
 * 1 page and at most 8 pages. The number of pages posted must contain
 * the EQ ring. The ring is defined by the size of the EQ entries (encoded
 * in the context) and the number of EQ entries (also encoded in the
 * context).
 */
struct FWCMD_COMMON_EQ_CREATE {
	union FWCMD_HEADER header;
	union FWCMD_COMMON_ANON_197_PARAMS params;
} __packed ;
/* Ethernet RX ring create request: completion queue ids + page list. */
struct FWCMD_COMMON_ANON_201_REQUEST {
	u16 cq_id;
	u16 bcmc_cq_id;
	u16 num_pages;
	u16 rsvd0;
	struct PHYS_ADDR pages[2];
} __packed;
struct FWCMD_COMMON_ANON_202_RESPONSE {
	u16 id;
} __packed;
union FWCMD_COMMON_ANON_200_PARAMS {
	struct FWCMD_COMMON_ANON_201_REQUEST request;
	struct FWCMD_COMMON_ANON_202_RESPONSE response;
} __packed;
/*
 * Command for creating Ethernet receive ring. An ERX ring contains ETH_RX_D
 * entries (8 bytes each). An ERX ring must be 1024 entries deep
 * (corresponding to 2 pages).
 */
struct FWCMD_COMMON_ETH_RX_CREATE {
	union FWCMD_HEADER header;
	union FWCMD_COMMON_ANON_200_PARAMS params;
} __packed;
/* Ethernet TX ring create request: type is an ETH_TX_RING_TYPE_* value. */
struct FWCMD_COMMON_ANON_204_REQUEST {
	u16 num_pages;
	u8 ulp_num;
	u8 type;
	struct ETX_CONTEXT_AMAP context;
	struct PHYS_ADDR pages[8];
} __packed ;
struct FWCMD_COMMON_ANON_205_RESPONSE {
	u16 cid;
	u8 ulp_num;
	u8 rsvd0;
} __packed ;
union FWCMD_COMMON_ANON_203_PARAMS {
	struct FWCMD_COMMON_ANON_204_REQUEST request;
	struct FWCMD_COMMON_ANON_205_RESPONSE response;
} __packed ;
/*
 * Command for creating an Ethernet transmit ring. An ETX ring contains
 * ETH_WRB entries (16 bytes each). An ETX ring must be at least 256
 * entries deep (corresponding to 1 page) and at most 2k entries deep
 * (corresponding to 8 pages).
 */
struct FWCMD_COMMON_ETH_TX_CREATE {
	union FWCMD_HEADER header;
	union FWCMD_COMMON_ANON_203_PARAMS params;
} __packed ;
/* MCC ring create request: page list plus hardware MCC ring context. */
struct FWCMD_COMMON_ANON_222_REQUEST {
	u16 num_pages;
	u16 rsvd0;
	struct MCC_RING_CONTEXT_AMAP context;
	struct PHYS_ADDR pages[8];
} __packed ;
struct FWCMD_COMMON_ANON_223_RESPONSE {
	u16 id;
} __packed ;
union FWCMD_COMMON_ANON_221_PARAMS {
	struct FWCMD_COMMON_ANON_222_REQUEST request;
	struct FWCMD_COMMON_ANON_223_RESPONSE response;
} __packed ;
/*
 * Command for creating the MCC ring. An MCC ring must be at least 16
 * entries deep (corresponding to 1 page) and at most 128 entries deep
 * (corresponding to 8 pages).
 */
struct FWCMD_COMMON_MCC_CREATE {
	union FWCMD_HEADER header;
	union FWCMD_COMMON_ANON_221_PARAMS params;
} __packed ;
struct GET_QOS_IN {
	u32 qos_params_rsvd;
} __packed;
struct GET_QOS_OUT {
	u32 max_bits_per_second_NIC;
	u32 max_packets_per_second_NIC;
	u32 max_ios_per_second_iSCSI;
	u32 max_bytes_per_second_iSCSI;
	u16 domain_VLAN_tag;
	u16 fabric_domain_ID;
	u32 qos_params_oem[4];
} __packed;
union GET_QOS_PARAMS {
	struct GET_QOS_IN request;
	struct GET_QOS_OUT response;
} __packed;
/* QOS/Bandwidth settings per domain. Applicable only in VMs. */
struct FWCMD_COMMON_GET_QOS {
	union FWCMD_HEADER header;
	union GET_QOS_PARAMS params;
} __packed;
/* QOS set request: valid_flags is a mask of QOS_* bits marking which
 * of the following fields the firmware should apply. */
struct SET_QOS_IN {
	u32 valid_flags;
	u32 max_bits_per_second_NIC;
	u32 max_packets_per_second_NIC;
	u32 max_ios_per_second_iSCSI;
	u32 max_bytes_per_second_iSCSI;
	u16 domain_VLAN_tag;
	u16 fabric_domain_ID;
	u32 qos_params_oem[4];
} __packed;
struct SET_QOS_OUT {
	u32 qos_params_rsvd;
} __packed;
union SET_QOS_PARAMS {
	struct SET_QOS_IN request;
	struct SET_QOS_OUT response;
} __packed;
/* QOS/Bandwidth settings per domain. Applicable only in VMs. */
struct FWCMD_COMMON_SET_QOS {
	union FWCMD_HEADER header;
	union SET_QOS_PARAMS params;
} __packed;
struct SET_FRAME_SIZE_IN {
	u32 max_tx_frame_size;
	u32 max_rx_frame_size;
} __packed;
/* Response reports the frame sizes actually applied by the chip. */
struct SET_FRAME_SIZE_OUT {
	u32 chip_max_tx_frame_size;
	u32 chip_max_rx_frame_size;
} __packed;
union SET_FRAME_SIZE_PARAMS {
	struct SET_FRAME_SIZE_IN request;
	struct SET_FRAME_SIZE_OUT response;
} __packed;
/* Set frame size command. Only host domain may issue this command. */
struct FWCMD_COMMON_SET_FRAME_SIZE {
	union FWCMD_HEADER header;
	union SET_FRAME_SIZE_PARAMS params;
} __packed;
/* Failover request: move_to_port is a FAILOVER_PORT_* value,
 * failover_config is a FAILOVER_CONFIG_* value. */
struct FORCE_FAILOVER_IN {
	u32 move_to_port;
	u32 failover_config;
} __packed;
struct FWCMD_COMMON_ANON_231_RESPONSE {
	u32 rsvd0;
} __packed;
union FWCMD_COMMON_ANON_230_PARAMS {
	struct FORCE_FAILOVER_IN request;
	struct FWCMD_COMMON_ANON_231_RESPONSE response;
} __packed;
/*
 * Use this command to control failover in BladeEngine. It may be used
 * to failback to a restored port or to forcibly move traffic from
 * one port to another. It may also be used to enable or disable the
 * automatic failover feature. This command can only be issued by domain
 * 0.
 */
struct FWCMD_COMMON_FORCE_FAILOVER {
	union FWCMD_HEADER header;
	union FWCMD_COMMON_ANON_230_PARAMS params;
} __packed;
/* Opaque 64-bit cookie echoed back unmodified by the firmware. */
struct FWCMD_COMMON_ANON_240_REQUEST {
	u64 context;
} __packed;
struct FWCMD_COMMON_ANON_241_RESPONSE {
	u64 context;
} __packed;
union FWCMD_COMMON_ANON_239_PARAMS {
	struct FWCMD_COMMON_ANON_240_REQUEST request;
	struct FWCMD_COMMON_ANON_241_RESPONSE response;
} __packed;
/*
 * This command can be used by clients as a no-operation request. Typical
 * uses for drivers are as a heartbeat mechanism, or deferred processing
 * catalyst. The ARM will always complete this command with a good completion.
 * The 64-bit parameter is not touched by the ARM processor.
 */
struct FWCMD_COMMON_NOP {
	union FWCMD_HEADER header;
	union FWCMD_COMMON_ANON_239_PARAMS params;
} __packed;
/* Ethernet receive filter switches; each field is a boolean flag. */
struct NTWK_RX_FILTER_SETTINGS {
	u8 promiscuous;
	u8 ip_cksum;
	u8 tcp_cksum;
	u8 udp_cksum;
	u8 pass_err;
	u8 pass_ckerr;
	u8 strip_crc;
	u8 mcast_en;
	u8 bcast_en;
	u8 mcast_promiscuous_en;
	u8 unicast_en;
	u8 vlan_promiscuous;
} __packed;
union FWCMD_COMMON_ANON_242_PARAMS {
	struct NTWK_RX_FILTER_SETTINGS request;
	struct NTWK_RX_FILTER_SETTINGS response;
} __packed;
/*
 * This command is used to modify the ethernet receive filter configuration.
 * Only domain 0 network function drivers may issue this command. The
 * applied configuration is returned in the response payload. Note:
 * Some receive packet filter settings are global on BladeEngine and
 * can affect both the storage and network function clients that the
 * BladeEngine hardware and firmware serve. Additionally, depending
 * on the revision of BladeEngine, some ethernet receive filter settings
 * are dependent on others. If a dependency exists between settings
 * for the BladeEngine revision, and the command request settings do
 * not meet the dependency requirement, the invalid settings will not
 * be applied despite the command succeeding. For example: a driver may
 * request to enable broadcast packets, but not enable multicast packets.
 * On early revisions of BladeEngine, there may be no distinction between
 * broadcast and multicast filters, so broadcast could not be enabled
 * without enabling multicast. In this scenario, the command would still
 * succeed, but the response payload would indicate the previously
 * configured broadcast and multicast setting.
 */
struct FWCMD_COMMON_NTWK_RX_FILTER {
	union FWCMD_HEADER header;
	union FWCMD_COMMON_ANON_242_PARAMS params;
} __packed;
struct FWCMD_COMMON_ANON_244_REQUEST {
	u32 rsvd0;
} __packed;
/* NUL-terminated version strings: running firmware and flash image. */
struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD {
	u8 firmware_version_string[32];
	u8 fw_on_flash_version_string[32];
} __packed;
union FWCMD_COMMON_ANON_243_PARAMS {
	struct FWCMD_COMMON_ANON_244_REQUEST request;
	struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD response;
} __packed;
/* This command retrieves the firmware version. */
struct FWCMD_COMMON_GET_FW_VERSION {
	union FWCMD_HEADER header;
	union FWCMD_COMMON_ANON_243_PARAMS params;
} __packed;
struct FWCMD_COMMON_ANON_246_REQUEST {
	u16 tx_flow_control;
	u16 rx_flow_control;
} __packed;
struct FWCMD_COMMON_ANON_247_RESPONSE {
	u32 rsvd0;
} __packed;
union FWCMD_COMMON_ANON_245_PARAMS {
	struct FWCMD_COMMON_ANON_246_REQUEST request;
	struct FWCMD_COMMON_ANON_247_RESPONSE response;
} __packed;
/*
 * This command is used to program BladeEngine flow control behavior.
 * Only the host networking driver is allowed to use this command.
 */
struct FWCMD_COMMON_SET_FLOW_CONTROL {
	union FWCMD_HEADER header;
	union FWCMD_COMMON_ANON_245_PARAMS params;
} __packed;
struct FWCMD_COMMON_ANON_249_REQUEST {
	u32 rsvd0;
} __packed;
struct FWCMD_COMMON_ANON_250_RESPONSE {
	u16 tx_flow_control;
	u16 rx_flow_control;
} __packed;
union FWCMD_COMMON_ANON_248_PARAMS {
	struct FWCMD_COMMON_ANON_249_REQUEST request;
	struct FWCMD_COMMON_ANON_250_RESPONSE response;
} __packed;
/* This command is used to read BladeEngine flow control settings. */
struct FWCMD_COMMON_GET_FLOW_CONTROL {
	union FWCMD_HEADER header;
	union FWCMD_COMMON_ANON_248_PARAMS params;
} __packed;
/* One (EQ id, interrupt delay) pair for the modify-EQ-delay command. */
struct EQ_DELAY_PARAMS {
	u32 eq_id;
	u32 delay_in_microseconds;
} __packed;
struct FWCMD_COMMON_ANON_257_REQUEST {
	u32 num_eq;
	u32 rsvd0;
	struct EQ_DELAY_PARAMS delay[16];
} __packed;
/* Response reports the firmware's delay granularity and ceiling. */
struct FWCMD_COMMON_ANON_258_RESPONSE {
	u32 delay_resolution_in_microseconds;
	u32 delay_max_in_microseconds;
} __packed;
union MODIFY_EQ_DELAY_PARAMS {
	struct FWCMD_COMMON_ANON_257_REQUEST request;
	struct FWCMD_COMMON_ANON_258_RESPONSE response;
} __packed;
/* This command changes the EQ delay for a given set of EQs. */
struct FWCMD_COMMON_MODIFY_EQ_DELAY {
	union FWCMD_HEADER header;
	union MODIFY_EQ_DELAY_PARAMS params;
} __packed;
struct FWCMD_COMMON_ANON_260_REQUEST {
	u32 rsvd0;
} __packed;
/* Static firmware configuration; asic_revision uses BE_ASIC_REV_*,
 * the *_ulp_mask fields use the BE_ULP*_MASK bits. */
struct BE_FIRMWARE_CONFIG {
	u16 be_config_number;
	u16 asic_revision;
	u32 nic_ulp_mask;
	u32 tulp_mask;
	u32 iscsi_ulp_mask;
	u32 rdma_ulp_mask;
	u32 rsvd0[4];
	u32 eth_tx_id_start;
	u32 eth_tx_id_count;
	u32 eth_rx_id_start;
	u32 eth_rx_id_count;
	u32 tpm_wrbq_id_start;
	u32 tpm_wrbq_id_count;
	u32 tpm_defq_id_start;
	u32 tpm_defq_id_count;
	u32 iscsi_wrbq_id_start;
	u32 iscsi_wrbq_id_count;
	u32 iscsi_defq_id_start;
	u32 iscsi_defq_id_count;
	u32 rdma_qp_id_start;
	u32 rdma_qp_id_count;
	u32 rsvd1[8];
} __packed;
union FWCMD_COMMON_ANON_259_PARAMS {
	struct FWCMD_COMMON_ANON_260_REQUEST request;
	struct BE_FIRMWARE_CONFIG response;
} __packed;
/*
 * This command queries the current firmware configuration parameters.
 * The static configuration type is defined by be_config_number. This
 * differentiates different BladeEngine builds, such as iSCSI Initiator
 * versus iSCSI Target. For a given static configuration, the Upper
 * Layer Protocol (ULP) processors may be reconfigured to support different
 * protocols. Each ULP processor supports one or more protocols. The
 * masks indicate which processors are configured for each protocol.
 * For a given static configuration, the number of TCP connections
 * supported for each protocol may vary. The *_id_start and *_id_count
 * variables define a linear range of IDs that are available for each
 * supported protocol. The *_id_count may be used by the driver to allocate
 * the appropriate number of connection resources. The *_id_start may
 * be used to map the arbitrary range of IDs to a zero-based range
 * of indices.
 */
struct FWCMD_COMMON_FIRMWARE_CONFIG {
	union FWCMD_HEADER header;
	union FWCMD_COMMON_ANON_259_PARAMS params;
} __packed;
/* XAUI equalization parameters; xaui_state uses XAUI_STATE_* values. */
struct FWCMD_COMMON_PORT_EQUALIZATION_PARAMS {
	u32 emph_lev_sel_port0;
	u32 emph_lev_sel_port1;
	u8 xaui_vo_sel;
	u8 xaui_state;
	u16 rsvd0;
	u32 xaui_eq_vector;
} __packed;
struct FWCMD_COMMON_ANON_262_REQUEST {
	u32 rsvd0;
} __packed;
union FWCMD_COMMON_ANON_261_PARAMS {
	struct FWCMD_COMMON_ANON_262_REQUEST request;
	struct FWCMD_COMMON_PORT_EQUALIZATION_PARAMS response;
} __packed;
/*
 * This command can be used to read XAUI equalization parameters. The
 * ARM firmware applies default equalization parameters during initialization.
 * These parameters may be customer-specific when derived from the
 * SEEPROM. See SEEPROM_DATA for equalization specific fields.
 */
struct FWCMD_COMMON_GET_PORT_EQUALIZATION {
	union FWCMD_HEADER header;
	union FWCMD_COMMON_ANON_261_PARAMS params;
} __packed;
struct FWCMD_COMMON_ANON_264_RESPONSE {
	u32 rsvd0;
} __packed;
union FWCMD_COMMON_ANON_263_PARAMS {
	struct FWCMD_COMMON_PORT_EQUALIZATION_PARAMS request;
	struct FWCMD_COMMON_ANON_264_RESPONSE response;
} __packed;
/*
 * This command can be used to set XAUI equalization parameters. The ARM
 * firmware applies default equalization parameters during initialization.
 * These parameters may be customer-specific when derived from the
 * SEEPROM. See SEEPROM_DATA for equalization specific fields.
 */
struct FWCMD_COMMON_SET_PORT_EQUALIZATION {
	union FWCMD_HEADER header;
	union FWCMD_COMMON_ANON_263_PARAMS params;
} __packed;
#endif /* __fwcmd_common_bmap_h__ */

View File

@ -0,0 +1,280 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __fwcmd_eth_bmap_h__
#define __fwcmd_eth_bmap_h__
#include "fwcmd_hdr_bmap.h"
#include "fwcmd_types_bmap.h"
struct MIB_ETH_STATISTICS_PARAMS_IN {
	u32 rsvd0;
} __packed;
/*
 * RXF hardware statistics block returned by FWCMD_ETH_GET_STATISTICS.
 * Counters are grouped per physical port (p0*, p1*) with chip-wide
 * counters (px*) at the end; the DWORD comments give each counter's
 * offset in the response payload.
 */
struct BE_RXF_STATS {
	u32 p0recvdtotalbytesLSD;	/* DWORD 0 */
	u32 p0recvdtotalbytesMSD;	/* DWORD 1 */
	u32 p0recvdtotalframes;	/* DWORD 2 */
	u32 p0recvdunicastframes;	/* DWORD 3 */
	u32 p0recvdmulticastframes;	/* DWORD 4 */
	u32 p0recvdbroadcastframes;	/* DWORD 5 */
	u32 p0crcerrors;	/* DWORD 6 */
	u32 p0alignmentsymerrs;	/* DWORD 7 */
	u32 p0pauseframesrecvd;	/* DWORD 8 */
	u32 p0controlframesrecvd;	/* DWORD 9 */
	u32 p0inrangelenerrors;	/* DWORD 10 */
	u32 p0outrangeerrors;	/* DWORD 11 */
	u32 p0frametoolongerrors;	/* DWORD 12 */
	u32 p0droppedaddressmatch;	/* DWORD 13 */
	u32 p0droppedvlanmismatch;	/* DWORD 14 */
	u32 p0ipdroppedtoosmall;	/* DWORD 15 */
	u32 p0ipdroppedtooshort;	/* DWORD 16 */
	u32 p0ipdroppedhdrtoosmall;	/* DWORD 17 */
	u32 p0tcpdroppedlen;	/* DWORD 18 */
	u32 p0droppedrunt;	/* DWORD 19 */
	u32 p0recvd64;	/* DWORD 20 */
	u32 p0recvd65_127;	/* DWORD 21 */
	u32 p0recvd128_256;	/* DWORD 22 */
	u32 p0recvd256_511;	/* DWORD 23 */
	u32 p0recvd512_1023;	/* DWORD 24 */
	u32 p0recvd1518_1522;	/* DWORD 25 */
	u32 p0recvd1522_2047;	/* DWORD 26 */
	u32 p0recvd2048_4095;	/* DWORD 27 */
	u32 p0recvd4096_8191;	/* DWORD 28 */
	u32 p0recvd8192_9216;	/* DWORD 29 */
	u32 p0rcvdipcksmerrs;	/* DWORD 30 */
	u32 p0recvdtcpcksmerrs;	/* DWORD 31 */
	u32 p0recvdudpcksmerrs;	/* DWORD 32 */
	u32 p0recvdnonrsspackets;	/* DWORD 33 */
	u32 p0recvdippackets;	/* DWORD 34 */
	u32 p0recvdchute1packets;	/* DWORD 35 */
	u32 p0recvdchute2packets;	/* DWORD 36 */
	u32 p0recvdchute3packets;	/* DWORD 37 */
	u32 p0recvdipsecpackets;	/* DWORD 38 */
	u32 p0recvdmanagementpackets;	/* DWORD 39 */
	u32 p0xmitbyteslsd;	/* DWORD 40 */
	u32 p0xmitbytesmsd;	/* DWORD 41 */
	u32 p0xmitunicastframes;	/* DWORD 42 */
	u32 p0xmitmulticastframes;	/* DWORD 43 */
	u32 p0xmitbroadcastframes;	/* DWORD 44 */
	u32 p0xmitpauseframes;	/* DWORD 45 */
	u32 p0xmitcontrolframes;	/* DWORD 46 */
	u32 p0xmit64;	/* DWORD 47 */
	u32 p0xmit65_127;	/* DWORD 48 */
	u32 p0xmit128_256;	/* DWORD 49 */
	u32 p0xmit256_511;	/* DWORD 50 */
	u32 p0xmit512_1023;	/* DWORD 51 */
	u32 p0xmit1518_1522;	/* DWORD 52 */
	u32 p0xmit1522_2047;	/* DWORD 53 */
	u32 p0xmit2048_4095;	/* DWORD 54 */
	u32 p0xmit4096_8191;	/* DWORD 55 */
	u32 p0xmit8192_9216;	/* DWORD 56 */
	u32 p0rxfifooverflowdropped;	/* DWORD 57 */
	u32 p0ipseclookupfaileddropped;	/* DWORD 58 */
	u32 p1recvdtotalbytesLSD;	/* DWORD 59 */
	u32 p1recvdtotalbytesMSD;	/* DWORD 60 */
	u32 p1recvdtotalframes;	/* DWORD 61 */
	u32 p1recvdunicastframes;	/* DWORD 62 */
	u32 p1recvdmulticastframes;	/* DWORD 63 */
	u32 p1recvdbroadcastframes;	/* DWORD 64 */
	u32 p1crcerrors;	/* DWORD 65 */
	u32 p1alignmentsymerrs;	/* DWORD 66 */
	u32 p1pauseframesrecvd;	/* DWORD 67 */
	u32 p1controlframesrecvd;	/* DWORD 68 */
	u32 p1inrangelenerrors;	/* DWORD 69 */
	u32 p1outrangeerrors;	/* DWORD 70 */
	u32 p1frametoolongerrors;	/* DWORD 71 */
	u32 p1droppedaddressmatch;	/* DWORD 72 */
	u32 p1droppedvlanmismatch;	/* DWORD 73 */
	u32 p1ipdroppedtoosmall;	/* DWORD 74 */
	u32 p1ipdroppedtooshort;	/* DWORD 75 */
	u32 p1ipdroppedhdrtoosmall;	/* DWORD 76 */
	u32 p1tcpdroppedlen;	/* DWORD 77 */
	u32 p1droppedrunt;	/* DWORD 78 */
	u32 p1recvd64;	/* DWORD 79 */
	u32 p1recvd65_127;	/* DWORD 80 */
	u32 p1recvd128_256;	/* DWORD 81 */
	u32 p1recvd256_511;	/* DWORD 82 */
	u32 p1recvd512_1023;	/* DWORD 83 */
	u32 p1recvd1518_1522;	/* DWORD 84 */
	u32 p1recvd1522_2047;	/* DWORD 85 */
	u32 p1recvd2048_4095;	/* DWORD 86 */
	u32 p1recvd4096_8191;	/* DWORD 87 */
	u32 p1recvd8192_9216;	/* DWORD 88 */
	u32 p1rcvdipcksmerrs;	/* DWORD 89 */
	u32 p1recvdtcpcksmerrs;	/* DWORD 90 */
	u32 p1recvdudpcksmerrs;	/* DWORD 91 */
	u32 p1recvdnonrsspackets;	/* DWORD 92 */
	u32 p1recvdippackets;	/* DWORD 93 */
	u32 p1recvdchute1packets;	/* DWORD 94 */
	u32 p1recvdchute2packets;	/* DWORD 95 */
	u32 p1recvdchute3packets;	/* DWORD 96 */
	u32 p1recvdipsecpackets;	/* DWORD 97 */
	u32 p1recvdmanagementpackets;	/* DWORD 98 */
	u32 p1xmitbyteslsd;	/* DWORD 99 */
	u32 p1xmitbytesmsd;	/* DWORD 100 */
	u32 p1xmitunicastframes;	/* DWORD 101 */
	u32 p1xmitmulticastframes;	/* DWORD 102 */
	u32 p1xmitbroadcastframes;	/* DWORD 103 */
	u32 p1xmitpauseframes;	/* DWORD 104 */
	u32 p1xmitcontrolframes;	/* DWORD 105 */
	u32 p1xmit64;	/* DWORD 106 */
	u32 p1xmit65_127;	/* DWORD 107 */
	u32 p1xmit128_256;	/* DWORD 108 */
	u32 p1xmit256_511;	/* DWORD 109 */
	u32 p1xmit512_1023;	/* DWORD 110 */
	u32 p1xmit1518_1522;	/* DWORD 111 */
	u32 p1xmit1522_2047;	/* DWORD 112 */
	u32 p1xmit2048_4095;	/* DWORD 113 */
	u32 p1xmit4096_8191;	/* DWORD 114 */
	u32 p1xmit8192_9216;	/* DWORD 115 */
	u32 p1rxfifooverflowdropped;	/* DWORD 116 */
	u32 p1ipseclookupfaileddropped;	/* DWORD 117 */
	u32 pxdroppednopbuf;	/* DWORD 118 */
	u32 pxdroppednotxpb;	/* DWORD 119 */
	u32 pxdroppednoipsecbuf;	/* DWORD 120 */
	u32 pxdroppednoerxdescr;	/* DWORD 121 */
	u32 pxdroppednotpredescr;	/* DWORD 122 */
	u32 pxrecvdmanagementportpackets;	/* DWORD 123 */
	u32 pxrecvdmanagementportbytes;	/* DWORD 124 */
	u32 pxrecvdmanagementportpauseframes;	/* DWORD 125 */
	u32 pxrecvdmanagementporterrors;	/* DWORD 126 */
	u32 pxxmitmanagementportpackets;	/* DWORD 127 */
	u32 pxxmitmanagementportbytes;	/* DWORD 128 */
	u32 pxxmitmanagementportpause;	/* DWORD 129 */
	u32 pxxmitmanagementportrxfifooverflow;	/* DWORD 130 */
	u32 pxrecvdipsecipcksmerrs;	/* DWORD 131 */
	u32 pxrecvdtcpsecipcksmerrs;	/* DWORD 132 */
	u32 pxrecvdudpsecipcksmerrs;	/* DWORD 133 */
	u32 pxipsecrunt;	/* DWORD 134 */
	u32 pxipsecaddressmismatchdropped;	/* DWORD 135 */
	u32 pxipsecrxfifooverflowdropped;	/* DWORD 136 */
	u32 pxipsecframestoolong;	/* DWORD 137 */
	u32 pxipsectotalipframes;	/* DWORD 138 */
	u32 pxipseciptoosmall;	/* DWORD 139 */
	u32 pxipseciptooshort;	/* DWORD 140 */
	u32 pxipseciphdrtoosmall;	/* DWORD 141 */
	u32 pxipsectcphdrbad;	/* DWORD 142 */
	u32 pxrecvdipsecchute1;	/* DWORD 143 */
	u32 pxrecvdipsecchute2;	/* DWORD 144 */
	u32 pxrecvdipsecchute3;	/* DWORD 145 */
	u32 pxdropped7frags;	/* DWORD 146 */
	u32 pxdroppedfrags;	/* DWORD 147 */
	u32 pxdroppedinvalidfragring;	/* DWORD 148 */
	u32 pxnumforwardedpackets;	/* DWORD 149 */
} __packed;
union MIB_ETH_STATISTICS_PARAMS {
	struct MIB_ETH_STATISTICS_PARAMS_IN request;
	struct BE_RXF_STATS response;
} __packed;
/*
 * Query ethernet statistics. All domains may issue this command. The
 * host domain drivers may optionally reset internal statistic counters
 * with a query.
 */
struct FWCMD_ETH_GET_STATISTICS {
	union FWCMD_HEADER header;
	union MIB_ETH_STATISTICS_PARAMS params;
} __packed;
/* Per-port promiscuous enable flags (booleans). */
struct FWCMD_ETH_ANON_175_REQUEST {
	u8 port0_promiscuous;
	u8 port1_promiscuous;
	u16 rsvd0;
} __packed;
struct FWCMD_ETH_ANON_176_RESPONSE {
	u32 rsvd0;
} __packed;
union FWCMD_ETH_ANON_174_PARAMS {
	struct FWCMD_ETH_ANON_175_REQUEST request;
	struct FWCMD_ETH_ANON_176_RESPONSE response;
} __packed;
/* Enables/Disables promiscuous ethernet receive mode. */
struct FWCMD_ETH_PROMISCUOUS {
	union FWCMD_HEADER header;
	union FWCMD_ETH_ANON_174_PARAMS params;
} __packed;
/* Fragment sizes are expressed as log2 of the byte size. */
struct FWCMD_ETH_ANON_178_REQUEST {
	u32 new_fragsize_log2;
} __packed;
struct FWCMD_ETH_ANON_179_RESPONSE {
	u32 actual_fragsize_log2;
} __packed;
union FWCMD_ETH_ANON_177_PARAMS {
	struct FWCMD_ETH_ANON_178_REQUEST request;
	struct FWCMD_ETH_ANON_179_RESPONSE response;
} __packed;
/*
 * Sets the Ethernet RX fragment size. Only host (domain 0) networking
 * drivers may issue this command. This call will fail for non-host
 * protection domains. In this situation the MCC CQ status will indicate
 * a failure due to insufficient privileges. The response should be
 * ignored, and the driver should use the FWCMD_ETH_GET_FRAG_SIZE to
 * query the existing ethernet receive fragment size. It must use this
 * fragment size for all fragments in the ethernet receive ring. If
 * the command succeeds, the driver must use the frag size indicated
 * in the command response since the requested frag size may not be applied
 * until the next reboot. When the requested fragsize matches the response
 * fragsize, this indicates the request was applied immediately.
 */
struct FWCMD_ETH_SET_RX_FRAG_SIZE {
	union FWCMD_HEADER header;
	union FWCMD_ETH_ANON_177_PARAMS params;
} __packed;
struct FWCMD_ETH_ANON_181_REQUEST {
	u32 rsvd0;
} __packed;
struct FWCMD_ETH_ANON_182_RESPONSE {
	u32 actual_fragsize_log2;
} __packed;
union FWCMD_ETH_ANON_180_PARAMS {
	struct FWCMD_ETH_ANON_181_REQUEST request;
	struct FWCMD_ETH_ANON_182_RESPONSE response;
} __packed;
/*
 * Queries the Ethernet RX fragment size. All domains may issue this
 * command. The driver should call this command to determine the minimum
 * required fragment size for the ethernet RX ring buffers. Drivers
 * may choose to use a larger size for each fragment buffer, but BladeEngine
 * will use up to the configured minimum required fragsize in each ethernet
 * receive fragment buffer. For example, if the ethernet receive fragment
 * size is configured to 4kB, and a driver uses 8kB fragments, a 6kB
 * ethernet packet received by BladeEngine will be split across two
 * of the driver's receive fragments (4kB in one fragment buffer, and
 * 2kB in the subsequent fragment buffer).
 */
struct FWCMD_ETH_GET_RX_FRAG_SIZE {
	union FWCMD_HEADER header;
	union FWCMD_ETH_ANON_180_PARAMS params;
} __packed;
#endif /* __fwcmd_eth_bmap_h__ */

View File

@ -0,0 +1,54 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __fwcmd_hdr_bmap_h__
#define __fwcmd_hdr_bmap_h__
/*
 * Header prepended to every FWCMD request sent to the firmware.
 * Wire format — field order and sizes must not change.
 */
struct FWCMD_REQUEST_HEADER {
	u8 opcode;		/* command opcode (see fwcmd_opcodes.h) */
	u8 subsystem;		/* FWCMD_SUBSYSTEM_* the opcode belongs to */
	u8 port_number;
	u8 domain;		/* issuing protection domain */
	u32 timeout;
	u32 request_length;	/* byte length of the request payload */
	u32 rsvd0;
} __packed;
/*
 * Header of every FWCMD response. It is written by the firmware over
 * the request header (see union FWCMD_HEADER). Wire format — field
 * order and sizes must not change.
 */
struct FWCMD_RESPONSE_HEADER {
	u8 opcode;		/* echoes the request opcode */
	u8 subsystem;		/* echoes the request subsystem */
	u8 rsvd0;
	u8 domain;
	u8 status;		/* MGMT_STATUS_* completion code */
	u8 additional_status;
	u16 rsvd1;
	u32 response_length;
	u32 actual_response_length;
} __packed;
/*
 * The firmware/driver overwrites the input FWCMD_REQUEST_HEADER with
 * the output FWCMD_RESPONSE_HEADER.
 */
union FWCMD_HEADER {
	struct FWCMD_REQUEST_HEADER request;
	struct FWCMD_RESPONSE_HEADER response;
} __packed;
#endif /* __fwcmd_hdr_bmap_h__ */

View File

@ -0,0 +1,94 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __fwcmd_mcc_amap_h__
#define __fwcmd_mcc_amap_h__
#include "fwcmd_opcodes.h"
/*
 * Where applicable, a WRB, may contain a list of Scatter-gather elements.
 * Each element supports a 64 bit address and a 32bit length field.
 */
/* Bit-field template (each u8 array element describes one bit). */
struct BE_MCC_SGE_AMAP {
	u8 pa_lo[32];	/* DWORD 0 */
	u8 pa_hi[32];	/* DWORD 1 */
	u8 length[32];	/* DWORD 2 */
} __packed;
/* Correctly-sized storage for one SGE: 3 DWORDs. */
struct MCC_SGE_AMAP {
	u32 dw[3];
};
/*
 * The design of an MCC_SGE allows up to 19 elements to be embedded
 * in a WRB, supporting 64KB data transfers (assuming a 4KB page size).
 */
/* Payload is either an SGE list or embedded inline data (59 DWORDs). */
struct BE_MCC_WRB_PAYLOAD_AMAP {
	union {
		struct BE_MCC_SGE_AMAP sgl[19];
		u8 embedded[59][32];	/* DWORD 0 */
	};
} __packed;
/* Correctly-sized storage for the payload: 59 DWORDs. */
struct MCC_WRB_PAYLOAD_AMAP {
	u32 dw[59];
};
/*
 * This is the structure of the MCC Command WRB for commands
 * sent to the Management Processing Unit (MPU). See section
 * for usage in embedded and non-embedded modes.
 */
struct BE_MCC_WRB_AMAP {
	u8 embedded;		/* DWORD 0 -- set if payload is inline */
	u8 rsvd0[2];		/* DWORD 0 */
	u8 sge_count[5];	/* DWORD 0 -- number of SGEs in payload */
	u8 rsvd1[16];		/* DWORD 0 */
	u8 special[8];		/* DWORD 0 */
	u8 payload_length[32];	/* DWORD 1 */
	u8 tag[2][32];		/* DWORD 2 -- echoed in the completion */
	u8 rsvd2[32];		/* DWORD 4 */
	struct BE_MCC_WRB_PAYLOAD_AMAP payload;
} __packed;
/* Correctly-sized storage for one WRB: 64 DWORDs. */
struct MCC_WRB_AMAP {
	u32 dw[64];
};
/* This is the structure of the MCC Completion queue entry */
struct BE_MCC_CQ_ENTRY_AMAP {
	u8 completion_status[16];	/* DWORD 0 -- MCC_STATUS_* code */
	u8 extended_status[16];		/* DWORD 0 */
	u8 mcc_tag[2][32];		/* DWORD 1 -- tag copied from the WRB */
	u8 rsvd0[27];			/* DWORD 3 */
	u8 consumed;			/* DWORD 3 */
	u8 completed;			/* DWORD 3 */
	u8 hpi_buffer_completion;	/* DWORD 3 */
	u8 async_event;			/* DWORD 3 -- entry is an async event */
	u8 valid;			/* DWORD 3 -- entry ownership bit */
} __packed;
/* Correctly-sized storage for one CQ entry: 4 DWORDs. */
struct MCC_CQ_ENTRY_AMAP {
	u32 dw[4];
};
/* Mailbox structures used by the MPU during bootstrap */
struct BE_MCC_MAILBOX_AMAP {
	struct BE_MCC_WRB_AMAP wrb;	/* command to execute */
	struct BE_MCC_CQ_ENTRY_AMAP cq;	/* its completion entry */
} __packed;
/* Correctly-sized storage for the mailbox: 64 + 4 = 68 DWORDs. */
struct MCC_MAILBOX_AMAP {
	u32 dw[68];
};
#endif /* __fwcmd_mcc_amap_h__ */

View File

@ -0,0 +1,244 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __fwcmd_opcodes_amap_h__
#define __fwcmd_opcodes_amap_h__
/*
* --- FWCMD_SUBSYSTEMS ---
* The commands are grouped into the following subsystems. The subsystem
* code along with the opcode uniquely identify a particular fwcmd.
*/
#define FWCMD_SUBSYSTEM_RSVD (0) /* This subsystem is reserved. It is */
/* never used. */
#define FWCMD_SUBSYSTEM_COMMON (1) /* CMDs in this group are common to
* all subsystems. See
* COMMON_SUBSYSTEM_OPCODES for opcodes
* and Common Host Configuration CMDs
* for the FWCMD descriptions.
*/
#define FWCMD_SUBSYSTEM_COMMON_ISCSI (2) /* CMDs in this group are */
/*
* common to Initiator and Target. See
* COMMON_ISCSI_SUBSYSTEM_OPCODES and
* Common iSCSI Initiator and Target
* CMDs for the command descriptions.
*/
#define FWCMD_SUBSYSTEM_ETH (3) /* This subsystem is used to
execute Ethernet commands. */
#define FWCMD_SUBSYSTEM_TPM (4) /* This subsystem is used
to execute TPM commands. */
#define FWCMD_SUBSYSTEM_PXE_UNDI (5) /* This subsystem is used
* to execute PXE
* and UNDI specific commands.
*/
#define FWCMD_SUBSYSTEM_ISCSI_INI (6) /* This subsystem is used to
execute ISCSI Initiator
specific commands.
*/
#define FWCMD_SUBSYSTEM_ISCSI_TGT (7) /* This subsystem is used
to execute iSCSI Target
specific commands.between
PTL and ARM firmware.
*/
#define FWCMD_SUBSYSTEM_MILI_PTL (8) /* This subsystem is used to
execute iSCSI Target specific
commands.between MILI
and PTL. */
#define FWCMD_SUBSYSTEM_MILI_TMD (9) /* This subsystem is used to
execute iSCSI Target specific
commands between MILI
and TMD. */
#define FWCMD_SUBSYSTEM_PROXY (11) /* This subsystem is used
to execute proxied commands
within the host at the
explicit request of a
non priviledged domain.
This 'subsystem' is entirely
virtual from the controller
and firmware perspective as
it is implemented in host
drivers.
*/
/*
* --- COMMON_SUBSYSTEM_OPCODES ---
* These opcodes are common to both networking and storage PCI
* functions. They are used to reserve resources and configure
* BladeEngine. These opcodes all use the FWCMD_SUBSYSTEM_COMMON
* subsystem code.
*/
#define OPCODE_COMMON_NTWK_MAC_QUERY (1)
#define SUBSYSTEM_COMMON_NTWK_MAC_QUERY (1)
#define SUBSYSTEM_COMMON_NTWK_MAC_SET (1)
#define SUBSYSTEM_COMMON_NTWK_MULTICAST_SET (1)
#define SUBSYSTEM_COMMON_NTWK_VLAN_CONFIG (1)
#define SUBSYSTEM_COMMON_NTWK_LINK_STATUS_QUERY (1)
#define SUBSYSTEM_COMMON_READ_FLASHROM (1)
#define SUBSYSTEM_COMMON_WRITE_FLASHROM (1)
#define SUBSYSTEM_COMMON_QUERY_MAX_FWCMD_BUFFER_SIZE (1)
#define SUBSYSTEM_COMMON_ADD_PAGE_TABLES (1)
#define SUBSYSTEM_COMMON_REMOVE_PAGE_TABLES (1)
#define SUBSYSTEM_COMMON_RING_DESTROY (1)
#define SUBSYSTEM_COMMON_CQ_CREATE (1)
#define SUBSYSTEM_COMMON_EQ_CREATE (1)
#define SUBSYSTEM_COMMON_ETH_RX_CREATE (1)
#define SUBSYSTEM_COMMON_ETH_TX_CREATE (1)
#define SUBSYSTEM_COMMON_ISCSI_DEFQ_CREATE (1)
#define SUBSYSTEM_COMMON_ISCSI_WRBQ_CREATE (1)
#define SUBSYSTEM_COMMON_MCC_CREATE (1)
#define SUBSYSTEM_COMMON_JELL_CONFIG (1)
#define SUBSYSTEM_COMMON_FORCE_FAILOVER (1)
#define SUBSYSTEM_COMMON_ADD_TEMPLATE_HEADER_BUFFERS (1)
#define SUBSYSTEM_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS (1)
#define SUBSYSTEM_COMMON_POST_ZERO_BUFFER (1)
#define SUBSYSTEM_COMMON_GET_QOS (1)
#define SUBSYSTEM_COMMON_SET_QOS (1)
#define SUBSYSTEM_COMMON_TCP_GET_STATISTICS (1)
#define SUBSYSTEM_COMMON_SEEPROM_READ (1)
#define SUBSYSTEM_COMMON_TCP_STATE_QUERY (1)
#define SUBSYSTEM_COMMON_GET_CNTL_ATTRIBUTES (1)
#define SUBSYSTEM_COMMON_NOP (1)
#define SUBSYSTEM_COMMON_NTWK_RX_FILTER (1)
#define SUBSYSTEM_COMMON_GET_FW_VERSION (1)
#define SUBSYSTEM_COMMON_SET_FLOW_CONTROL (1)
#define SUBSYSTEM_COMMON_GET_FLOW_CONTROL (1)
#define SUBSYSTEM_COMMON_SET_TCP_PARAMETERS (1)
#define SUBSYSTEM_COMMON_SET_FRAME_SIZE (1)
#define SUBSYSTEM_COMMON_GET_FAT (1)
#define SUBSYSTEM_COMMON_MODIFY_EQ_DELAY (1)
#define SUBSYSTEM_COMMON_FIRMWARE_CONFIG (1)
#define SUBSYSTEM_COMMON_ENABLE_DISABLE_DOMAINS (1)
#define SUBSYSTEM_COMMON_GET_DOMAIN_CONFIG (1)
#define SUBSYSTEM_COMMON_SET_VLD_CONFIG (1)
#define SUBSYSTEM_COMMON_GET_VLD_CONFIG (1)
#define SUBSYSTEM_COMMON_GET_PORT_EQUALIZATION (1)
#define SUBSYSTEM_COMMON_SET_PORT_EQUALIZATION (1)
#define SUBSYSTEM_COMMON_RED_CONFIG (1)
#define OPCODE_COMMON_NTWK_MAC_SET (2)
#define OPCODE_COMMON_NTWK_MULTICAST_SET (3)
#define OPCODE_COMMON_NTWK_VLAN_CONFIG (4)
#define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY (5)
#define OPCODE_COMMON_READ_FLASHROM (6)
#define OPCODE_COMMON_WRITE_FLASHROM (7)
#define OPCODE_COMMON_QUERY_MAX_FWCMD_BUFFER_SIZE (8)
#define OPCODE_COMMON_ADD_PAGE_TABLES (9)
#define OPCODE_COMMON_REMOVE_PAGE_TABLES (10)
#define OPCODE_COMMON_RING_DESTROY (11)
#define OPCODE_COMMON_CQ_CREATE (12)
#define OPCODE_COMMON_EQ_CREATE (13)
#define OPCODE_COMMON_ETH_RX_CREATE (14)
#define OPCODE_COMMON_ETH_TX_CREATE (15)
#define OPCODE_COMMON_NET_RESERVED0 (16) /* Reserved */
#define OPCODE_COMMON_NET_RESERVED1 (17) /* Reserved */
#define OPCODE_COMMON_NET_RESERVED2 (18) /* Reserved */
#define OPCODE_COMMON_ISCSI_DEFQ_CREATE (19)
#define OPCODE_COMMON_ISCSI_WRBQ_CREATE (20)
#define OPCODE_COMMON_MCC_CREATE (21)
#define OPCODE_COMMON_JELL_CONFIG (22)
#define OPCODE_COMMON_FORCE_FAILOVER (23)
#define OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS (24)
#define OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS (25)
#define OPCODE_COMMON_POST_ZERO_BUFFER (26)
#define OPCODE_COMMON_GET_QOS (27)
#define OPCODE_COMMON_SET_QOS (28)
#define OPCODE_COMMON_TCP_GET_STATISTICS (29)
#define OPCODE_COMMON_SEEPROM_READ (30)
#define OPCODE_COMMON_TCP_STATE_QUERY (31)
#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES (32)
#define OPCODE_COMMON_NOP (33)
#define OPCODE_COMMON_NTWK_RX_FILTER (34)
#define OPCODE_COMMON_GET_FW_VERSION (35)
#define OPCODE_COMMON_SET_FLOW_CONTROL (36)
#define OPCODE_COMMON_GET_FLOW_CONTROL (37)
#define OPCODE_COMMON_SET_TCP_PARAMETERS (38)
#define OPCODE_COMMON_SET_FRAME_SIZE (39)
#define OPCODE_COMMON_GET_FAT (40)
#define OPCODE_COMMON_MODIFY_EQ_DELAY (41)
#define OPCODE_COMMON_FIRMWARE_CONFIG (42)
#define OPCODE_COMMON_ENABLE_DISABLE_DOMAINS (43)
#define OPCODE_COMMON_GET_DOMAIN_CONFIG (44)
#define OPCODE_COMMON_SET_VLD_CONFIG (45)
#define OPCODE_COMMON_GET_VLD_CONFIG (46)
#define OPCODE_COMMON_GET_PORT_EQUALIZATION (47)
#define OPCODE_COMMON_SET_PORT_EQUALIZATION (48)
#define OPCODE_COMMON_RED_CONFIG (49)
/*
* --- ETH_SUBSYSTEM_OPCODES ---
* These opcodes are used for configuring the Ethernet interfaces. These
* opcodes all use the FWCMD_SUBSYSTEM_ETH subsystem code.
*/
#define OPCODE_ETH_RSS_CONFIG (1)
#define OPCODE_ETH_ACPI_CONFIG (2)
#define SUBSYSTEM_ETH_RSS_CONFIG (3)
#define SUBSYSTEM_ETH_ACPI_CONFIG (3)
#define OPCODE_ETH_PROMISCUOUS (3)
#define SUBSYSTEM_ETH_PROMISCUOUS (3)
#define SUBSYSTEM_ETH_GET_STATISTICS (3)
#define SUBSYSTEM_ETH_GET_RX_FRAG_SIZE (3)
#define SUBSYSTEM_ETH_SET_RX_FRAG_SIZE (3)
#define OPCODE_ETH_GET_STATISTICS (4)
#define OPCODE_ETH_GET_RX_FRAG_SIZE (5)
#define OPCODE_ETH_SET_RX_FRAG_SIZE (6)
/*
* --- MCC_STATUS_CODE ---
* These are the global status codes used by all subsystems
*/
#define MCC_STATUS_SUCCESS (0) /* Indicates a successful
completion of the command */
#define MCC_STATUS_INSUFFICIENT_PRIVILEGES (1) /* The client does not have
sufficient privileges to
execute the command */
#define MCC_STATUS_INVALID_PARAMETER (2) /* A parameter in the command
was invalid. The extended
status contains the index
of the parameter */
#define MCC_STATUS_INSUFFICIENT_RESOURCES (3) /* There are insufficient
chip resources to execute
the command */
#define MCC_STATUS_QUEUE_FLUSHING (4) /* The command is completing
because the queue was
getting flushed */
#define MCC_STATUS_DMA_FAILED (5) /* The command is completing
with a DMA error */
/*
* --- MGMT_ERROR_CODES ---
* Error Codes returned in the status field of the FWCMD response header
*/
#define MGMT_STATUS_SUCCESS (0) /* The FWCMD completed
without errors */
#define MGMT_STATUS_FAILED (1) /* Error status in the Status
field of the
struct FWCMD_RESPONSE_HEADER */
#define MGMT_STATUS_ILLEGAL_REQUEST (2) /* Invalid FWCMD opcode */
#define MGMT_STATUS_ILLEGAL_FIELD (3) /* Invalid parameter in
the FWCMD payload */
#endif /* __fwcmd_opcodes_amap_h__ */

View File

@ -0,0 +1,29 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __fwcmd_types_bmap_h__
#define __fwcmd_types_bmap_h__
/* MAC address format */
struct MAC_ADDRESS_FORMAT {
u16 SizeOfStructure;
u8 MACAddress[6];
} __packed;
#endif /* __fwcmd_types_bmap_h__ */

View File

@ -0,0 +1,182 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __host_struct_amap_h__
#define __host_struct_amap_h__
#include "be_cm.h"
#include "be_common.h"
#include "descriptors.h"
/* --- EQ_COMPLETION_MAJOR_CODE_ENUM --- */
#define EQ_MAJOR_CODE_COMPLETION (0) /* Completion event on a */
				     /* completion queue. */
#define EQ_MAJOR_CODE_ETH (1) /* Affiliated Ethernet Event. */
#define EQ_MAJOR_CODE_RESERVED (2) /* Reserved */
#define EQ_MAJOR_CODE_RDMA (3) /* Affiliated RDMA Event. */
#define EQ_MAJOR_CODE_ISCSI (4) /* Affiliated ISCSI Event */
#define EQ_MAJOR_CODE_UNAFFILIATED (5) /* Unaffiliated Event */
/* --- EQ_COMPLETION_MINOR_CODE_ENUM --- */
#define EQ_MINOR_CODE_COMPLETION (0) /* Completion event on a */
/* completion queue. */
#define EQ_MINOR_CODE_OTHER (1) /* Other Event (TBD). */
/* Queue Entry Definition for all 4 byte event queue types. */
struct BE_EQ_ENTRY_AMAP {
	u8 Valid;		/* DWORD 0 -- entry ownership bit */
	u8 MajorCode[3];	/* DWORD 0 -- EQ_MAJOR_CODE_* */
	u8 MinorCode[12];	/* DWORD 0 -- EQ_MINOR_CODE_* */
	u8 ResourceID[16];	/* DWORD 0 */
} __packed;
/* Correctly-sized storage for one EQ entry: 1 DWORD. */
struct EQ_ENTRY_AMAP {
	u32 dw[1];
};
/*
* --- ETH_EVENT_CODE ---
* These codes are returned by the MPU when one of these events has occurred,
* and the event is configured to report to an Event Queue when an event
* is detected.
*/
#define ETH_EQ_LINK_STATUS (0) /* Link status change event */
/* detected. */
#define ETH_EQ_WATERMARK (1) /* watermark event detected. */
#define ETH_EQ_MAGIC_PKT (2) /* magic pkt event detected. */
#define ETH_EQ_ACPI_PKT0 (3) /* ACPI interesting packet */
/* detected. */
#define ETH_EQ_ACPI_PKT1 (3) /* ACPI interesting packet */
/* detected. */
#define ETH_EQ_ACPI_PKT2 (3) /* ACPI interesting packet */
/* detected. */
#define ETH_EQ_ACPI_PKT3 (3) /* ACPI interesting packet */
/* detected. */
/*
* --- ETH_TX_COMPL_STATUS_ENUM ---
* Status codes contained in Ethernet TX completion descriptors.
*/
#define ETH_COMP_VALID (0)
#define ETH_COMP_ERROR (1)
#define ETH_COMP_INVALID (15)
/*
* --- ETH_TX_COMPL_PORT_ENUM ---
* Port indicator contained in Ethernet TX completion descriptors.
*/
#define ETH_COMP_PORT0 (0)
#define ETH_COMP_PORT1 (1)
#define ETH_COMP_MGMT (2)
/*
* --- ETH_TX_COMPL_CT_ENUM ---
* Completion type indicator contained in Ethernet TX completion descriptors.
*/
#define ETH_COMP_ETH (0)
/*
* Work request block that the driver issues to the chip for
* Ethernet transmissions. All control fields must be valid in each WRB for
* a message. The controller, as specified by the flags, optionally writes
* an entry to the Completion Ring and generate an event.
*/
struct BE_ETH_WRB_AMAP {
u8 frag_pa_hi[32]; /* DWORD 0 */
u8 frag_pa_lo[32]; /* DWORD 1 */
u8 complete; /* DWORD 2 */
u8 event; /* DWORD 2 */
u8 crc; /* DWORD 2 */
u8 forward; /* DWORD 2 */
u8 ipsec; /* DWORD 2 */
u8 mgmt; /* DWORD 2 */
u8 ipcs; /* DWORD 2 */
u8 udpcs; /* DWORD 2 */
u8 tcpcs; /* DWORD 2 */
u8 lso; /* DWORD 2 */
u8 last; /* DWORD 2 */
u8 vlan; /* DWORD 2 */
u8 dbg[3]; /* DWORD 2 */
u8 hash_val[3]; /* DWORD 2 */
u8 lso_mss[14]; /* DWORD 2 */
u8 frag_len[16]; /* DWORD 3 */
u8 vlan_tag[16]; /* DWORD 3 */
} __packed;
struct ETH_WRB_AMAP {
u32 dw[4];
};
/* This is an Ethernet transmit completion descriptor */
struct BE_ETH_TX_COMPL_AMAP {
u8 user_bytes[16]; /* DWORD 0 */
u8 nwh_bytes[8]; /* DWORD 0 */
u8 lso; /* DWORD 0 */
u8 rsvd0[7]; /* DWORD 0 */
u8 wrb_index[16]; /* DWORD 1 */
u8 ct[2]; /* DWORD 1 */
u8 port[2]; /* DWORD 1 */
u8 rsvd1[8]; /* DWORD 1 */
u8 status[4]; /* DWORD 1 */
u8 rsvd2[16]; /* DWORD 2 */
u8 ringid[11]; /* DWORD 2 */
u8 hash_val[4]; /* DWORD 2 */
u8 valid; /* DWORD 2 */
u8 rsvd3[32]; /* DWORD 3 */
} __packed;
struct ETH_TX_COMPL_AMAP {
u32 dw[4];
};
/* Ethernet Receive Buffer descriptor */
struct BE_ETH_RX_D_AMAP {
	u8 fragpa_hi[32];	/* DWORD 0 -- fragment bus address, high */
	u8 fragpa_lo[32];	/* DWORD 1 -- fragment bus address, low */
} __packed;
/* Correctly-sized storage for one RX descriptor: 2 DWORDs. */
struct ETH_RX_D_AMAP {
	u32 dw[2];
};
/* This is an Ethernet Receive Completion Descriptor */
struct BE_ETH_RX_COMPL_AMAP {
u8 vlan_tag[16]; /* DWORD 0 */
u8 pktsize[14]; /* DWORD 0 */
u8 port; /* DWORD 0 */
u8 rsvd0; /* DWORD 0 */
u8 err; /* DWORD 1 */
u8 rsshp; /* DWORD 1 */
u8 ipf; /* DWORD 1 */
u8 tcpf; /* DWORD 1 */
u8 udpf; /* DWORD 1 */
u8 ipcksm; /* DWORD 1 */
u8 tcpcksm; /* DWORD 1 */
u8 udpcksm; /* DWORD 1 */
u8 macdst[6]; /* DWORD 1 */
u8 vtp; /* DWORD 1 */
u8 vtm; /* DWORD 1 */
u8 fragndx[10]; /* DWORD 1 */
u8 ct[2]; /* DWORD 1 */
u8 ipsec; /* DWORD 1 */
u8 numfrags[3]; /* DWORD 1 */
u8 rsvd1[31]; /* DWORD 2 */
u8 valid; /* DWORD 2 */
u8 rsshash[32]; /* DWORD 3 */
} __packed;
struct ETH_RX_COMPL_AMAP {
u32 dw[4];
};
#endif /* __host_struct_amap_h__ */

View File

@ -0,0 +1,830 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
#ifndef __hwlib_h__
#define __hwlib_h__
#include <linux/module.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include "regmap.h" /* srcgen array map output */
#include "asyncmesg.h"
#include "fwcmd_opcodes.h"
#include "post_codes.h"
#include "fwcmd_mcc.h"
#include "fwcmd_types_bmap.h"
#include "fwcmd_common_bmap.h"
#include "fwcmd_eth_bmap.h"
#include "bestatus.h"
/*
*
* Macros for reading/writing a protection domain or CSR registers
* in BladeEngine.
*/
#define PD_READ(fo, field) ioread32((fo)->db_va + \
offsetof(struct BE_PROTECTION_DOMAIN_DBMAP_AMAP, field)/8)
#define PD_WRITE(fo, field, val) iowrite32(val, (fo)->db_va + \
offsetof(struct BE_PROTECTION_DOMAIN_DBMAP_AMAP, field)/8)
#define CSR_READ(fo, field) ioread32((fo)->csr_va + \
offsetof(struct BE_BLADE_ENGINE_CSRMAP_AMAP, field)/8)
#define CSR_WRITE(fo, field, val) iowrite32(val, (fo)->csr_va + \
offsetof(struct BE_BLADE_ENGINE_CSRMAP_AMAP, field)/8)
#define PCICFG0_READ(fo, field) ioread32((fo)->pci_va + \
offsetof(struct BE_PCICFG0_CSRMAP_AMAP, field)/8)
#define PCICFG0_WRITE(fo, field, val) iowrite32(val, (fo)->pci_va + \
offsetof(struct BE_PCICFG0_CSRMAP_AMAP, field)/8)
#define PCICFG1_READ(fo, field) ioread32((fo)->pci_va + \
offsetof(struct BE_PCICFG1_CSRMAP_AMAP, field)/8)
#define PCICFG1_WRITE(fo, field, val) iowrite32(val, (fo)->pci_va + \
offsetof(struct BE_PCICFG1_CSRMAP_AMAP, field)/8)
#ifdef BE_DEBUG
#define ASSERT(c) BUG_ON(!(c));
#else
#define ASSERT(c)
#endif
/* debug levels */
enum BE_DEBUG_LEVELS {
	DL_ALWAYS = 0,		/* cannot be masked */
	DL_ERR = 0x1,		/* errors that should never happen */
	DL_WARN = 0x2,		/* something questionable.
				   recoverable errors */
	DL_NOTE = 0x4,		/* infrequent, important debug info */
	DL_INFO = 0x8,		/* debug information */
	DL_VERBOSE = 0x10,	/* detailed info, such as buffer traces */
	BE_DL_MIN_VALUE = 0x1,	/* this is the min value used */
	BE_DL_MAX_VALUE = 0x80	/* this is the highest value used */
} ;
extern unsigned int trace_level;
#define TRACE(lm, fmt, args...) { \
if (trace_level & lm) { \
printk(KERN_NOTICE "BE: %s:%d \n" fmt, \
__FILE__ , __LINE__ , ## args); \
} \
}
/*
 * Install a new global trace level and hand back the level that was
 * in effect before, so the caller can restore it later.
 */
static inline unsigned int be_trace_set_level(unsigned int level)
{
	unsigned int previous = trace_level;

	trace_level = level;
	return previous;
}
#define be_trace_get_level() trace_level
/*
* Returns number of pages spanned by the size of data
* starting at the given address.
*/
#define PAGES_SPANNED(_address, _size) \
((u32)((((size_t)(_address) & (PAGE_SIZE - 1)) + \
(_size) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
/* Byte offset into the page corresponding to given address */
#define OFFSET_IN_PAGE(_addr_) ((size_t)(_addr_) & (PAGE_SIZE-1))
/*
* circular subtract.
* Returns a - b assuming a circular number system, where a and b are
* in range (0, maxValue-1). If a==b, zero is returned so the
* highest value possible with this subtraction is maxValue-1.
*/
/*
 * Circular subtraction: a - b in a modular number system of size max,
 * with a and b in (0, max-1).  When a == b the result is 0, so the
 * largest possible result is max - 1.
 */
static inline u32 be_subc(u32 a, u32 b, u32 max)
{
	ASSERT(a <= max && b <= max);
	ASSERT(max > 0);
	if (a >= b)
		return a - b;
	return max - b + a;	/* wrapped around */
}
/*
 * Circular addition: a + b in a modular number system of size max,
 * wrapping the sum back into range when it reaches max.
 */
static inline u32 be_addc(u32 a, u32 b, u32 max)
{
	ASSERT(a < max);
	ASSERT(max > 0);
	if (max - a > b)
		return a + b;
	return b + a - max;	/* wrapped around */
}
/* descriptor for a physically contiguous memory used for ring */
struct ring_desc {
	u32 length;	/* length in bytes */
	void *va;	/* virtual address */
	u64 pa;		/* bus address */
} ;
/*
 * This structure stores information about a ring shared between hardware
 * and software. Each ring is allocated by the driver in the uncached
 * extension and mapped into BladeEngine's unified table.
 */
struct mp_ring {
	u32 pages;		/* queue size in pages */
	u32 id;			/* queue id assigned by beklib */
	u32 num;		/* number of elements in queue */
	u32 cidx;		/* consumer index */
	u32 pidx;		/* producer index -- not used by most rings */
	u32 itemSize;		/* size in bytes of one object */
	void *va;		/* The virtual address of the ring.
				   This should be last to allow 32 & 64
				   bit debugger extensions to work. */
} ;
/*----------- amap bit filed get / set macros and functions -----*/
/*
* Structures defined in the map header files (under fw/amap/) with names
* in the format BE_<name>_AMAP are pseudo structures with members
* of type u8. These structures are templates that are used in
 * conjunction with the structures with names in the format
* <name>_AMAP to calculate the bit masks and bit offsets to get or set
* bit fields in structures. The structures <name>_AMAP are arrays
* of 32 bits words and have the correct size. The following macros
* provide convenient ways to get and set the various members
 * in the structures without using structures with bit fields.
* Always use the macros AMAP_GET_BITS_PTR and AMAP_SET_BITS_PTR
* macros to extract and set various members.
*/
/*
* Returns the a bit mask for the register that is NOT shifted into location.
* That means return values always look like: 0x1, 0xFF, 0x7FF, etc...
*/
/*
 * Returns the a bit mask for the register that is NOT shifted into location.
 * That means return values always look like: 0x1, 0xFF, 0x7FF, etc...
 *
 * Shift an unsigned one: "1 << bit_size" with a plain int literal is
 * undefined behavior for bit_size == 31 (shifting into the sign bit);
 * bit_size == 32 is special-cased because a shift by the full width
 * of the type is also undefined.
 */
static inline u32 amap_mask(u32 bit_size)
{
	return bit_size == 32 ? 0xFFFFFFFF : (((u32)1 << bit_size) - 1);
}
#define AMAP_BIT_MASK(_struct_, field) \
amap_mask(AMAP_BIT_SIZE(_struct_, field))
/*
* non-optimized set bits function. First clears the bits and then assigns them.
* This does not require knowledge of the particular DWORD you are setting.
* e.g. AMAP_SET_BITS_PTR (struct, field1, &contextMemory, 123);
*/
/*
 * non-optimized set bits function. First clears the bits and then assigns them.
 * This does not require knowledge of the particular DWORD you are setting.
 * e.g. AMAP_SET_BITS_PTR (struct, field1, &contextMemory, 123);
 */
static inline void
amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
{
	u32 *target = (u32 *)ptr + dw_offset;

	/* clear the field, then merge in the (masked) new value */
	*target = (*target & ~(mask << offset)) | ((mask & value) << offset);
}
#define AMAP_SET_BITS_PTR(_struct_, field, _structPtr_, val) \
amap_set(_structPtr_, AMAP_WORD_OFFSET(_struct_, field),\
AMAP_BIT_MASK(_struct_, field), \
AMAP_BIT_OFFSET(_struct_, field), val)
/*
* Non-optimized routine that gets the bits without knowing the correct DWORD.
* e.g. fieldValue = AMAP_GET_BITS_PTR (struct, field1, &contextMemory);
*/
/*
 * Non-optimized routine that gets the bits without knowing the correct DWORD.
 * e.g. fieldValue = AMAP_GET_BITS_PTR (struct, field1, &contextMemory);
 */
static inline u32
amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
{
	u32 word = ((u32 *)ptr)[dw_offset];

	return (word >> offset) & mask;
}
#define AMAP_GET_BITS_PTR(_struct_, field, _structPtr_) \
amap_get(_structPtr_, AMAP_WORD_OFFSET(_struct_, field), \
AMAP_BIT_MASK(_struct_, field), \
AMAP_BIT_OFFSET(_struct_, field))
/* Returns 0-31 representing bit offset within a DWORD of a bitfield. */
#define AMAP_BIT_OFFSET(_struct_, field) \
(offsetof(struct BE_ ## _struct_ ## _AMAP, field) % 32)
/* Returns 0-n representing DWORD offset of bitfield within the structure. */
#define AMAP_WORD_OFFSET(_struct_, field) \
(offsetof(struct BE_ ## _struct_ ## _AMAP, field)/32)
/* Returns size of bitfield in bits. */
#define AMAP_BIT_SIZE(_struct_, field) \
sizeof(((struct BE_ ## _struct_ ## _AMAP*)0)->field)
struct be_mcc_wrb_response_copy {
u16 length; /* bytes in response */
u16 fwcmd_offset; /* offset within the wrb of the response */
void *va; /* user's va to copy response into */
} ;
typedef void (*mcc_wrb_cqe_callback) (void *context, int status,
struct MCC_WRB_AMAP *optional_wrb);
struct be_mcc_wrb_context {
mcc_wrb_cqe_callback internal_cb; /* Function to call on
completion */
void *internal_cb_context; /* Parameter to pass
to completion function */
mcc_wrb_cqe_callback cb; /* Function to call on completion */
void *cb_context; /* Parameter to pass to completion function */
int *users_final_status; /* pointer to a local
variable for synchronous
commands */
struct MCC_WRB_AMAP *wrb; /* pointer to original wrb for embedded
commands only */
struct list_head next; /* links context structs together in
free list */
struct be_mcc_wrb_response_copy copy; /* Optional parameters to copy
embedded response to user's va */
#if defined(BE_DEBUG)
u16 subsystem, opcode; /* Track this FWCMD for debug builds. */
struct MCC_WRB_AMAP *ring_wrb;
u32 consumed_count;
#endif
} ;
/*
Represents a function object for network or storage. This
is used to manage per-function resources like MCC CQs, etc.
*/
struct be_function_object {
u32 magic; /*!< magic for detecting memory corruption. */
/* PCI BAR mapped addresses */
u8 __iomem *csr_va; /* CSR */
u8 __iomem *db_va; /* Door Bell */
u8 __iomem *pci_va; /* PCI config space */
u32 emulate; /* if set, MPU is not available.
Emulate everything. */
u32 pend_queue_driving; /* if set, drive the queued WRBs
after releasing the WRB lock */
spinlock_t post_lock; /* lock for verifying one thread posting wrbs */
spinlock_t cq_lock; /* lock for verifying one thread
processing cq */
spinlock_t mcc_context_lock; /* lock for protecting mcc
context free list */
unsigned long post_irq;
unsigned long cq_irq;
u32 type;
u32 pci_function_number;
struct be_mcc_object *mcc; /* mcc rings. */
struct {
struct MCC_MAILBOX_AMAP *va; /* VA to the mailbox */
u64 pa; /* PA to the mailbox */
u32 length; /* byte length of mailbox */
/* One default context struct used for posting at
* least one MCC_WRB
*/
struct be_mcc_wrb_context default_context;
bool default_context_allocated;
} mailbox;
struct {
/* Wake on lans configured. */
u32 wol_bitmask; /* bits 0,1,2,3 are set if
corresponding index is enabled */
} config;
struct BE_FIRMWARE_CONFIG fw_config;
} ;
/*
Represents an Event Queue
*/
struct be_eq_object {
u32 magic;
atomic_t ref_count;
struct be_function_object *parent_function;
struct list_head eq_list;
struct list_head cq_list_head;
u32 eq_id;
void *cb_context;
} ;
/*
Manages a completion queue
*/
struct be_cq_object {
u32 magic;
atomic_t ref_count;
struct be_function_object *parent_function;
struct be_eq_object *eq_object;
struct list_head cq_list;
struct list_head cqlist_for_eq;
void *va;
u32 num_entries;
void *cb_context;
u32 cq_id;
} ;
/*
Manages an ethernet send queue
*/
struct be_ethsq_object {
u32 magic;
struct list_head list;
struct be_function_object *parent_function;
struct be_cq_object *cq_object;
u32 bid;
} ;
/*
@brief
Manages an ethernet receive queue
*/
struct be_ethrq_object {
u32 magic;
struct list_head list;
struct be_function_object *parent_function;
u32 rid;
struct be_cq_object *cq_object;
struct be_cq_object *rss_cq_object[4];
} ;
/*
Manages an MCC
*/
typedef void (*mcc_async_event_callback) (void *context, u32 event_code,
void *event);
struct be_mcc_object {
u32 magic;
struct be_function_object *parent_function;
struct list_head mcc_list;
struct be_cq_object *cq_object;
/* Async event callback for MCC CQ. */
mcc_async_event_callback async_cb;
void *async_context;
struct {
struct be_mcc_wrb_context *base;
u32 num;
struct list_head list_head;
} wrb_context;
struct {
struct ring_desc *rd;
struct mp_ring ring;
} sq;
struct {
struct mp_ring ring;
} cq;
u32 processing; /* flag indicating that one thread
is processing CQ */
u32 rearm; /* doorbell rearm setting to make
sure the active processing thread */
/* rearms the CQ if any of the threads requested it. */
struct list_head backlog;
u32 backlog_length;
u32 driving_backlog;
u32 consumed_index;
} ;
/* Queue context header -- the required software information for
* queueing a WRB.
*/
struct be_queue_driver_context {
mcc_wrb_cqe_callback internal_cb; /* Function to call on
completion */
void *internal_cb_context; /* Parameter to pass
to completion function */
mcc_wrb_cqe_callback cb; /* Function to call on completion */
void *cb_context; /* Parameter to pass to completion function */
struct be_mcc_wrb_response_copy copy; /* Optional parameters to copy
embedded response to user's va */
void *optional_fwcmd_va;
struct list_head list;
u32 bytes;
} ;
/*
* Common MCC WRB header that all commands require.
*/
struct be_mcc_wrb_header {
u8 rsvd[offsetof(struct BE_MCC_WRB_AMAP, payload)/8];
} ;
/*
* All non embedded commands supported by hwlib functions only allow
* 1 SGE. This queue context handles them all.
*/
struct be_nonembedded_q_ctxt {
struct be_queue_driver_context context;
struct be_mcc_wrb_header wrb_header;
struct MCC_SGE_AMAP sge[1];
} ;
/*
* ------------------------------------------------------------------------
* This section contains the specific queue struct for each command.
* The user could always provide a be_generic_q_ctxt but this is a
* rather large struct. By using the specific struct, memory consumption
* can be reduced.
* ------------------------------------------------------------------------
*/
struct be_link_status_q_ctxt {
struct be_queue_driver_context context;
struct be_mcc_wrb_header wrb_header;
struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY fwcmd;
} ;
struct be_multicast_q_ctxt {
struct be_queue_driver_context context;
struct be_mcc_wrb_header wrb_header;
struct FWCMD_COMMON_NTWK_MULTICAST_SET fwcmd;
} ;
struct be_vlan_q_ctxt {
struct be_queue_driver_context context;
struct be_mcc_wrb_header wrb_header;
struct FWCMD_COMMON_NTWK_VLAN_CONFIG fwcmd;
} ;
struct be_promiscuous_q_ctxt {
struct be_queue_driver_context context;
struct be_mcc_wrb_header wrb_header;
struct FWCMD_ETH_PROMISCUOUS fwcmd;
} ;
struct be_force_failover_q_ctxt {
struct be_queue_driver_context context;
struct be_mcc_wrb_header wrb_header;
struct FWCMD_COMMON_FORCE_FAILOVER fwcmd;
} ;
struct be_rxf_filter_q_ctxt {
struct be_queue_driver_context context;
struct be_mcc_wrb_header wrb_header;
struct FWCMD_COMMON_NTWK_RX_FILTER fwcmd;
} ;
/* Queue context sized for the MODIFY_EQ_DELAY FWCMD only. */
struct be_eq_modify_delay_q_ctxt {
struct be_queue_driver_context context;
struct be_mcc_wrb_header wrb_header;
struct FWCMD_COMMON_MODIFY_EQ_DELAY fwcmd;
} ;
/*
 * The generic context is the largest size that would be required.
 * It is the software context plus an entire WRB.
 */
struct be_generic_q_ctxt {
struct be_queue_driver_context context;
struct be_mcc_wrb_header wrb_header;
struct MCC_WRB_PAYLOAD_AMAP payload;	/* room for any FWCMD payload */
} ;
/*
 * Types for the BE_QUEUE_CONTEXT object.
 */
/* Opaque tags identifying which FWCMD a queued context carries. */
#define BE_QUEUE_INVALID (0)
#define BE_QUEUE_LINK_STATUS (0xA006)
#define BE_QUEUE_ETH_STATS (0xA007)
#define BE_QUEUE_TPM_STATS (0xA008)
#define BE_QUEUE_TCP_STATS (0xA009)
#define BE_QUEUE_MULTICAST (0xA00A)
#define BE_QUEUE_VLAN (0xA00B)
#define BE_QUEUE_RSS (0xA00C)
#define BE_QUEUE_FORCE_FAILOVER (0xA00D)
#define BE_QUEUE_PROMISCUOUS (0xA00E)
#define BE_QUEUE_WAKE_ON_LAN (0xA00F)
#define BE_QUEUE_NOP (0xA010)
/* --- BE_FUNCTION_ENUM --- */
/* PCI function personalities; see be_function_object_create(). */
#define BE_FUNCTION_TYPE_ISCSI (0)
#define BE_FUNCTION_TYPE_NETWORK (1)
#define BE_FUNCTION_TYPE_ARM (2)
/* --- BE_ETH_TX_RING_TYPE_ENUM --- */
#define BE_ETH_TX_RING_TYPE_FORWARDING (1) /* Ether ring for forwarding */
#define BE_ETH_TX_RING_TYPE_STANDARD (2) /* Ether ring for sending */
/* network packets. */
#define BE_ETH_TX_RING_TYPE_BOUND (3) /* Ethernet ring for sending */
/* network packets, bound */
/* to a physical port. */
/*
 * ----------------------------------------------------------------------
 * API MACROS
 * ----------------------------------------------------------------------
 */
#define BE_FWCMD_NAME(_short_name_) struct FWCMD_##_short_name_
#define BE_OPCODE_NAME(_short_name_) OPCODE_##_short_name_
#define BE_SUBSYSTEM_NAME(_short_name_) SUBSYSTEM_##_short_name_
/*
 * Reserve space for a FWCMD in the given WRB and return a typed pointer
 * to it.  The expansion is an expression with NO trailing semicolon: the
 * caller supplies the ';'.  (A semicolon baked into the macro produced an
 * empty statement at every call site and would break use inside an
 * unbraced if/else.)
 */
#define BE_PREPARE_EMBEDDED_FWCMD(_pfob_, _wrb_, _short_name_) \
((BE_FWCMD_NAME(_short_name_) *) \
be_function_prepare_embedded_fwcmd(_pfob_, _wrb_, \
sizeof(BE_FWCMD_NAME(_short_name_)), \
FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.request), \
FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.response), \
BE_OPCODE_NAME(_short_name_), \
BE_SUBSYSTEM_NAME(_short_name_)))
#define BE_PREPARE_NONEMBEDDED_FWCMD(_pfob_, _wrb_, _iva_, _ipa_, _short_name_)\
((BE_FWCMD_NAME(_short_name_) *) \
be_function_prepare_nonembedded_fwcmd(_pfob_, _wrb_, (_iva_), (_ipa_), \
sizeof(BE_FWCMD_NAME(_short_name_)), \
FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.request), \
FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.response), \
BE_OPCODE_NAME(_short_name_), \
BE_SUBSYSTEM_NAME(_short_name_)))
int be_function_object_create(u8 __iomem *csr_va, u8 __iomem *db_va,
u8 __iomem *pci_va, u32 function_type, struct ring_desc *mailbox_rd,
struct be_function_object *pfob);
int be_function_object_destroy(struct be_function_object *pfob);
int be_function_cleanup(struct be_function_object *pfob);
int be_function_get_fw_version(struct be_function_object *pfob,
struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD *fw_version,
mcc_wrb_cqe_callback cb, void *cb_context);
int be_eq_modify_delay(struct be_function_object *pfob,
u32 num_eq, struct be_eq_object **eq_array,
u32 *eq_delay_array, mcc_wrb_cqe_callback cb,
void *cb_context,
struct be_eq_modify_delay_q_ctxt *q_ctxt);
int be_eq_create(struct be_function_object *pfob,
struct ring_desc *rd, u32 eqe_size, u32 num_entries,
u32 watermark, u32 timer_delay, struct be_eq_object *eq_object);
int be_eq_destroy(struct be_eq_object *eq);
int be_cq_create(struct be_function_object *pfob,
struct ring_desc *rd, u32 length,
bool solicited_eventable, bool no_delay,
u32 wm_thresh, struct be_eq_object *eq_object,
struct be_cq_object *cq_object);
int be_cq_destroy(struct be_cq_object *cq);
int be_mcc_ring_create(struct be_function_object *pfob,
struct ring_desc *rd, u32 length,
struct be_mcc_wrb_context *context_array,
u32 num_context_entries,
struct be_cq_object *cq, struct be_mcc_object *mcc);
int be_mcc_ring_destroy(struct be_mcc_object *mcc_object);
int be_mcc_process_cq(struct be_mcc_object *mcc_object, bool rearm);
int be_mcc_add_async_event_callback(struct be_mcc_object *mcc_object,
mcc_async_event_callback cb, void *cb_context);
int be_pci_soft_reset(struct be_function_object *pfob);
int be_drive_POST(struct be_function_object *pfob);
int be_eth_sq_create(struct be_function_object *pfob,
struct ring_desc *rd, u32 length_in_bytes,
u32 type, u32 ulp, struct be_cq_object *cq_object,
struct be_ethsq_object *eth_sq);
/* Extended parameters for be_eth_sq_create_ex().
 * NOTE(review): `port` presumably selects the physical port the send
 * queue is bound to (cf. BE_ETH_TX_RING_TYPE_BOUND) -- confirm against
 * the implementation. */
struct be_eth_sq_parameters {
u32 port;
u32 rsvd0[2];
} ;
int be_eth_sq_create_ex(struct be_function_object *pfob,
struct ring_desc *rd, u32 length_in_bytes,
u32 type, u32 ulp, struct be_cq_object *cq_object,
struct be_eth_sq_parameters *ex_parameters,
struct be_ethsq_object *eth_sq);
int be_eth_sq_destroy(struct be_ethsq_object *eth_sq);
int be_eth_set_flow_control(struct be_function_object *pfob,
bool txfc_enable, bool rxfc_enable);
int be_eth_get_flow_control(struct be_function_object *pfob,
bool *txfc_enable, bool *rxfc_enable);
int be_eth_set_qos(struct be_function_object *pfob, u32 max_bps, u32 max_pps);
int be_eth_get_qos(struct be_function_object *pfob, u32 *max_bps, u32 *max_pps);
int be_eth_set_frame_size(struct be_function_object *pfob,
u32 *tx_frame_size, u32 *rx_frame_size);
int be_eth_rq_create(struct be_function_object *pfob,
struct ring_desc *rd, struct be_cq_object *cq_object,
struct be_cq_object *bcmc_cq_object,
struct be_ethrq_object *eth_rq);
int be_eth_rq_destroy(struct be_ethrq_object *eth_rq);
int be_eth_rq_destroy_options(struct be_ethrq_object *eth_rq, bool flush,
mcc_wrb_cqe_callback cb, void *cb_context);
int be_eth_rq_set_frag_size(struct be_function_object *pfob,
u32 new_frag_size_bytes, u32 *actual_frag_size_bytes);
int be_eth_rq_get_frag_size(struct be_function_object *pfob,
u32 *frag_size_bytes);
void *be_function_prepare_embedded_fwcmd(struct be_function_object *pfob,
struct MCC_WRB_AMAP *wrb,
u32 payload_length, u32 request_length,
u32 response_length, u32 opcode, u32 subsystem);
void *be_function_prepare_nonembedded_fwcmd(struct be_function_object *pfob,
struct MCC_WRB_AMAP *wrb, void *fwcmd_header_va, u64 fwcmd_header_pa,
u32 payload_length, u32 request_length, u32 response_length,
u32 opcode, u32 subsystem);
struct MCC_WRB_AMAP *
be_function_peek_mcc_wrb(struct be_function_object *pfob);
int be_rxf_mac_address_read_write(struct be_function_object *pfob,
bool port1, bool mac1, bool mgmt,
bool write, bool permanent, u8 *mac_address,
mcc_wrb_cqe_callback cb,
void *cb_context);
int be_rxf_multicast_config(struct be_function_object *pfob,
bool promiscuous, u32 num, u8 *mac_table,
mcc_wrb_cqe_callback cb,
void *cb_context,
struct be_multicast_q_ctxt *q_ctxt);
int be_rxf_vlan_config(struct be_function_object *pfob,
bool promiscuous, u32 num, u16 *vlan_tag_array,
mcc_wrb_cqe_callback cb, void *cb_context,
struct be_vlan_q_ctxt *q_ctxt);
int be_rxf_link_status(struct be_function_object *pfob,
struct BE_LINK_STATUS *link_status,
mcc_wrb_cqe_callback cb,
void *cb_context,
struct be_link_status_q_ctxt *q_ctxt);
int be_rxf_query_eth_statistics(struct be_function_object *pfob,
struct FWCMD_ETH_GET_STATISTICS *va_for_fwcmd,
u64 pa_for_fwcmd, mcc_wrb_cqe_callback cb,
void *cb_context,
struct be_nonembedded_q_ctxt *q_ctxt);
int be_rxf_promiscuous(struct be_function_object *pfob,
bool enable_port0, bool enable_port1,
mcc_wrb_cqe_callback cb, void *cb_context,
struct be_promiscuous_q_ctxt *q_ctxt);
int be_rxf_filter_config(struct be_function_object *pfob,
struct NTWK_RX_FILTER_SETTINGS *settings,
mcc_wrb_cqe_callback cb,
void *cb_context,
struct be_rxf_filter_q_ctxt *q_ctxt);
/*
* ------------------------------------------------------
* internal functions used by hwlib
* ------------------------------------------------------
*/
int be_function_ring_destroy(struct be_function_object *pfob,
u32 id, u32 ring_type, mcc_wrb_cqe_callback cb,
void *cb_context,
mcc_wrb_cqe_callback internal_cb,
void *internal_callback_context);
int be_function_post_mcc_wrb(struct be_function_object *pfob,
struct MCC_WRB_AMAP *wrb,
struct be_generic_q_ctxt *q_ctxt,
mcc_wrb_cqe_callback cb, void *cb_context,
mcc_wrb_cqe_callback internal_cb,
void *internal_cb_context, void *optional_fwcmd_va,
struct be_mcc_wrb_response_copy *response_copy);
int be_function_queue_mcc_wrb(struct be_function_object *pfob,
struct be_generic_q_ctxt *q_ctxt);
/*
* ------------------------------------------------------
* MCC QUEUE
* ------------------------------------------------------
*/
int be_mpu_init_mailbox(struct be_function_object *pfob, struct ring_desc *rd);
struct MCC_WRB_AMAP *
_be_mpu_peek_ring_wrb(struct be_mcc_object *mcc, bool driving_queue);
struct be_mcc_wrb_context *
_be_mcc_allocate_wrb_context(struct be_function_object *pfob);
void _be_mcc_free_wrb_context(struct be_function_object *pfob,
struct be_mcc_wrb_context *context);
int _be_mpu_post_wrb_mailbox(struct be_function_object *pfob,
struct MCC_WRB_AMAP *wrb, struct be_mcc_wrb_context *wrb_context);
int _be_mpu_post_wrb_ring(struct be_mcc_object *mcc,
struct MCC_WRB_AMAP *wrb, struct be_mcc_wrb_context *wrb_context);
void be_drive_mcc_wrb_queue(struct be_mcc_object *mcc);
/*
* ------------------------------------------------------
* Ring Sizes
* ------------------------------------------------------
*/
/*
 * Convert a hardware ring-size encoding into the ring length in bytes.
 * Encoding 0 denotes the maximum depth of 32k entries, encoding 1 is
 * reserved; otherwise the entry count is 2^(encoding - 1).
 */
static inline u32 be_ring_encoding_to_length(u32 encoding, u32 object_size)
{
	u32 num_entries;

	ASSERT(encoding != 1);		/* 1 is rsvd */
	ASSERT(encoding < 16);
	ASSERT(object_size > 0);

	num_entries = (encoding == 0) ? (1 << 15)	/* 32k deep */
				      : (1 << (encoding - 1));
	return num_entries * object_size;
}
/*
 * Convert a ring length in bytes into the hardware size encoding.
 * The encoding is log2(entry count) + 1, except that the maximum depth
 * of 32k entries (raw value 16) is represented by encoding 0.
 */
static inline
u32 be_ring_length_to_encoding(u32 length_in_bytes, u32 object_size)
{
	u32 num_objects;
	u32 enc;

	ASSERT(object_size > 0);
	ASSERT(length_in_bytes % object_size == 0);

	num_objects = length_in_bytes / object_size;
	ASSERT(num_objects > 1);
	ASSERT(num_objects <= 32 * 1024);
	ASSERT(length_in_bytes <= 8 * PAGE_SIZE); /* max ring size in UT */

	enc = __ilog2_u32(num_objects) + 1;
	return (enc == 16) ? 0 : enc;	/* 0 encodes 32k deep */
}
void be_rd_to_pa_list(struct ring_desc *rd, struct PHYS_ADDR *pa_list,
u32 max_num);
#endif /* __hwlib_h__ */

1364
drivers/staging/benet/mpu.c Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,74 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __mpu_amap_h__
#define __mpu_amap_h__
#include "ep.h"
/* Provide control parameters for the Management Processor Unit. */
/* Autogenerated AMAP bit map: each u8 element represents one bit; the
 * companion MPU_CSRMAP_AMAP below gives the real 32-bit-word layout.
 * Everything past the embedded EP CSR map is reserved. */
struct BE_MPU_CSRMAP_AMAP {
struct BE_EP_CSRMAP_AMAP ep;
u8 rsvd0[128]; /* DWORD 64 */
u8 rsvd1[32]; /* DWORD 68 */
u8 rsvd2[192]; /* DWORD 69 */
u8 rsvd3[192]; /* DWORD 75 */
u8 rsvd4[32]; /* DWORD 81 */
u8 rsvd5[32]; /* DWORD 82 */
u8 rsvd6[32]; /* DWORD 83 */
u8 rsvd7[32]; /* DWORD 84 */
u8 rsvd8[32]; /* DWORD 85 */
u8 rsvd9[32]; /* DWORD 86 */
u8 rsvd10[32]; /* DWORD 87 */
u8 rsvd11[32]; /* DWORD 88 */
u8 rsvd12[32]; /* DWORD 89 */
u8 rsvd13[32]; /* DWORD 90 */
u8 rsvd14[32]; /* DWORD 91 */
u8 rsvd15[32]; /* DWORD 92 */
u8 rsvd16[32]; /* DWORD 93 */
u8 rsvd17[32]; /* DWORD 94 */
u8 rsvd18[32]; /* DWORD 95 */
u8 rsvd19[32]; /* DWORD 96 */
u8 rsvd20[32]; /* DWORD 97 */
u8 rsvd21[32]; /* DWORD 98 */
u8 rsvd22[32]; /* DWORD 99 */
u8 rsvd23[32]; /* DWORD 100 */
u8 rsvd24[32]; /* DWORD 101 */
u8 rsvd25[32]; /* DWORD 102 */
u8 rsvd26[32]; /* DWORD 103 */
u8 rsvd27[32]; /* DWORD 104 */
u8 rsvd28[96]; /* DWORD 105 */
u8 rsvd29[32]; /* DWORD 108 */
u8 rsvd30[32]; /* DWORD 109 */
u8 rsvd31[32]; /* DWORD 110 */
u8 rsvd32[32]; /* DWORD 111 */
u8 rsvd33[32]; /* DWORD 112 */
u8 rsvd34[96]; /* DWORD 113 */
u8 rsvd35[32]; /* DWORD 116 */
u8 rsvd36[32]; /* DWORD 117 */
u8 rsvd37[32]; /* DWORD 118 */
u8 rsvd38[32]; /* DWORD 119 */
u8 rsvd39[32]; /* DWORD 120 */
u8 rsvd40[32]; /* DWORD 121 */
u8 rsvd41[134][32]; /* DWORD 122 */
} __packed;
/* In-memory (one u32 per DWORD) view of the map above. */
struct MPU_CSRMAP_AMAP {
u32 dw[256];
};
#endif /* __mpu_amap_h__ */

View File

@ -0,0 +1,46 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __mpu_context_amap_h__
#define __mpu_context_amap_h__
/*
 * Management command and control ring context. The MPU's BTLR_CTRL1 CSR
 * controls the writeback behavior of the producer and consumer index values.
 */
/* Autogenerated AMAP bit map: one u8 per bit, 4 DWORDs total
 * (see MCC_RING_CONTEXT_AMAP below for the word view). */
struct BE_MCC_RING_CONTEXT_AMAP {
u8 con_index[16]; /* DWORD 0 */
u8 ring_size[4]; /* DWORD 0 */
u8 cq_id[11]; /* DWORD 0 */
u8 rsvd0; /* DWORD 0 */
u8 prod_index[16]; /* DWORD 1 */
u8 pdid[15]; /* DWORD 1 */
u8 invalid; /* DWORD 1 */
u8 cmd_pending_current[7]; /* DWORD 2 */
u8 rsvd1[25]; /* DWORD 2 */
u8 hpi_port_cq_id[11]; /* DWORD 3 */
u8 rsvd2[5]; /* DWORD 3 */
u8 cmd_pending_max[7]; /* DWORD 3 */
u8 rsvd3[9]; /* DWORD 3 */
} __packed;
struct MCC_RING_CONTEXT_AMAP {
u32 dw[4];
};
#endif /* __mpu_context_amap_h__ */

View File

@ -0,0 +1,825 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __pcicfg_amap_h__
#define __pcicfg_amap_h__
/* Vendor and Device ID Register. */
/* All BE_PCICFG_*_AMAP structs in this file are autogenerated AMAP bit
 * maps: each u8 element is one bit, and the paired PCICFG_*_AMAP struct
 * gives the real dword layout. */
struct BE_PCICFG_ID_CSR_AMAP {
u8 vendorid[16]; /* DWORD 0 */
u8 deviceid[16]; /* DWORD 0 */
} __packed;
struct PCICFG_ID_CSR_AMAP {
u32 dw[1];
};
/* IO Bar Register. */
struct BE_PCICFG_IOBAR_CSR_AMAP {
u8 iospace; /* DWORD 0 */
u8 rsvd0[7]; /* DWORD 0 */
u8 iobar[24]; /* DWORD 0 */
} __packed;
struct PCICFG_IOBAR_CSR_AMAP {
u32 dw[1];
};
/* Memory BAR 0 Register. */
struct BE_PCICFG_MEMBAR0_CSR_AMAP {
u8 memspace; /* DWORD 0 */
u8 type[2]; /* DWORD 0 */
u8 pf; /* DWORD 0 */
u8 rsvd0[10]; /* DWORD 0 */
u8 membar0[18]; /* DWORD 0 */
} __packed;
struct PCICFG_MEMBAR0_CSR_AMAP {
u32 dw[1];
};
/* Memory BAR 1 - Low Address Register. */
struct BE_PCICFG_MEMBAR1_LO_CSR_AMAP {
u8 memspace; /* DWORD 0 */
u8 type[2]; /* DWORD 0 */
u8 pf; /* DWORD 0 */
u8 rsvd0[13]; /* DWORD 0 */
u8 membar1lo[15]; /* DWORD 0 */
} __packed;
struct PCICFG_MEMBAR1_LO_CSR_AMAP {
u32 dw[1];
};
/* Memory BAR 1 - High Address Register. */
struct BE_PCICFG_MEMBAR1_HI_CSR_AMAP {
u8 membar1hi[32]; /* DWORD 0 */
} __packed;
struct PCICFG_MEMBAR1_HI_CSR_AMAP {
u32 dw[1];
};
/* Memory BAR 2 - Low Address Register. */
struct BE_PCICFG_MEMBAR2_LO_CSR_AMAP {
u8 memspace; /* DWORD 0 */
u8 type[2]; /* DWORD 0 */
u8 pf; /* DWORD 0 */
u8 rsvd0[17]; /* DWORD 0 */
u8 membar2lo[11]; /* DWORD 0 */
} __packed;
struct PCICFG_MEMBAR2_LO_CSR_AMAP {
u32 dw[1];
};
/* Memory BAR 2 - High Address Register. */
struct BE_PCICFG_MEMBAR2_HI_CSR_AMAP {
u8 membar2hi[32]; /* DWORD 0 */
} __packed;
struct PCICFG_MEMBAR2_HI_CSR_AMAP {
u32 dw[1];
};
/* Subsystem Vendor and ID (Function 0) Register. */
struct BE_PCICFG_SUBSYSTEM_ID_F0_CSR_AMAP {
u8 subsys_vendor_id[16]; /* DWORD 0 */
u8 subsys_id[16]; /* DWORD 0 */
} __packed;
struct PCICFG_SUBSYSTEM_ID_F0_CSR_AMAP {
u32 dw[1];
};
/* Subsystem Vendor and ID (Function 1) Register. */
struct BE_PCICFG_SUBSYSTEM_ID_F1_CSR_AMAP {
u8 subsys_vendor_id[16]; /* DWORD 0 */
u8 subsys_id[16]; /* DWORD 0 */
} __packed;
struct PCICFG_SUBSYSTEM_ID_F1_CSR_AMAP {
u32 dw[1];
};
/* Semaphore Register. */
struct BE_PCICFG_SEMAPHORE_CSR_AMAP {
u8 locked; /* DWORD 0 */
u8 rsvd0[31]; /* DWORD 0 */
} __packed;
struct PCICFG_SEMAPHORE_CSR_AMAP {
u32 dw[1];
};
/* Soft Reset Register. */
struct BE_PCICFG_SOFT_RESET_CSR_AMAP {
u8 rsvd0[7]; /* DWORD 0 */
u8 softreset; /* DWORD 0 */
u8 rsvd1[16]; /* DWORD 0 */
u8 nec_ll_rcvdetect_i[8]; /* DWORD 0 */
} __packed;
struct PCICFG_SOFT_RESET_CSR_AMAP {
u32 dw[1];
};
/* Unrecoverable Error Status (Low) Register. Each bit corresponds to
 * an internal Unrecoverable Error. These are set by hardware and may be
 * cleared by writing a one to the respective bit(s) to be cleared. Any
 * bit being set that is also unmasked will result in Unrecoverable Error
 * interrupt notification to the host CPU and/or Server Management chip
 * and the transitioning of BladeEngine to an Offline state.
 */
/* One status bit per internal hardware unit (32 bits -> dw[1] below). */
struct BE_PCICFG_UE_STATUS_LOW_CSR_AMAP {
u8 cev_ue_status; /* DWORD 0 */
u8 ctx_ue_status; /* DWORD 0 */
u8 dbuf_ue_status; /* DWORD 0 */
u8 erx_ue_status; /* DWORD 0 */
u8 host_ue_status; /* DWORD 0 */
u8 mpu_ue_status; /* DWORD 0 */
u8 ndma_ue_status; /* DWORD 0 */
u8 ptc_ue_status; /* DWORD 0 */
u8 rdma_ue_status; /* DWORD 0 */
u8 rxf_ue_status; /* DWORD 0 */
u8 rxips_ue_status; /* DWORD 0 */
u8 rxulp0_ue_status; /* DWORD 0 */
u8 rxulp1_ue_status; /* DWORD 0 */
u8 rxulp2_ue_status; /* DWORD 0 */
u8 tim_ue_status; /* DWORD 0 */
u8 tpost_ue_status; /* DWORD 0 */
u8 tpre_ue_status; /* DWORD 0 */
u8 txips_ue_status; /* DWORD 0 */
u8 txulp0_ue_status; /* DWORD 0 */
u8 txulp1_ue_status; /* DWORD 0 */
u8 uc_ue_status; /* DWORD 0 */
u8 wdma_ue_status; /* DWORD 0 */
u8 txulp2_ue_status; /* DWORD 0 */
u8 host1_ue_status; /* DWORD 0 */
u8 p0_ob_link_ue_status; /* DWORD 0 */
u8 p1_ob_link_ue_status; /* DWORD 0 */
u8 host_gpio_ue_status; /* DWORD 0 */
u8 mbox_netw_ue_status; /* DWORD 0 */
u8 mbox_stor_ue_status; /* DWORD 0 */
u8 axgmac0_ue_status; /* DWORD 0 */
u8 axgmac1_ue_status; /* DWORD 0 */
u8 mpu_intpend_ue_status; /* DWORD 0 */
} __packed;
struct PCICFG_UE_STATUS_LOW_CSR_AMAP {
u32 dw[1];
};
/* Unrecoverable Error Status (High) Register. Each bit corresponds to
 * an internal Unrecoverable Error. These are set by hardware and may be
 * cleared by writing a one to the respective bit(s) to be cleared. Any
 * bit being set that is also unmasked will result in Unrecoverable Error
 * interrupt notification to the host CPU and/or Server Management chip;
 * and the transitioning of BladeEngine to an Offline state.
 */
struct BE_PCICFG_UE_STATUS_HI_CSR_AMAP {
u8 jtag_ue_status; /* DWORD 0 */
u8 lpcmemhost_ue_status; /* DWORD 0 */
u8 mgmt_mac_ue_status; /* DWORD 0 */
u8 mpu_iram_ue_status; /* DWORD 0 */
u8 pcs0online_ue_status; /* DWORD 0 */
u8 pcs1online_ue_status; /* DWORD 0 */
u8 pctl0_ue_status; /* DWORD 0 */
u8 pctl1_ue_status; /* DWORD 0 */
u8 pmem_ue_status; /* DWORD 0 */
u8 rr_ue_status; /* DWORD 0 */
u8 rxpp_ue_status; /* DWORD 0 */
u8 txpb_ue_status; /* DWORD 0 */
u8 txp_ue_status; /* DWORD 0 */
u8 xaui_ue_status; /* DWORD 0 */
u8 arm_ue_status; /* DWORD 0 */
u8 ipc_ue_status; /* DWORD 0 */
u8 rsvd0[16]; /* DWORD 0 */
} __packed;
struct PCICFG_UE_STATUS_HI_CSR_AMAP {
u32 dw[1];
};
/* Unrecoverable Error Mask (Low) Register. Each bit, when set to one,
 * will mask the associated Unrecoverable Error status bit from notification
 * of Unrecoverable Error to the host CPU and/or Server Management chip and
 * the transitioning of all BladeEngine units to an Offline state.
 */
/* Bit-for-bit mask of BE_PCICFG_UE_STATUS_LOW_CSR_AMAP above. */
struct BE_PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP {
u8 cev_ue_mask; /* DWORD 0 */
u8 ctx_ue_mask; /* DWORD 0 */
u8 dbuf_ue_mask; /* DWORD 0 */
u8 erx_ue_mask; /* DWORD 0 */
u8 host_ue_mask; /* DWORD 0 */
u8 mpu_ue_mask; /* DWORD 0 */
u8 ndma_ue_mask; /* DWORD 0 */
u8 ptc_ue_mask; /* DWORD 0 */
u8 rdma_ue_mask; /* DWORD 0 */
u8 rxf_ue_mask; /* DWORD 0 */
u8 rxips_ue_mask; /* DWORD 0 */
u8 rxulp0_ue_mask; /* DWORD 0 */
u8 rxulp1_ue_mask; /* DWORD 0 */
u8 rxulp2_ue_mask; /* DWORD 0 */
u8 tim_ue_mask; /* DWORD 0 */
u8 tpost_ue_mask; /* DWORD 0 */
u8 tpre_ue_mask; /* DWORD 0 */
u8 txips_ue_mask; /* DWORD 0 */
u8 txulp0_ue_mask; /* DWORD 0 */
u8 txulp1_ue_mask; /* DWORD 0 */
u8 uc_ue_mask; /* DWORD 0 */
u8 wdma_ue_mask; /* DWORD 0 */
u8 txulp2_ue_mask; /* DWORD 0 */
u8 host1_ue_mask; /* DWORD 0 */
u8 p0_ob_link_ue_mask; /* DWORD 0 */
u8 p1_ob_link_ue_mask; /* DWORD 0 */
u8 host_gpio_ue_mask; /* DWORD 0 */
u8 mbox_netw_ue_mask; /* DWORD 0 */
u8 mbox_stor_ue_mask; /* DWORD 0 */
u8 axgmac0_ue_mask; /* DWORD 0 */
u8 axgmac1_ue_mask; /* DWORD 0 */
u8 mpu_intpend_ue_mask; /* DWORD 0 */
} __packed;
struct PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP {
u32 dw[1];
};
/* Unrecoverable Error Mask (High) Register. Each bit, when set to one,
 * will mask the associated Unrecoverable Error status bit from notification
 * of Unrecoverable Error to the host CPU and/or Server Management chip and
 * the transitioning of all BladeEngine units to an Offline state.
 */
/* Bit-for-bit mask of BE_PCICFG_UE_STATUS_HI_CSR_AMAP above. */
struct BE_PCICFG_UE_STATUS_HI_MASK_CSR_AMAP {
u8 jtag_ue_mask; /* DWORD 0 */
u8 lpcmemhost_ue_mask; /* DWORD 0 */
u8 mgmt_mac_ue_mask; /* DWORD 0 */
u8 mpu_iram_ue_mask; /* DWORD 0 */
u8 pcs0online_ue_mask; /* DWORD 0 */
u8 pcs1online_ue_mask; /* DWORD 0 */
u8 pctl0_ue_mask; /* DWORD 0 */
u8 pctl1_ue_mask; /* DWORD 0 */
u8 pmem_ue_mask; /* DWORD 0 */
u8 rr_ue_mask; /* DWORD 0 */
u8 rxpp_ue_mask; /* DWORD 0 */
u8 txpb_ue_mask; /* DWORD 0 */
u8 txp_ue_mask; /* DWORD 0 */
u8 xaui_ue_mask; /* DWORD 0 */
u8 arm_ue_mask; /* DWORD 0 */
u8 ipc_ue_mask; /* DWORD 0 */
u8 rsvd0[16]; /* DWORD 0 */
} __packed;
struct PCICFG_UE_STATUS_HI_MASK_CSR_AMAP {
u32 dw[1];
};
/* Online Control Register 0. This register controls various units within
 * BladeEngine being in an Online or Offline state.
 */
struct BE_PCICFG_ONLINE0_CSR_AMAP {
u8 cev_online; /* DWORD 0 */
u8 ctx_online; /* DWORD 0 */
u8 dbuf_online; /* DWORD 0 */
u8 erx_online; /* DWORD 0 */
u8 host_online; /* DWORD 0 */
u8 mpu_online; /* DWORD 0 */
u8 ndma_online; /* DWORD 0 */
u8 ptc_online; /* DWORD 0 */
u8 rdma_online; /* DWORD 0 */
u8 rxf_online; /* DWORD 0 */
u8 rxips_online; /* DWORD 0 */
u8 rxulp0_online; /* DWORD 0 */
u8 rxulp1_online; /* DWORD 0 */
u8 rxulp2_online; /* DWORD 0 */
u8 tim_online; /* DWORD 0 */
u8 tpost_online; /* DWORD 0 */
u8 tpre_online; /* DWORD 0 */
u8 txips_online; /* DWORD 0 */
u8 txulp0_online; /* DWORD 0 */
u8 txulp1_online; /* DWORD 0 */
u8 uc_online; /* DWORD 0 */
u8 wdma_online; /* DWORD 0 */
u8 txulp2_online; /* DWORD 0 */
u8 host1_online; /* DWORD 0 */
u8 p0_ob_link_online; /* DWORD 0 */
u8 p1_ob_link_online; /* DWORD 0 */
u8 host_gpio_online; /* DWORD 0 */
u8 mbox_netw_online; /* DWORD 0 */
u8 mbox_stor_online; /* DWORD 0 */
u8 axgmac0_online; /* DWORD 0 */
u8 axgmac1_online; /* DWORD 0 */
u8 mpu_intpend_online; /* DWORD 0 */
} __packed;
struct PCICFG_ONLINE0_CSR_AMAP {
u32 dw[1];
};
/* Online Control Register 1. This register controls various units within
 * BladeEngine being in an Online or Offline state.
 */
struct BE_PCICFG_ONLINE1_CSR_AMAP {
u8 jtag_online; /* DWORD 0 */
u8 lpcmemhost_online; /* DWORD 0 */
u8 mgmt_mac_online; /* DWORD 0 */
u8 mpu_iram_online; /* DWORD 0 */
u8 pcs0online_online; /* DWORD 0 */
u8 pcs1online_online; /* DWORD 0 */
u8 pctl0_online; /* DWORD 0 */
u8 pctl1_online; /* DWORD 0 */
u8 pmem_online; /* DWORD 0 */
u8 rr_online; /* DWORD 0 */
u8 rxpp_online; /* DWORD 0 */
u8 txpb_online; /* DWORD 0 */
u8 txp_online; /* DWORD 0 */
u8 xaui_online; /* DWORD 0 */
u8 arm_online; /* DWORD 0 */
u8 ipc_online; /* DWORD 0 */
u8 rsvd0[16]; /* DWORD 0 */
} __packed;
struct PCICFG_ONLINE1_CSR_AMAP {
u32 dw[1];
};
/* Host Timer Register. */
struct BE_PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP {
u8 hosttimer[24]; /* DWORD 0 */
u8 hostintr; /* DWORD 0 */
u8 rsvd0[7]; /* DWORD 0 */
} __packed;
struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP {
u32 dw[1];
};
/* Scratchpad Register (for software use). */
struct BE_PCICFG_SCRATCHPAD_CSR_AMAP {
u8 scratchpad[32]; /* DWORD 0 */
} __packed;
struct PCICFG_SCRATCHPAD_CSR_AMAP {
u32 dw[1];
};
/* PCI Express Capabilities Register. */
struct BE_PCICFG_PCIE_CAP_CSR_AMAP {
u8 capid[8]; /* DWORD 0 */
u8 nextcap[8]; /* DWORD 0 */
u8 capver[4]; /* DWORD 0 */
u8 devport[4]; /* DWORD 0 */
u8 rsvd0[6]; /* DWORD 0 */
u8 rsvd1[2]; /* DWORD 0 */
} __packed;
struct PCICFG_PCIE_CAP_CSR_AMAP {
u32 dw[1];
};
/* PCI Express Device Capabilities Register. */
struct BE_PCICFG_PCIE_DEVCAP_CSR_AMAP {
u8 payload[3]; /* DWORD 0 */
u8 rsvd0[3]; /* DWORD 0 */
u8 lo_lat[3]; /* DWORD 0 */
u8 l1_lat[3]; /* DWORD 0 */
u8 rsvd1[3]; /* DWORD 0 */
u8 rsvd2[3]; /* DWORD 0 */
u8 pwr_value[8]; /* DWORD 0 */
u8 pwr_scale[2]; /* DWORD 0 */
u8 rsvd3[4]; /* DWORD 0 */
} __packed;
struct PCICFG_PCIE_DEVCAP_CSR_AMAP {
u32 dw[1];
};
/* PCI Express Device Control/Status Registers. */
struct BE_PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP {
u8 CorrErrReportEn; /* DWORD 0 */
u8 NonFatalErrReportEn; /* DWORD 0 */
u8 FatalErrReportEn; /* DWORD 0 */
u8 UnsuppReqReportEn; /* DWORD 0 */
u8 EnableRelaxOrder; /* DWORD 0 */
u8 Max_Payload_Size[3]; /* DWORD 0 */
u8 ExtendTagFieldEnable; /* DWORD 0 */
u8 PhantomFnEnable; /* DWORD 0 */
u8 AuxPwrPMEnable; /* DWORD 0 */
u8 EnableNoSnoop; /* DWORD 0 */
u8 Max_Read_Req_Size[3]; /* DWORD 0 */
u8 rsvd0; /* DWORD 0 */
u8 CorrErrDetect; /* DWORD 0 */
u8 NonFatalErrDetect; /* DWORD 0 */
u8 FatalErrDetect; /* DWORD 0 */
u8 UnsuppReqDetect; /* DWORD 0 */
u8 AuxPwrDetect; /* DWORD 0 */
u8 TransPending; /* DWORD 0 */
u8 rsvd1[10]; /* DWORD 0 */
} __packed;
struct PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP {
u32 dw[1];
};
/* PCI Express Link Capabilities Register. */
struct BE_PCICFG_PCIE_LINK_CAP_CSR_AMAP {
u8 MaxLinkSpeed[4]; /* DWORD 0 */
u8 MaxLinkWidth[6]; /* DWORD 0 */
u8 ASPMSupport[2]; /* DWORD 0 */
u8 L0sExitLat[3]; /* DWORD 0 */
u8 L1ExitLat[3]; /* DWORD 0 */
u8 rsvd0[6]; /* DWORD 0 */
u8 PortNum[8]; /* DWORD 0 */
} __packed;
struct PCICFG_PCIE_LINK_CAP_CSR_AMAP {
u32 dw[1];
};
/* PCI Express Link Status Register. */
struct BE_PCICFG_PCIE_LINK_STATUS_CSR_AMAP {
u8 ASPMCtl[2]; /* DWORD 0 */
u8 rsvd0; /* DWORD 0 */
u8 ReadCmplBndry; /* DWORD 0 */
u8 LinkDisable; /* DWORD 0 */
u8 RetrainLink; /* DWORD 0 */
u8 CommonClkConfig; /* DWORD 0 */
u8 ExtendSync; /* DWORD 0 */
u8 rsvd1[8]; /* DWORD 0 */
u8 LinkSpeed[4]; /* DWORD 0 */
u8 NegLinkWidth[6]; /* DWORD 0 */
u8 LinkTrainErr; /* DWORD 0 */
u8 LinkTrain; /* DWORD 0 */
u8 SlotClkConfig; /* DWORD 0 */
u8 rsvd2[3]; /* DWORD 0 */
} __packed;
struct PCICFG_PCIE_LINK_STATUS_CSR_AMAP {
u32 dw[1];
};
/* PCI Express MSI Configuration Register. */
struct BE_PCICFG_MSI_CSR_AMAP {
u8 capid[8]; /* DWORD 0 */
u8 nextptr[8]; /* DWORD 0 */
u8 tablesize[11]; /* DWORD 0 */
u8 rsvd0[3]; /* DWORD 0 */
u8 funcmask; /* DWORD 0 */
u8 en; /* DWORD 0 */
} __packed;
struct PCICFG_MSI_CSR_AMAP {
u32 dw[1];
};
/* MSI-X Table Offset Register. */
struct BE_PCICFG_MSIX_TABLE_CSR_AMAP {
u8 tablebir[3]; /* DWORD 0 */
u8 offset[29]; /* DWORD 0 */
} __packed;
struct PCICFG_MSIX_TABLE_CSR_AMAP {
u32 dw[1];
};
/* MSI-X PBA Offset Register. */
struct BE_PCICFG_MSIX_PBA_CSR_AMAP {
u8 pbabir[3]; /* DWORD 0 */
u8 offset[29]; /* DWORD 0 */
} __packed;
struct PCICFG_MSIX_PBA_CSR_AMAP {
u32 dw[1];
};
/* PCI Express MSI-X Message Vector Control Register. */
struct BE_PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP {
u8 vector_control; /* DWORD 0 */
u8 rsvd0[31]; /* DWORD 0 */
} __packed;
struct PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP {
u32 dw[1];
};
/* PCI Express MSI-X Message Data Register. */
struct BE_PCICFG_MSIX_MSG_DATA_CSR_AMAP {
u8 data[16]; /* DWORD 0 */
u8 rsvd0[16]; /* DWORD 0 */
} __packed;
struct PCICFG_MSIX_MSG_DATA_CSR_AMAP {
u32 dw[1];
};
/* PCI Express MSI-X Message Address Register - High Part. */
struct BE_PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP {
u8 addr[32]; /* DWORD 0 */
} __packed;
struct PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP {
u32 dw[1];
};
/* PCI Express MSI-X Message Address Register - Low Part. */
struct BE_PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP {
u8 rsvd0[2]; /* DWORD 0 */
u8 addr[30]; /* DWORD 0 */
} __packed;
struct PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP {
u32 dw[1];
};
/* Autogenerated reserved-space fillers used by the full config map below. */
struct BE_PCICFG_ANON_18_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
} __packed;
struct PCICFG_ANON_18_RSVD_AMAP {
u32 dw[1];
};
struct BE_PCICFG_ANON_19_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
} __packed;
struct PCICFG_ANON_19_RSVD_AMAP {
u32 dw[1];
};
struct BE_PCICFG_ANON_20_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
u8 rsvd1[25][32]; /* DWORD 1 */
} __packed;
struct PCICFG_ANON_20_RSVD_AMAP {
u32 dw[26];
};
struct BE_PCICFG_ANON_21_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
u8 rsvd1[1919][32]; /* DWORD 1 */
} __packed;
struct PCICFG_ANON_21_RSVD_AMAP {
u32 dw[1920];
};
/* One MSI-X table entry: control, data and 64-bit address. */
struct BE_PCICFG_ANON_22_MESSAGE_AMAP {
struct BE_PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP vec_ctrl;
struct BE_PCICFG_MSIX_MSG_DATA_CSR_AMAP msg_data;
struct BE_PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP addr_hi;
struct BE_PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP addr_low;
} __packed;
struct PCICFG_ANON_22_MESSAGE_AMAP {
u32 dw[4];
};
struct BE_PCICFG_ANON_23_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
u8 rsvd1[895][32]; /* DWORD 1 */
} __packed;
struct PCICFG_ANON_23_RSVD_AMAP {
u32 dw[896];
};
/* These PCI Configuration Space registers are for the Storage Function of
* BladeEngine (Function 0). In the memory map of the registers below their
* table,
*/
struct BE_PCICFG0_CSRMAP_AMAP {
struct BE_PCICFG_ID_CSR_AMAP id;
u8 rsvd0[32]; /* DWORD 1 */
u8 rsvd1[32]; /* DWORD 2 */
u8 rsvd2[32]; /* DWORD 3 */
struct BE_PCICFG_IOBAR_CSR_AMAP iobar;
struct BE_PCICFG_MEMBAR0_CSR_AMAP membar0;
struct BE_PCICFG_MEMBAR1_LO_CSR_AMAP membar1_lo;
struct BE_PCICFG_MEMBAR1_HI_CSR_AMAP membar1_hi;
struct BE_PCICFG_MEMBAR2_LO_CSR_AMAP membar2_lo;
struct BE_PCICFG_MEMBAR2_HI_CSR_AMAP membar2_hi;
u8 rsvd3[32]; /* DWORD 10 */
struct BE_PCICFG_SUBSYSTEM_ID_F0_CSR_AMAP subsystem_id;
u8 rsvd4[32]; /* DWORD 12 */
u8 rsvd5[32]; /* DWORD 13 */
u8 rsvd6[32]; /* DWORD 14 */
u8 rsvd7[32]; /* DWORD 15 */
struct BE_PCICFG_SEMAPHORE_CSR_AMAP semaphore[4];
struct BE_PCICFG_SOFT_RESET_CSR_AMAP soft_reset;
u8 rsvd8[32]; /* DWORD 21 */
struct BE_PCICFG_SCRATCHPAD_CSR_AMAP scratchpad;
u8 rsvd9[32]; /* DWORD 23 */
u8 rsvd10[32]; /* DWORD 24 */
u8 rsvd11[32]; /* DWORD 25 */
u8 rsvd12[32]; /* DWORD 26 */
u8 rsvd13[32]; /* DWORD 27 */
u8 rsvd14[2][32]; /* DWORD 28 */
u8 rsvd15[32]; /* DWORD 30 */
u8 rsvd16[32]; /* DWORD 31 */
u8 rsvd17[8][32]; /* DWORD 32 */
struct BE_PCICFG_UE_STATUS_LOW_CSR_AMAP ue_status_low;
struct BE_PCICFG_UE_STATUS_HI_CSR_AMAP ue_status_hi;
struct BE_PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP ue_status_low_mask;
struct BE_PCICFG_UE_STATUS_HI_MASK_CSR_AMAP ue_status_hi_mask;
struct BE_PCICFG_ONLINE0_CSR_AMAP online0;
struct BE_PCICFG_ONLINE1_CSR_AMAP online1;
u8 rsvd18[32]; /* DWORD 46 */
u8 rsvd19[32]; /* DWORD 47 */
u8 rsvd20[32]; /* DWORD 48 */
u8 rsvd21[32]; /* DWORD 49 */
struct BE_PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP host_timer_int_ctrl;
u8 rsvd22[32]; /* DWORD 51 */
struct BE_PCICFG_PCIE_CAP_CSR_AMAP pcie_cap;
struct BE_PCICFG_PCIE_DEVCAP_CSR_AMAP pcie_devcap;
struct BE_PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP pcie_control_status;
struct BE_PCICFG_PCIE_LINK_CAP_CSR_AMAP pcie_link_cap;
struct BE_PCICFG_PCIE_LINK_STATUS_CSR_AMAP pcie_link_status;
struct BE_PCICFG_MSI_CSR_AMAP msi;
struct BE_PCICFG_MSIX_TABLE_CSR_AMAP msix_table_offset;
struct BE_PCICFG_MSIX_PBA_CSR_AMAP msix_pba_offset;
u8 rsvd23[32]; /* DWORD 60 */
u8 rsvd24[32]; /* DWORD 61 */
u8 rsvd25[32]; /* DWORD 62 */
u8 rsvd26[32]; /* DWORD 63 */
u8 rsvd27[32]; /* DWORD 64 */
u8 rsvd28[32]; /* DWORD 65 */
u8 rsvd29[32]; /* DWORD 66 */
u8 rsvd30[32]; /* DWORD 67 */
u8 rsvd31[32]; /* DWORD 68 */
u8 rsvd32[32]; /* DWORD 69 */
u8 rsvd33[32]; /* DWORD 70 */
u8 rsvd34[32]; /* DWORD 71 */
u8 rsvd35[32]; /* DWORD 72 */
u8 rsvd36[32]; /* DWORD 73 */
u8 rsvd37[32]; /* DWORD 74 */
u8 rsvd38[32]; /* DWORD 75 */
u8 rsvd39[32]; /* DWORD 76 */
u8 rsvd40[32]; /* DWORD 77 */
u8 rsvd41[32]; /* DWORD 78 */
u8 rsvd42[32]; /* DWORD 79 */
u8 rsvd43[32]; /* DWORD 80 */
u8 rsvd44[32]; /* DWORD 81 */
u8 rsvd45[32]; /* DWORD 82 */
u8 rsvd46[32]; /* DWORD 83 */
u8 rsvd47[32]; /* DWORD 84 */
u8 rsvd48[32]; /* DWORD 85 */
u8 rsvd49[32]; /* DWORD 86 */
u8 rsvd50[32]; /* DWORD 87 */
u8 rsvd51[32]; /* DWORD 88 */
u8 rsvd52[32]; /* DWORD 89 */
u8 rsvd53[32]; /* DWORD 90 */
u8 rsvd54[32]; /* DWORD 91 */
u8 rsvd55[32]; /* DWORD 92 */
u8 rsvd56[832]; /* DWORD 93 */
u8 rsvd57[32]; /* DWORD 119 */
u8 rsvd58[32]; /* DWORD 120 */
u8 rsvd59[32]; /* DWORD 121 */
u8 rsvd60[32]; /* DWORD 122 */
u8 rsvd61[32]; /* DWORD 123 */
u8 rsvd62[32]; /* DWORD 124 */
u8 rsvd63[32]; /* DWORD 125 */
u8 rsvd64[32]; /* DWORD 126 */
u8 rsvd65[32]; /* DWORD 127 */
u8 rsvd66[61440]; /* DWORD 128 */
struct BE_PCICFG_ANON_22_MESSAGE_AMAP message[32];
u8 rsvd67[28672]; /* DWORD 2176 */
u8 rsvd68[32]; /* DWORD 3072 */
u8 rsvd69[1023][32]; /* DWORD 3073 */
} __packed;
/* Raw-DWORD view of the Function 0 PCI config space map (4096 DWORDs = 16KB). */
struct PCICFG0_CSRMAP_AMAP {
	u32 dw[4096];
};
/* Generated reserved filler: one 32-bit DWORD (in this file's AMAP
 * convention a u8 array length counts bits, not bytes).
 */
struct BE_PCICFG_ANON_24_RSVD_AMAP {
	u8 rsvd0[32];	/* DWORD 0 */
} __packed;
/* Raw single-DWORD view of the reserved region above. */
struct PCICFG_ANON_24_RSVD_AMAP {
	u32 dw[1];
};
/* Generated reserved filler: one DWORD. */
struct BE_PCICFG_ANON_25_RSVD_AMAP {
	u8 rsvd0[32];	/* DWORD 0 */
} __packed;
/* Raw single-DWORD view of the reserved region above. */
struct PCICFG_ANON_25_RSVD_AMAP {
	u32 dw[1];
};
/* Generated reserved filler: one DWORD. */
struct BE_PCICFG_ANON_26_RSVD_AMAP {
	u8 rsvd0[32];	/* DWORD 0 */
} __packed;
/* Raw single-DWORD view of the reserved region above. */
struct PCICFG_ANON_26_RSVD_AMAP {
	u32 dw[1];
};
/* Generated reserved filler: two DWORDs. */
struct BE_PCICFG_ANON_27_RSVD_AMAP {
	u8 rsvd0[32];	/* DWORD 0 */
	u8 rsvd1[32];	/* DWORD 1 */
} __packed;
/* Raw two-DWORD view of the reserved region above. */
struct PCICFG_ANON_27_RSVD_AMAP {
	u32 dw[2];
};
/* Generated reserved filler: four DWORDs (1 + 3). */
struct BE_PCICFG_ANON_28_RSVD_AMAP {
	u8 rsvd0[32];	/* DWORD 0 */
	u8 rsvd1[3][32];	/* DWORD 1 */
} __packed;
/* Raw four-DWORD view of the reserved region above. */
struct PCICFG_ANON_28_RSVD_AMAP {
	u32 dw[4];
};
/* Generated reserved filler: 37 DWORDs (1 + 36). */
struct BE_PCICFG_ANON_29_RSVD_AMAP {
	u8 rsvd0[32];	/* DWORD 0 */
	u8 rsvd1[36][32];	/* DWORD 1 */
} __packed;
/* Raw 37-DWORD view of the reserved region above. */
struct PCICFG_ANON_29_RSVD_AMAP {
	u32 dw[37];
};
/* Generated reserved filler: 1931 DWORDs (1 + 1930). */
struct BE_PCICFG_ANON_30_RSVD_AMAP {
	u8 rsvd0[32];	/* DWORD 0 */
	u8 rsvd1[1930][32];	/* DWORD 1 */
} __packed;
/* Raw 1931-DWORD view of the reserved region above. */
struct PCICFG_ANON_30_RSVD_AMAP {
	u32 dw[1931];
};
/* One MSI-X table entry for Function 1: vector control, message data,
 * and the message address split into high/low halves (four DWORDs total;
 * the member CSR types are defined earlier in this file).
 */
struct BE_PCICFG_ANON_31_MESSAGE_AMAP {
	struct BE_PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP vec_ctrl;
	struct BE_PCICFG_MSIX_MSG_DATA_CSR_AMAP msg_data;
	struct BE_PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP addr_hi;
	struct BE_PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP addr_low;
} __packed;
/* Raw four-DWORD view of one MSI-X table entry. */
struct PCICFG_ANON_31_MESSAGE_AMAP {
	u32 dw[4];
};
/* Generated reserved filler: 896 DWORDs (1 + 895). */
struct BE_PCICFG_ANON_32_RSVD_AMAP {
	u8 rsvd0[32];	/* DWORD 0 */
	u8 rsvd1[895][32];	/* DWORD 1 */
} __packed;
/* Raw 896-DWORD view of the reserved region above. */
struct PCICFG_ANON_32_RSVD_AMAP {
	u32 dw[896];
};
/* This PCI configuration space register map is for the Networking Function of
 * BladeEngine (Function 1).
 *
 * Each member's hardware offset is tracked by the trailing "DWORD n"
 * comments. In this generated AMAP layout a u8 array length counts bits
 * (32 bits per DWORD — note rsvd23[64] covers DWORDs 60-61), and the rsvd*
 * members are padding that keeps the following registers at their hardware
 * offsets: do not reorder or resize anything here.
 */
struct BE_PCICFG1_CSRMAP_AMAP {
	struct BE_PCICFG_ID_CSR_AMAP id;
	u8 rsvd0[32];	/* DWORD 1 */
	u8 rsvd1[32];	/* DWORD 2 */
	u8 rsvd2[32];	/* DWORD 3 */
	struct BE_PCICFG_IOBAR_CSR_AMAP iobar;
	struct BE_PCICFG_MEMBAR0_CSR_AMAP membar0;
	struct BE_PCICFG_MEMBAR1_LO_CSR_AMAP membar1_lo;
	struct BE_PCICFG_MEMBAR1_HI_CSR_AMAP membar1_hi;
	struct BE_PCICFG_MEMBAR2_LO_CSR_AMAP membar2_lo;
	struct BE_PCICFG_MEMBAR2_HI_CSR_AMAP membar2_hi;
	u8 rsvd3[32];	/* DWORD 10 */
	struct BE_PCICFG_SUBSYSTEM_ID_F1_CSR_AMAP subsystem_id;
	u8 rsvd4[32];	/* DWORD 12 */
	u8 rsvd5[32];	/* DWORD 13 */
	u8 rsvd6[32];	/* DWORD 14 */
	u8 rsvd7[32];	/* DWORD 15 */
	struct BE_PCICFG_SEMAPHORE_CSR_AMAP semaphore[4];
	struct BE_PCICFG_SOFT_RESET_CSR_AMAP soft_reset;
	u8 rsvd8[32];	/* DWORD 21 */
	struct BE_PCICFG_SCRATCHPAD_CSR_AMAP scratchpad;
	u8 rsvd9[32];	/* DWORD 23 */
	u8 rsvd10[32];	/* DWORD 24 */
	u8 rsvd11[32];	/* DWORD 25 */
	u8 rsvd12[32];	/* DWORD 26 */
	u8 rsvd13[32];	/* DWORD 27 */
	u8 rsvd14[2][32];	/* DWORD 28 */
	u8 rsvd15[32];	/* DWORD 30 */
	u8 rsvd16[32];	/* DWORD 31 */
	u8 rsvd17[8][32];	/* DWORD 32 */
	struct BE_PCICFG_UE_STATUS_LOW_CSR_AMAP ue_status_low;
	struct BE_PCICFG_UE_STATUS_HI_CSR_AMAP ue_status_hi;
	struct BE_PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP ue_status_low_mask;
	struct BE_PCICFG_UE_STATUS_HI_MASK_CSR_AMAP ue_status_hi_mask;
	struct BE_PCICFG_ONLINE0_CSR_AMAP online0;
	struct BE_PCICFG_ONLINE1_CSR_AMAP online1;
	u8 rsvd18[32];	/* DWORD 46 */
	u8 rsvd19[32];	/* DWORD 47 */
	u8 rsvd20[32];	/* DWORD 48 */
	u8 rsvd21[32];	/* DWORD 49 */
	struct BE_PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP host_timer_int_ctrl;
	u8 rsvd22[32];	/* DWORD 51 */
	struct BE_PCICFG_PCIE_CAP_CSR_AMAP pcie_cap;
	struct BE_PCICFG_PCIE_DEVCAP_CSR_AMAP pcie_devcap;
	struct BE_PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP pcie_control_status;
	struct BE_PCICFG_PCIE_LINK_CAP_CSR_AMAP pcie_link_cap;
	struct BE_PCICFG_PCIE_LINK_STATUS_CSR_AMAP pcie_link_status;
	struct BE_PCICFG_MSI_CSR_AMAP msi;
	struct BE_PCICFG_MSIX_TABLE_CSR_AMAP msix_table_offset;
	struct BE_PCICFG_MSIX_PBA_CSR_AMAP msix_pba_offset;
	u8 rsvd23[64];	/* DWORD 60 */
	u8 rsvd24[32];	/* DWORD 62 */
	u8 rsvd25[32];	/* DWORD 63 */
	u8 rsvd26[32];	/* DWORD 64 */
	u8 rsvd27[32];	/* DWORD 65 */
	u8 rsvd28[32];	/* DWORD 66 */
	u8 rsvd29[32];	/* DWORD 67 */
	u8 rsvd30[32];	/* DWORD 68 */
	u8 rsvd31[32];	/* DWORD 69 */
	u8 rsvd32[32];	/* DWORD 70 */
	u8 rsvd33[32];	/* DWORD 71 */
	u8 rsvd34[32];	/* DWORD 72 */
	u8 rsvd35[32];	/* DWORD 73 */
	u8 rsvd36[32];	/* DWORD 74 */
	u8 rsvd37[128];	/* DWORD 75 */
	u8 rsvd38[32];	/* DWORD 79 */
	u8 rsvd39[1184];	/* DWORD 80 */
	u8 rsvd40[61792];	/* DWORD 117 */
	struct BE_PCICFG_ANON_31_MESSAGE_AMAP message[32];
	u8 rsvd41[28672];	/* DWORD 2176 */
	u8 rsvd42[32];	/* DWORD 3072 */
	u8 rsvd43[1023][32];	/* DWORD 3073 */
} __packed;
/* Raw-DWORD view of the Function 1 PCI config space map (4096 DWORDs = 16KB). */
struct PCICFG1_CSRMAP_AMAP {
	u32 dw[4096];
};
#endif /* __pcicfg_amap_h__ */

View File

@ -0,0 +1,111 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __post_codes_amap_h__
#define __post_codes_amap_h__
/* --- MGMT_HBA_POST_STAGE_ENUM --- */
/* Firmware POST progress codes, reported through the stage field of
 * BE_MGMT_HBA_POST_STATUS_STRUCT_AMAP (defined below).
 */
#define POST_STAGE_POWER_ON_RESET (0)	/* State after a cold or warm boot. */
#define POST_STAGE_AWAITING_HOST_RDY (1)	/* ARM boot code awaiting a
						   go-ahead from the host. */
#define POST_STAGE_HOST_RDY (2)	/* Host has given go-ahead to ARM. */
#define POST_STAGE_BE_RESET (3)	/* Host wants to reset chip, this is a chip
				   workaround */
#define POST_STAGE_SEEPROM_CS_START (256)	/* SEEPROM checksum
						   test start. */
#define POST_STAGE_SEEPROM_CS_DONE (257)	/* SEEPROM checksum test
						   done. */
#define POST_STAGE_DDR_CONFIG_START (512)	/* DDR configuration start. */
#define POST_STAGE_DDR_CONFIG_DONE (513)	/* DDR configuration done. */
#define POST_STAGE_DDR_CALIBRATE_START (768)	/* DDR calibration start. */
#define POST_STAGE_DDR_CALIBRATE_DONE (769)	/* DDR calibration done. */
#define POST_STAGE_DDR_TEST_START (1024)	/* DDR memory test start. */
#define POST_STAGE_DDR_TEST_DONE (1025)	/* DDR memory test done. */
#define POST_STAGE_REDBOOT_INIT_START (1536)	/* Redboot starts execution. */
#define POST_STAGE_REDBOOT_INIT_DONE (1537)	/* Redboot done execution. */
#define POST_STAGE_FW_IMAGE_LOAD_START (1792)	/* Firmware image load to
						   DDR start. */
#define POST_STAGE_FW_IMAGE_LOAD_DONE (1793)	/* Firmware image load
						   to DDR done. */
#define POST_STAGE_ARMFW_START (2048)	/* ARMfw runtime code
					   starts execution. */
#define POST_STAGE_DHCP_QUERY_START (2304)	/* DHCP server query start. */
#define POST_STAGE_DHCP_QUERY_DONE (2305)	/* DHCP server query done. */
#define POST_STAGE_BOOT_TARGET_DISCOVERY_START (2560)	/* Boot Target
							   Discovery Start. */
#define POST_STAGE_BOOT_TARGET_DISCOVERY_DONE (2561)	/* Boot Target
							   Discovery Done. */
#define POST_STAGE_RC_OPTION_SET (2816)	/* Remote configuration
					   option is set in SEEPROM */
#define POST_STAGE_SWITCH_LINK (2817)	/* Wait for link up on switch */
#define POST_STAGE_SEND_ICDS_MESSAGE (2818)	/* Send the ICDS message
						   to switch */
#define POST_STAGE_PERFROM_TFTP (2819)	/* Download xml using TFTP;
					   NOTE(review): the "PERFROM"
					   spelling is part of the
					   published name. */
#define POST_STAGE_PARSE_XML (2820)	/* Parse XML file */
#define POST_STAGE_DOWNLOAD_IMAGE (2821)	/* Download IMAGE from
						   TFTP server */
#define POST_STAGE_FLASH_IMAGE (2822)	/* Flash the IMAGE */
#define POST_STAGE_RC_DONE (2823)	/* Remote configuration
					   complete */
#define POST_STAGE_REBOOT_SYSTEM (2824)	/* Upgrade IMAGE done,
					   reboot required */
#define POST_STAGE_MAC_ADDRESS (3072)	/* MAC Address Check */
#define POST_STAGE_ARMFW_READY (49152)	/* ARMfw is done with POST
					   and ready. */
#define POST_STAGE_ARMFW_UE (61440)	/* ARMfw has asserted an
					   unrecoverable error. The
					   lower 3 hex digits of the
					   stage code identify the
					   unique error code.
					 */
/* This structure defines the format of the MPU semaphore
 * register when used for POST. Member widths are bit counts in this
 * file's AMAP convention (16 + 10 + 6 * 1 = 32 bits = one DWORD).
 */
struct BE_MGMT_HBA_POST_STATUS_STRUCT_AMAP {
	u8 stage[16];	/* DWORD 0 - POST_STAGE_* progress/error code */
	u8 rsvd0[10];	/* DWORD 0 */
	u8 iscsi_driver_loaded;	/* DWORD 0 */
	u8 option_rom_installed;	/* DWORD 0 */
	u8 iscsi_ip_conflict;	/* DWORD 0 */
	u8 iscsi_no_ip;	/* DWORD 0 */
	u8 backup_fw;	/* DWORD 0 */
	u8 error;	/* DWORD 0 */
} __packed;
/* Raw single-DWORD view of BE_MGMT_HBA_POST_STATUS_STRUCT_AMAP. */
struct MGMT_HBA_POST_STATUS_STRUCT_AMAP {
	u32 dw[1];
};
/* --- MGMT_HBA_POST_DUMMY_BITS_ENUM --- */
/* Bit positions of the flag fields in the POST status DWORD
 * (they follow the 16-bit stage and 10 reserved bits).
 */
#define POST_BIT_ISCSI_LOADED (26)
#define POST_BIT_OPTROM_INST (27)
#define POST_BIT_BAD_IP_ADDR (28)
#define POST_BIT_NO_IP_ADDR (29)
#define POST_BIT_BACKUP_FW (30)
#define POST_BIT_ERROR (31)
/* --- MGMT_HBA_POST_DUMMY_VALUES_ENUM --- */
/* Pre-shifted masks for the bit positions above (value == 1 << POST_BIT_x). */
#define POST_ISCSI_DRIVER_LOADED (67108864)	/* 1 << 26 */
#define POST_OPTROM_INSTALLED (134217728)	/* 1 << 27 */
#define POST_ISCSI_IP_ADDRESS_CONFLICT (268435456)	/* 1 << 28 */
#define POST_ISCSI_NO_IP_ADDRESS (536870912)	/* 1 << 29 */
#define POST_BACKUP_FW_LOADED (1073741824)	/* 1 << 30 */
#define POST_FATAL_ERROR (2147483648)	/* 1 << 31 */
#endif /* __post_codes_amap_h__ */

View File

@ -0,0 +1,68 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __regmap_amap_h__
#define __regmap_amap_h__
#include "pcicfg.h"
#include "ep.h"
#include "cev.h"
#include "mpu.h"
#include "doorbells.h"
/*
* This is the control and status register map for BladeEngine, showing
* the relative size and offset of each sub-module. The CSR registers
* are identical for the network and storage PCI functions. The
* CSR map is shown below, followed by details of each block,
* in sub-sections. The sub-sections begin with a description
* of CSRs that are instantiated in multiple blocks.
*/
/* Top-level BladeEngine CSR map. Each rsvd* member is 8192 bits
 * (256 DWORDs) of padding, as the DWORD offset comments show; only the
 * mpu and cev sub-blocks are populated here. Do not reorder or resize.
 */
struct BE_BLADE_ENGINE_CSRMAP_AMAP {
	struct BE_MPU_CSRMAP_AMAP mpu;
	u8 rsvd0[8192];	/* DWORD 256 */
	u8 rsvd1[8192];	/* DWORD 512 */
	struct BE_CEV_CSRMAP_AMAP cev;
	u8 rsvd2[8192];	/* DWORD 1024 */
	u8 rsvd3[8192];	/* DWORD 1280 */
	u8 rsvd4[8192];	/* DWORD 1536 */
	u8 rsvd5[8192];	/* DWORD 1792 */
	u8 rsvd6[8192];	/* DWORD 2048 */
	u8 rsvd7[8192];	/* DWORD 2304 */
	u8 rsvd8[8192];	/* DWORD 2560 */
	u8 rsvd9[8192];	/* DWORD 2816 */
	u8 rsvd10[8192];	/* DWORD 3072 */
	u8 rsvd11[8192];	/* DWORD 3328 */
	u8 rsvd12[8192];	/* DWORD 3584 */
	u8 rsvd13[8192];	/* DWORD 3840 */
	u8 rsvd14[8192];	/* DWORD 4096 */
	u8 rsvd15[8192];	/* DWORD 4352 */
	u8 rsvd16[8192];	/* DWORD 4608 */
	u8 rsvd17[8192];	/* DWORD 4864 */
	u8 rsvd18[8192];	/* DWORD 5120 */
	u8 rsvd19[8192];	/* DWORD 5376 */
	u8 rsvd20[8192];	/* DWORD 5632 */
	u8 rsvd21[8192];	/* DWORD 5888 */
	u8 rsvd22[8192];	/* DWORD 6144 */
	u8 rsvd23[17152][32];	/* DWORD 6400 */
} __packed;
/* Raw-DWORD view of the CSR map above: 23552 DWORDs
 * (6400 mapped + 17152 reserved).
 */
struct BLADE_ENGINE_CSRMAP_AMAP {
	u32 dw[23552];
};
#endif /* __regmap_amap_h__ */

View File

@ -0,0 +1,27 @@
#
# Comedi (Control and Measurement Device Interface) configuration.
#
# Note: "default n" must be lowercase; an uppercase "N" is parsed as a
# reference to an (undefined) symbol named N, not as the literal no value.
#
config COMEDI
	tristate "Data Acquisition support (comedi)"
	default n
	---help---
	  Enable support for a wide range of data acquisition devices
	  for Linux.

config COMEDI_RT
	tristate "Comedi Real-time support"
	depends on COMEDI && RT
	default n
	---help---
	  Enable real-time support for the Comedi core.

config COMEDI_PCI_DRIVERS
	tristate "Comedi PCI drivers"
	depends on COMEDI && PCI
	default n
	---help---
	  Enable lots of comedi PCI drivers to be built.

config COMEDI_USB_DRIVERS
	tristate "Comedi USB drivers"
	depends on COMEDI && USB
	default n
	---help---
	  Enable lots of comedi USB drivers to be built.

View File

@ -0,0 +1,17 @@
# Comedi core module, optional real-time support module, and subdirectories.
obj-$(CONFIG_COMEDI) += comedi.o
obj-$(CONFIG_COMEDI_RT) += comedi_rt.o

obj-$(CONFIG_COMEDI) += kcomedilib/
obj-$(CONFIG_COMEDI) += drivers/

# Objects composing comedi.o.
# Bug fix: the original list ended "comedi_ksyms.o \" — the trailing
# backslash continued the comedi-objs assignment into the comedi_rt-objs
# line, corrupting both variables.
comedi-objs := \
	comedi_fops.o \
	proc.o \
	range.o \
	drivers.o \
	comedi_compat32.o \
	comedi_ksyms.o

# Objects composing comedi_rt.o.
comedi_rt-objs := \
	rt_pend_tq.o \
	rt.o

View File

@ -0,0 +1,14 @@
TODO:
- checkpatch.pl cleanups
- Lindent
- remove all wrappers
- remove typedefs
- audit userspace interface
- reserve major number
- cleanup the individual comedi drivers as well
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
copy:
Ian Abbott <abbotti@mev.co.uk>
Frank Mori Hess <fmhess@users.sourceforge.net>
David Schleef <ds@schleef.org>

View File

@ -0,0 +1,916 @@
/*
include/comedi.h (installed as /usr/include/comedi.h)
header file for comedi
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 1998-2001 David A. Schleef <ds@schleef.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _COMEDI_H
#define _COMEDI_H
#ifdef __cplusplus
extern "C" {
#endif
#define COMEDI_MAJORVERSION 0
#define COMEDI_MINORVERSION 7
#define COMEDI_MICROVERSION 76
#define VERSION "0.7.76"
/* comedi's major device number */
#define COMEDI_MAJOR 98
/*
maximum number of minor devices. This can be increased, although
kernel structures are currently statically allocated, thus you
don't want this to be much more than you actually use.
*/
#define COMEDI_NDEVICES 16
/* number of config options in the config structure */
#define COMEDI_NDEVCONFOPTS 32
/*length of nth chunk of firmware data*/
#define COMEDI_DEVCONF_AUX_DATA3_LENGTH 25
#define COMEDI_DEVCONF_AUX_DATA2_LENGTH 26
#define COMEDI_DEVCONF_AUX_DATA1_LENGTH 27
#define COMEDI_DEVCONF_AUX_DATA0_LENGTH 28
#define COMEDI_DEVCONF_AUX_DATA_HI 29 /* most significant 32 bits of pointer address (if needed) */
#define COMEDI_DEVCONF_AUX_DATA_LO 30 /* least significant 32 bits of pointer address */
#define COMEDI_DEVCONF_AUX_DATA_LENGTH 31 /* total data length */
/* max length of device and driver names */
#define COMEDI_NAMELEN 20
typedef unsigned int lsampl_t;
typedef unsigned short sampl_t;
/* packs and unpacks a channel/range number */
#define CR_PACK(chan, rng, aref) ((((aref)&0x3)<<24) | (((rng)&0xff)<<16) | (chan))
#define CR_PACK_FLAGS(chan, range, aref, flags) (CR_PACK(chan, range, aref) | ((flags) & CR_FLAGS_MASK))
#define CR_CHAN(a) ((a)&0xffff)
#define CR_RANGE(a) (((a)>>16)&0xff)
#define CR_AREF(a) (((a)>>24)&0x03)
#define CR_FLAGS_MASK 0xfc000000
#define CR_ALT_FILTER (1<<26)
#define CR_DITHER CR_ALT_FILTER
#define CR_DEGLITCH CR_ALT_FILTER
#define CR_ALT_SOURCE (1<<27)
#define CR_EDGE (1<<30)
#define CR_INVERT (1<<31)
#define AREF_GROUND 0x00 /* analog ref = analog ground */
#define AREF_COMMON 0x01 /* analog ref = analog common */
#define AREF_DIFF 0x02 /* analog ref = differential */
#define AREF_OTHER 0x03 /* analog ref = other (undefined) */
/* counters -- these are arbitrary values */
#define GPCT_RESET 0x0001
#define GPCT_SET_SOURCE 0x0002
#define GPCT_SET_GATE 0x0004
#define GPCT_SET_DIRECTION 0x0008
#define GPCT_SET_OPERATION 0x0010
#define GPCT_ARM 0x0020
#define GPCT_DISARM 0x0040
#define GPCT_GET_INT_CLK_FRQ 0x0080
#define GPCT_INT_CLOCK 0x0001
#define GPCT_EXT_PIN 0x0002
#define GPCT_NO_GATE 0x0004
#define GPCT_UP 0x0008
#define GPCT_DOWN 0x0010
#define GPCT_HWUD 0x0020
#define GPCT_SIMPLE_EVENT 0x0040
#define GPCT_SINGLE_PERIOD 0x0080
#define GPCT_SINGLE_PW 0x0100
#define GPCT_CONT_PULSE_OUT 0x0200
#define GPCT_SINGLE_PULSE_OUT 0x0400
/* instructions */
#define INSN_MASK_WRITE 0x8000000
#define INSN_MASK_READ 0x4000000
#define INSN_MASK_SPECIAL 0x2000000
#define INSN_READ (0 | INSN_MASK_READ)
#define INSN_WRITE (1 | INSN_MASK_WRITE)
#define INSN_BITS (2 | INSN_MASK_READ|INSN_MASK_WRITE)
#define INSN_CONFIG (3 | INSN_MASK_READ|INSN_MASK_WRITE)
#define INSN_GTOD (4 | INSN_MASK_READ|INSN_MASK_SPECIAL)
#define INSN_WAIT (5 | INSN_MASK_WRITE|INSN_MASK_SPECIAL)
#define INSN_INTTRIG (6 | INSN_MASK_WRITE|INSN_MASK_SPECIAL)
/* trigger flags */
/* These flags are used in comedi_trig structures */
#define TRIG_BOGUS 0x0001 /* do the motions */
#define TRIG_DITHER 0x0002 /* enable dithering */
#define TRIG_DEGLITCH 0x0004 /* enable deglitching */
/*#define TRIG_RT 0x0008 */ /* perform op in real time */
#define TRIG_CONFIG 0x0010 /* perform configuration, not triggering */
#define TRIG_WAKE_EOS 0x0020 /* wake up on end-of-scan events */
/*#define TRIG_WRITE 0x0040*/ /* write to bidirectional devices */
/* command flags */
/* These flags are used in comedi_cmd structures */
#define CMDF_PRIORITY 0x00000008 /* try to use a real-time interrupt while performing command */
#define TRIG_RT CMDF_PRIORITY /* compatibility definition */
#define CMDF_WRITE 0x00000040
#define TRIG_WRITE CMDF_WRITE /* compatibility definition */
#define CMDF_RAWDATA 0x00000080
#define COMEDI_EV_START 0x00040000
#define COMEDI_EV_SCAN_BEGIN 0x00080000
#define COMEDI_EV_CONVERT 0x00100000
#define COMEDI_EV_SCAN_END 0x00200000
#define COMEDI_EV_STOP 0x00400000
#define TRIG_ROUND_MASK 0x00030000
#define TRIG_ROUND_NEAREST 0x00000000
#define TRIG_ROUND_DOWN 0x00010000
#define TRIG_ROUND_UP 0x00020000
#define TRIG_ROUND_UP_NEXT 0x00030000
/* trigger sources */
#define TRIG_ANY 0xffffffff
#define TRIG_INVALID 0x00000000
#define TRIG_NONE 0x00000001 /* never trigger */
#define TRIG_NOW 0x00000002 /* trigger now + N ns */
#define TRIG_FOLLOW 0x00000004 /* trigger on next lower level trig */
#define TRIG_TIME 0x00000008 /* trigger at time N ns */
#define TRIG_TIMER 0x00000010 /* trigger at rate N ns */
#define TRIG_COUNT 0x00000020 /* trigger when count reaches N */
#define TRIG_EXT 0x00000040 /* trigger on external signal N */
#define TRIG_INT 0x00000080 /* trigger on comedi-internal signal N */
#define TRIG_OTHER 0x00000100 /* driver defined */
/* subdevice flags */
#define SDF_BUSY 0x0001 /* device is busy */
#define SDF_BUSY_OWNER 0x0002 /* device is busy with your job */
#define SDF_LOCKED 0x0004 /* subdevice is locked */
#define SDF_LOCK_OWNER 0x0008 /* you own lock */
#define SDF_MAXDATA 0x0010 /* maxdata depends on channel */
#define SDF_FLAGS 0x0020 /* flags depend on channel */
#define SDF_RANGETYPE 0x0040 /* range type depends on channel */
#define SDF_MODE0 0x0080 /* can do mode 0 */
#define SDF_MODE1 0x0100 /* can do mode 1 */
#define SDF_MODE2 0x0200 /* can do mode 2 */
#define SDF_MODE3 0x0400 /* can do mode 3 */
#define SDF_MODE4 0x0800 /* can do mode 4 */
#define SDF_CMD 0x1000 /* can do commands (deprecated) */
#define SDF_SOFT_CALIBRATED 0x2000 /* subdevice uses software calibration */
#define SDF_CMD_WRITE 0x4000 /* can do output commands */
#define SDF_CMD_READ 0x8000 /* can do input commands */
#define SDF_READABLE 0x00010000 /* subdevice can be read (e.g. analog input) */
#define SDF_WRITABLE 0x00020000 /* subdevice can be written (e.g. analog output) */
#define SDF_WRITEABLE SDF_WRITABLE /* spelling error in API */
#define SDF_INTERNAL 0x00040000 /* subdevice does not have externally visible lines */
#define SDF_RT 0x00080000 /* DEPRECATED: subdevice is RT capable */
#define SDF_GROUND 0x00100000 /* can do aref=ground */
#define SDF_COMMON 0x00200000 /* can do aref=common */
#define SDF_DIFF 0x00400000 /* can do aref=diff */
#define SDF_OTHER 0x00800000 /* can do aref=other */
#define SDF_DITHER 0x01000000 /* can do dithering */
#define SDF_DEGLITCH 0x02000000 /* can do deglitching */
#define SDF_MMAP 0x04000000 /* can do mmap() */
#define SDF_RUNNING 0x08000000 /* subdevice is acquiring data */
#define SDF_LSAMPL 0x10000000 /* subdevice uses 32-bit samples */
#define SDF_PACKED 0x20000000 /* subdevice can do packed DIO */
/* reuse (recycle) these flags for PWM */
#define SDF_PWM_COUNTER SDF_MODE0 /* PWM can automatically switch off */
#define SDF_PWM_HBRIDGE SDF_MODE1 /* PWM is signed (H-bridge) */
/* subdevice types */
/* Kind of each comedi subdevice, reported in comedi_subdinfo_struct.type.
 * Values are sequential starting from COMEDI_SUBD_UNUSED == 0.
 */
enum comedi_subdevice_type {
	COMEDI_SUBD_UNUSED,	/* unused by driver */
	COMEDI_SUBD_AI,	/* analog input */
	COMEDI_SUBD_AO,	/* analog output */
	COMEDI_SUBD_DI,	/* digital input */
	COMEDI_SUBD_DO,	/* digital output */
	COMEDI_SUBD_DIO,	/* digital input/output */
	COMEDI_SUBD_COUNTER,	/* counter */
	COMEDI_SUBD_TIMER,	/* timer */
	COMEDI_SUBD_MEMORY,	/* memory, EEPROM, DPRAM */
	COMEDI_SUBD_CALIB,	/* calibration DACs */
	COMEDI_SUBD_PROC,	/* processor, DSP */
	COMEDI_SUBD_SERIAL,	/* serial IO */
	COMEDI_SUBD_PWM	/* PWM */
};
/* configuration instructions */
/* Sub-operation codes carried in data[0] of an INSN_CONFIG instruction.
 * These values are userspace ABI; never renumber existing entries.
 */
enum configuration_ids {
	INSN_CONFIG_DIO_INPUT = 0,
	INSN_CONFIG_DIO_OUTPUT = 1,
	INSN_CONFIG_DIO_OPENDRAIN = 2,
	INSN_CONFIG_ANALOG_TRIG = 16,
	/* INSN_CONFIG_WAVEFORM = 17, */
	/* INSN_CONFIG_TRIG = 18, */
	/* INSN_CONFIG_COUNTER = 19, */
	INSN_CONFIG_ALT_SOURCE = 20,
	INSN_CONFIG_DIGITAL_TRIG = 21,
	INSN_CONFIG_BLOCK_SIZE = 22,
	INSN_CONFIG_TIMER_1 = 23,
	INSN_CONFIG_FILTER = 24,
	INSN_CONFIG_CHANGE_NOTIFY = 25,
	/*ALPHA*/ INSN_CONFIG_SERIAL_CLOCK = 26,
	INSN_CONFIG_BIDIRECTIONAL_DATA = 27,
	INSN_CONFIG_DIO_QUERY = 28,
	INSN_CONFIG_PWM_OUTPUT = 29,
	INSN_CONFIG_GET_PWM_OUTPUT = 30,
	INSN_CONFIG_ARM = 31,
	INSN_CONFIG_DISARM = 32,
	INSN_CONFIG_GET_COUNTER_STATUS = 33,
	INSN_CONFIG_RESET = 34,
	INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR = 1001,	/* Use CTR as single pulse generator */
	INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR = 1002,	/* Use CTR as pulse train generator */
	INSN_CONFIG_GPCT_QUADRATURE_ENCODER = 1003,	/* Use the counter as encoder */
	INSN_CONFIG_SET_GATE_SRC = 2001,	/* Set gate source */
	INSN_CONFIG_GET_GATE_SRC = 2002,	/* Get gate source */
	INSN_CONFIG_SET_CLOCK_SRC = 2003,	/* Set master clock source */
	INSN_CONFIG_GET_CLOCK_SRC = 2004,	/* Get master clock source */
	INSN_CONFIG_SET_OTHER_SRC = 2005,	/* Set other source */
	/* INSN_CONFIG_GET_OTHER_SRC = 2006,*/	/* Get other source */
	INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE,	/* Get size in bytes of
						   subdevice's on-board fifos
						   used during streaming
						   input/output */
	INSN_CONFIG_SET_COUNTER_MODE = 4097,
	INSN_CONFIG_8254_SET_MODE = INSN_CONFIG_SET_COUNTER_MODE,	/* deprecated */
	INSN_CONFIG_8254_READ_STATUS = 4098,
	INSN_CONFIG_SET_ROUTING = 4099,
	INSN_CONFIG_GET_ROUTING = 4109,	/* NOTE(review): 4109 (not 4100) is
					   the published ABI value; do not
					   "fix" it. */
	/* PWM */
	INSN_CONFIG_PWM_SET_PERIOD = 5000,	/* sets frequency */
	INSN_CONFIG_PWM_GET_PERIOD = 5001,	/* gets frequency */
	INSN_CONFIG_GET_PWM_STATUS = 5002,	/* is it running? */
	INSN_CONFIG_PWM_SET_H_BRIDGE = 5003,	/* sets H bridge: duty cycle and sign bit for a relay at the same time*/
	INSN_CONFIG_PWM_GET_H_BRIDGE = 5004	/* gets H bridge data: duty cycle and the sign bit */
};
/* Direction/configuration of a DIO channel; matches the values of
 * INSN_CONFIG_DIO_INPUT/OUTPUT/OPENDRAIN above.
 */
enum comedi_io_direction {
	COMEDI_INPUT = 0,
	COMEDI_OUTPUT = 1,
	COMEDI_OPENDRAIN = 2
};
/* Tri-state capability answer; used for the insn_bits_support field of
 * comedi_subdinfo_struct.
 */
enum comedi_support_level {
	COMEDI_UNKNOWN_SUPPORT = 0,
	COMEDI_SUPPORTED,
	COMEDI_UNSUPPORTED
};
/* ioctls */
#define CIO 'd'
#define COMEDI_DEVCONFIG _IOW(CIO, 0, comedi_devconfig)
#define COMEDI_DEVINFO _IOR(CIO, 1, comedi_devinfo)
#define COMEDI_SUBDINFO _IOR(CIO, 2, comedi_subdinfo)
#define COMEDI_CHANINFO _IOR(CIO, 3, comedi_chaninfo)
#define COMEDI_TRIG _IOWR(CIO, 4, comedi_trig)
#define COMEDI_LOCK _IO(CIO, 5)
#define COMEDI_UNLOCK _IO(CIO, 6)
#define COMEDI_CANCEL _IO(CIO, 7)
#define COMEDI_RANGEINFO _IOR(CIO, 8, comedi_rangeinfo)
#define COMEDI_CMD _IOR(CIO, 9, comedi_cmd)
#define COMEDI_CMDTEST _IOR(CIO, 10, comedi_cmd)
#define COMEDI_INSNLIST _IOR(CIO, 11, comedi_insnlist)
#define COMEDI_INSN _IOR(CIO, 12, comedi_insn)
#define COMEDI_BUFCONFIG _IOR(CIO, 13, comedi_bufconfig)
#define COMEDI_BUFINFO _IOWR(CIO, 14, comedi_bufinfo)
#define COMEDI_POLL _IO(CIO, 15)
/* structures */
typedef struct comedi_trig_struct comedi_trig;
typedef struct comedi_cmd_struct comedi_cmd;
typedef struct comedi_insn_struct comedi_insn;
typedef struct comedi_insnlist_struct comedi_insnlist;
typedef struct comedi_chaninfo_struct comedi_chaninfo;
typedef struct comedi_subdinfo_struct comedi_subdinfo;
typedef struct comedi_devinfo_struct comedi_devinfo;
typedef struct comedi_devconfig_struct comedi_devconfig;
typedef struct comedi_rangeinfo_struct comedi_rangeinfo;
typedef struct comedi_krange_struct comedi_krange;
typedef struct comedi_bufconfig_struct comedi_bufconfig;
typedef struct comedi_bufinfo_struct comedi_bufinfo;
/* Acquisition descriptor for the COMEDI_TRIG ioctl. Uses the TRIG_*
 * flag bits defined above. Layout is userspace ABI — do not change.
 */
struct comedi_trig_struct {
	unsigned int subdev;	/* subdevice */
	unsigned int mode;	/* mode */
	unsigned int flags;	/* TRIG_* flags */
	unsigned int n_chan;	/* number of channels */
	unsigned int *chanlist;	/* channel/range list */
	sampl_t *data;	/* data list, size depends on subd flags */
	unsigned int n;	/* number of scans */
	unsigned int trigsrc;	/* trigger source selection */
	unsigned int trigvar;	/* trigger source argument */
	unsigned int trigvar1;	/* second trigger argument */
	unsigned int data_len;
	unsigned int unused[3];	/* reserved; keep zeroed */
};
/* One comedi instruction (COMEDI_INSN ioctl): an INSN_* opcode plus an
 * n-element data buffer, targeted at subdev/chanspec.
 */
struct comedi_insn_struct {
	unsigned int insn;	/* INSN_* opcode (see defines above) */
	unsigned int n;	/* number of elements in *data */
	lsampl_t *data;	/* in/out payload, meaning depends on insn */
	unsigned int subdev;	/* target subdevice index */
	unsigned int chanspec;	/* packed channel/range/aref, see CR_PACK */
	unsigned int unused[3];	/* reserved; keep zeroed */
};
/* Batch of instructions for the COMEDI_INSNLIST ioctl. */
struct comedi_insnlist_struct {
	unsigned int n_insns;	/* number of entries in *insns */
	comedi_insn *insns;
};
/* Streaming acquisition command (COMEDI_CMD / COMEDI_CMDTEST ioctls).
 * Each *_src field takes one of the TRIG_* trigger-source bits defined
 * above, with its argument in the matching *_arg field; flags takes the
 * CMDF_* bits. Layout is userspace ABI — do not change.
 */
struct comedi_cmd_struct {
	unsigned int subdev;
	unsigned int flags;	/* CMDF_* flags */
	unsigned int start_src;	/* event starting the acquisition */
	unsigned int start_arg;
	unsigned int scan_begin_src;	/* event starting each scan */
	unsigned int scan_begin_arg;
	unsigned int convert_src;	/* event starting each conversion */
	unsigned int convert_arg;
	unsigned int scan_end_src;	/* event ending each scan */
	unsigned int scan_end_arg;
	unsigned int stop_src;	/* event stopping the acquisition */
	unsigned int stop_arg;
	unsigned int *chanlist;	/* channel/range list */
	unsigned int chanlist_len;
	sampl_t *data;	/* data list, size depends on subd flags */
	unsigned int data_len;
};
/* Per-channel info request (COMEDI_CHANINFO ioctl): the kernel fills the
 * caller-supplied per-channel arrays for the given subdevice.
 */
struct comedi_chaninfo_struct {
	unsigned int subdev;	/* subdevice to query */
	lsampl_t *maxdata_list;	/* out: max sample value per channel */
	unsigned int *flaglist;	/* out: per-channel flags */
	unsigned int *rangelist;	/* out: per-channel range type */
	unsigned int unused[4];	/* reserved; keep zeroed */
};
/* Range table request (COMEDI_RANGEINFO ioctl). */
struct comedi_rangeinfo_struct {
	unsigned int range_type;	/* packed id, see __RANGE/RANGE_* macros */
	void *range_ptr;	/* out: buffer receiving comedi_krange entries */
};
/* One entry of a range table: [min, max] in fixed point plus RF_* flags. */
struct comedi_krange_struct {
	int min;	/* fixed point, multiply by 1e-6 */
	int max;	/* fixed point, multiply by 1e-6 */
	unsigned int flags;	/* RF_UNIT()/RF_EXTERNAL, see macros below */
};
/* Per-subdevice description returned by the COMEDI_SUBDINFO ioctl. */
struct comedi_subdinfo_struct {
	unsigned int type;	/* COMEDI_SUBD_* value */
	unsigned int n_chan;	/* number of channels */
	unsigned int subd_flags;	/* SDF_* flags */
	unsigned int timer_type;
	unsigned int len_chanlist;	/* max chanlist length for commands */
	lsampl_t maxdata;	/* max sample value (if !SDF_MAXDATA) */
	unsigned int flags;	/* channel flags */
	unsigned int range_type;	/* lookup in kernel */
	unsigned int settling_time_0;
	unsigned insn_bits_support;	/* see support_level enum for values*/
	unsigned int unused[8];	/* reserved; keep zeroed */
};
/* Device description returned by the COMEDI_DEVINFO ioctl. */
struct comedi_devinfo_struct {
	unsigned int version_code;	/* comedi core version */
	unsigned int n_subdevs;	/* number of subdevices */
	char driver_name[COMEDI_NAMELEN];
	char board_name[COMEDI_NAMELEN];
	int read_subdevice;	/* index of default streaming-input subdev, or -1 */
	int write_subdevice;	/* index of default streaming-output subdev, or -1 */
	int unused[30];	/* reserved; keep zeroed */
};
/* Board attach request for the COMEDI_DEVCONFIG ioctl; options[] carries
 * the COMEDI_DEVCONF_* slots defined above.
 */
struct comedi_devconfig_struct {
	char board_name[COMEDI_NAMELEN];
	int options[COMEDI_NDEVCONFOPTS];
};
/* Streaming buffer (re)configuration for the COMEDI_BUFCONFIG ioctl. */
struct comedi_bufconfig_struct {
	unsigned int subdevice;
	unsigned int flags;
	unsigned int maximum_size;	/* presumably bytes — confirm against core */
	unsigned int size;	/* presumably bytes — confirm against core */
	unsigned int unused[4];	/* reserved; keep zeroed */
};
/* Streaming buffer state exchange for the COMEDI_BUFINFO ioctl. */
struct comedi_bufinfo_struct {
	unsigned int subdevice;
	unsigned int bytes_read;	/* in: bytes consumed by the caller */
	unsigned int buf_write_ptr;	/* out: write offset in the buffer */
	unsigned int buf_read_ptr;	/* out: read offset in the buffer */
	unsigned int buf_write_count;	/* out: total bytes written */
	unsigned int buf_read_count;	/* out: total bytes read */
	unsigned int bytes_written;	/* in: bytes produced by the caller */
	unsigned int unused[4];	/* reserved; keep zeroed */
};
/* range stuff */
#define __RANGE(a, b) ((((a)&0xffff)<<16)|((b)&0xffff))
#define RANGE_OFFSET(a) (((a)>>16)&0xffff)
#define RANGE_LENGTH(b) ((b)&0xffff)
#define RF_UNIT(flags) ((flags)&0xff)
#define RF_EXTERNAL (1<<8)
#define UNIT_volt 0
#define UNIT_mA 1
#define UNIT_none 2
#define COMEDI_MIN_SPEED ((unsigned int)0xffffffff)
/* callback stuff */
/* only relevant to kernel modules. */
#define COMEDI_CB_EOS 1 /* end of scan */
#define COMEDI_CB_EOA 2 /* end of acquisition */
#define COMEDI_CB_BLOCK 4 /* DEPRECATED: convenient block size */
#define COMEDI_CB_EOBUF 8 /* DEPRECATED: end of buffer */
#define COMEDI_CB_ERROR 16 /* card error during acquisition */
#define COMEDI_CB_OVERFLOW 32 /* buffer overflow/underflow */
/**********************************************************/
/* everything after this line is ALPHA */
/**********************************************************/
/*
8254 specific configuration.
It supports two config commands:
0 ID: INSN_CONFIG_SET_COUNTER_MODE
1 8254 Mode
I8254_MODE0, I8254_MODE1, ..., I8254_MODE5
OR'ed with:
I8254_BCD, I8254_BINARY
0 ID: INSN_CONFIG_8254_READ_STATUS
1 <-- Status byte returned here.
B7 = Output
B6 = NULL Count
B5 - B0 Current mode.
*/
/* 8254 counter mode bits for INSN_CONFIG_SET_COUNTER_MODE; the mode number
 * occupies bits 3:1 so it can be OR'ed with the BCD/binary select in bit 0
 * (see the usage comment above).
 */
enum i8254_mode {
	I8254_MODE0 = (0 << 1),	/* Interrupt on terminal count */
	I8254_MODE1 = (1 << 1),	/* Hardware retriggerable one-shot */
	I8254_MODE2 = (2 << 1),	/* Rate generator */
	I8254_MODE3 = (3 << 1),	/* Square wave mode */
	I8254_MODE4 = (4 << 1),	/* Software triggered strobe */
	I8254_MODE5 = (5 << 1),	/* Hardware triggered strobe (retriggerable) */
	I8254_BCD = 1,	/* use binary-coded decimal instead of binary (pretty useless) */
	I8254_BINARY = 0
};
/*
 * Map a PFI channel number to its NI routing-selection code:
 * channels 0-9 map to 0x1..0xa, higher channels are offset by 0xb.
 */
static inline unsigned NI_USUAL_PFI_SELECT(unsigned pfi_channel)
{
	unsigned offset = (pfi_channel < 10) ? 0x1 : 0xb;

	return offset + pfi_channel;
}
/*
 * Map an RTSI bus line to its NI routing-selection code:
 * lines 0-6 map to 0xb..0x11, anything higher collapses to 0x1b.
 */
static inline unsigned NI_USUAL_RTSI_SELECT(unsigned rtsi_channel)
{
	return (rtsi_channel < 7) ? 0xb + rtsi_channel : 0x1b;
}
/* mode bits for NI general-purpose counters, set with
* INSN_CONFIG_SET_COUNTER_MODE */
#define NI_GPCT_COUNTING_MODE_SHIFT 16
#define NI_GPCT_INDEX_PHASE_BITSHIFT 20
#define NI_GPCT_COUNTING_DIRECTION_SHIFT 24
/* Mode bits for NI general-purpose counters (INSN_CONFIG_SET_COUNTER_MODE).
 * Values are ABI; *_MASK entries delimit the multi-bit fields and must stay
 * consistent with the *_SHIFT defines above.
 */
enum ni_gpct_mode_bits {
	NI_GPCT_GATE_ON_BOTH_EDGES_BIT = 0x4,
	NI_GPCT_EDGE_GATE_MODE_MASK = 0x18,
	NI_GPCT_EDGE_GATE_STARTS_STOPS_BITS = 0x0,
	NI_GPCT_EDGE_GATE_STOPS_STARTS_BITS = 0x8,
	NI_GPCT_EDGE_GATE_STARTS_BITS = 0x10,
	NI_GPCT_EDGE_GATE_NO_STARTS_NO_STOPS_BITS = 0x18,
	NI_GPCT_STOP_MODE_MASK = 0x60,
	NI_GPCT_STOP_ON_GATE_BITS = 0x00,
	NI_GPCT_STOP_ON_GATE_OR_TC_BITS = 0x20,
	NI_GPCT_STOP_ON_GATE_OR_SECOND_TC_BITS = 0x40,
	NI_GPCT_LOAD_B_SELECT_BIT = 0x80,
	NI_GPCT_OUTPUT_MODE_MASK = 0x300,
	NI_GPCT_OUTPUT_TC_PULSE_BITS = 0x100,
	NI_GPCT_OUTPUT_TC_TOGGLE_BITS = 0x200,
	NI_GPCT_OUTPUT_TC_OR_GATE_TOGGLE_BITS = 0x300,
	NI_GPCT_HARDWARE_DISARM_MASK = 0xc00,
	NI_GPCT_NO_HARDWARE_DISARM_BITS = 0x000,
	NI_GPCT_DISARM_AT_TC_BITS = 0x400,
	NI_GPCT_DISARM_AT_GATE_BITS = 0x800,
	NI_GPCT_DISARM_AT_TC_OR_GATE_BITS = 0xc00,
	NI_GPCT_LOADING_ON_TC_BIT = 0x1000,
	NI_GPCT_LOADING_ON_GATE_BIT = 0x4000,
	NI_GPCT_COUNTING_MODE_MASK = 0x7 << NI_GPCT_COUNTING_MODE_SHIFT,
	NI_GPCT_COUNTING_MODE_NORMAL_BITS =
		0x0 << NI_GPCT_COUNTING_MODE_SHIFT,
	NI_GPCT_COUNTING_MODE_QUADRATURE_X1_BITS =
		0x1 << NI_GPCT_COUNTING_MODE_SHIFT,
	NI_GPCT_COUNTING_MODE_QUADRATURE_X2_BITS =
		0x2 << NI_GPCT_COUNTING_MODE_SHIFT,
	NI_GPCT_COUNTING_MODE_QUADRATURE_X4_BITS =
		0x3 << NI_GPCT_COUNTING_MODE_SHIFT,
	NI_GPCT_COUNTING_MODE_TWO_PULSE_BITS =
		0x4 << NI_GPCT_COUNTING_MODE_SHIFT,
	NI_GPCT_COUNTING_MODE_SYNC_SOURCE_BITS =
		0x6 << NI_GPCT_COUNTING_MODE_SHIFT,
	NI_GPCT_INDEX_PHASE_MASK = 0x3 << NI_GPCT_INDEX_PHASE_BITSHIFT,
	NI_GPCT_INDEX_PHASE_LOW_A_LOW_B_BITS =
		0x0 << NI_GPCT_INDEX_PHASE_BITSHIFT,
	NI_GPCT_INDEX_PHASE_LOW_A_HIGH_B_BITS =
		0x1 << NI_GPCT_INDEX_PHASE_BITSHIFT,
	NI_GPCT_INDEX_PHASE_HIGH_A_LOW_B_BITS =
		0x2 << NI_GPCT_INDEX_PHASE_BITSHIFT,
	NI_GPCT_INDEX_PHASE_HIGH_A_HIGH_B_BITS =
		0x3 << NI_GPCT_INDEX_PHASE_BITSHIFT,
	NI_GPCT_INDEX_ENABLE_BIT = 0x400000,
	NI_GPCT_COUNTING_DIRECTION_MASK =
		0x3 << NI_GPCT_COUNTING_DIRECTION_SHIFT,
	NI_GPCT_COUNTING_DIRECTION_DOWN_BITS =
		0x00 << NI_GPCT_COUNTING_DIRECTION_SHIFT,
	NI_GPCT_COUNTING_DIRECTION_UP_BITS =
		0x1 << NI_GPCT_COUNTING_DIRECTION_SHIFT,
	NI_GPCT_COUNTING_DIRECTION_HW_UP_DOWN_BITS =
		0x2 << NI_GPCT_COUNTING_DIRECTION_SHIFT,
	NI_GPCT_COUNTING_DIRECTION_HW_GATE_BITS =
		0x3 << NI_GPCT_COUNTING_DIRECTION_SHIFT,
	NI_GPCT_RELOAD_SOURCE_MASK = 0xc000000,
	NI_GPCT_RELOAD_SOURCE_FIXED_BITS = 0x0,
	NI_GPCT_RELOAD_SOURCE_SWITCHING_BITS = 0x4000000,
	NI_GPCT_RELOAD_SOURCE_GATE_SELECT_BITS = 0x8000000,
	NI_GPCT_OR_GATE_BIT = 0x10000000,
	NI_GPCT_INVERT_OUTPUT_BIT = 0x20000000
};
/* Bits for setting a clock source with
 * INSN_CONFIG_SET_CLOCK_SRC when using NI general-purpose counters. */
enum ni_gpct_clock_source_bits {
	/* low bits select the clock source itself */
	NI_GPCT_CLOCK_SRC_SELECT_MASK = 0x3f,
	NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS = 0x0,
	NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS = 0x1,
	NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS = 0x2,
	NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS = 0x3,
	NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS = 0x4,
	NI_GPCT_NEXT_TC_CLOCK_SRC_BITS = 0x5,
	NI_GPCT_SOURCE_PIN_i_CLOCK_SRC_BITS = 0x6,	/* NI 660x-specific */
	NI_GPCT_PXI10_CLOCK_SRC_BITS = 0x7,
	NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS = 0x8,
	NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS = 0x9,
	/* prescaling is a separate bit-field, OR'd with the source select */
	NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK = 0x30000000,
	NI_GPCT_NO_PRESCALE_CLOCK_SRC_BITS = 0x0,
	NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS = 0x10000000,	/* divide source by 2 */
	NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS = 0x20000000,	/* divide source by 8 */
	NI_GPCT_INVERT_CLOCK_SRC_BIT = 0x80000000
};
/* Clock-source code selecting source pin <n> (NI 660x only). */
static inline unsigned NI_GPCT_SOURCE_PIN_CLOCK_SRC_BITS(unsigned n)
{
	/* NI 660x-specific: source pins occupy codes starting at 0x10 */
	return n + 0x10;
}
/* Clock-source code selecting RTSI line <n>; codes start at 0x18. */
static inline unsigned NI_GPCT_RTSI_CLOCK_SRC_BITS(unsigned n)
{
	return n + 0x18;
}
/* Clock-source code selecting PFI line <n>; codes start at 0x20. */
static inline unsigned NI_GPCT_PFI_CLOCK_SRC_BITS(unsigned n)
{
	/* no pfi on NI 660x */
	return n + 0x20;
}
/* Possibilities for setting a gate source with
INSN_CONFIG_SET_GATE_SRC when using NI general-purpose counters.
May be bitwise-or'd with CR_EDGE or CR_INVERT. */
enum ni_gpct_gate_select {
	/* m-series gates */
	NI_GPCT_TIMESTAMP_MUX_GATE_SELECT = 0x0,
	NI_GPCT_AI_START2_GATE_SELECT = 0x12,
	NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT = 0x13,
	NI_GPCT_NEXT_OUT_GATE_SELECT = 0x14,
	NI_GPCT_AI_START1_GATE_SELECT = 0x1c,
	NI_GPCT_NEXT_SOURCE_GATE_SELECT = 0x1d,
	NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT = 0x1e,
	NI_GPCT_LOGIC_LOW_GATE_SELECT = 0x1f,
	/* more gates for 660x (0x100 range) */
	NI_GPCT_SOURCE_PIN_i_GATE_SELECT = 0x100,
	NI_GPCT_GATE_PIN_i_GATE_SELECT = 0x101,
	/* more gates for 660x "second gate" (0x200 range) */
	NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT = 0x201,
	NI_GPCT_SELECTED_GATE_GATE_SELECT = 0x21e,
	/* m-series "second gate" sources are unknown,
	we should add them here with an offset of 0x300 when known. */
	NI_GPCT_DISABLED_GATE_SELECT = 0x8000,
};
/* Gate-select code for 660x gate pin <n>; codes start at 0x102. */
static inline unsigned NI_GPCT_GATE_PIN_GATE_SELECT(unsigned n)
{
	return n + 0x102;
}
/* Gate-select code for RTSI line <n>; delegates to the common
 * NI_USUAL_RTSI_SELECT() encoding shared with other NI subdevices. */
static inline unsigned NI_GPCT_RTSI_GATE_SELECT(unsigned n)
{
	return NI_USUAL_RTSI_SELECT(n);
}
/* Gate-select code for PFI line <n>; delegates to the common
 * NI_USUAL_PFI_SELECT() encoding. */
static inline unsigned NI_GPCT_PFI_GATE_SELECT(unsigned n)
{
	return NI_USUAL_PFI_SELECT(n);
}
/* Second-gate select code for 660x up/down pin <n>; codes start at 0x202. */
static inline unsigned NI_GPCT_UP_DOWN_PIN_GATE_SELECT(unsigned n)
{
	return n + 0x202;
}
/* Possibilities for setting a source with
INSN_CONFIG_SET_OTHER_SRC when using NI general-purpose counters. */
enum ni_gpct_other_index {
	NI_GPCT_SOURCE_ENCODER_A,
	NI_GPCT_SOURCE_ENCODER_B,
	NI_GPCT_SOURCE_ENCODER_Z
};
enum ni_gpct_other_select {
	/* m-series gates */
	/* Still unknown, probably only need NI_GPCT_PFI_OTHER_SELECT */
	NI_GPCT_DISABLED_OTHER_SELECT = 0x8000,
};
/* "Other"-source select code for PFI line <n>; uses the common
 * NI_USUAL_PFI_SELECT() encoding. */
static inline unsigned NI_GPCT_PFI_OTHER_SELECT(unsigned n)
{
	return NI_USUAL_PFI_SELECT(n);
}
/* start sources for ni general-purpose counters for use with
INSN_CONFIG_ARM */
enum ni_gpct_arm_source {
	NI_GPCT_ARM_IMMEDIATE = 0x0,
	NI_GPCT_ARM_PAIRED_IMMEDIATE = 0x1,	/* Start both the counter and
						   the adjacent paired counter
						   simultaneously */
	/* NI doesn't document bits for selecting hardware arm triggers. If
	 * the NI_GPCT_ARM_UNKNOWN bit is set, we will pass the least
	 * significant bits (3 bits for 660x or 5 bits for m-series) through to
	 * the hardware. This will at least allow someone to figure out what
	 * the bits do later. */
	NI_GPCT_ARM_UNKNOWN = 0x1000,
};
/* digital filtering options for ni 660x for use with INSN_CONFIG_FILTER. */
enum ni_gpct_filter_select {
	NI_GPCT_FILTER_OFF = 0x0,
	NI_GPCT_FILTER_TIMEBASE_3_SYNC = 0x1,
	/* remaining values name the filter period in units of the
	 * indicated timebase (e.g. 100x timebase 1) */
	NI_GPCT_FILTER_100x_TIMEBASE_1 = 0x2,
	NI_GPCT_FILTER_20x_TIMEBASE_1 = 0x3,
	NI_GPCT_FILTER_10x_TIMEBASE_1 = 0x4,
	NI_GPCT_FILTER_2x_TIMEBASE_1 = 0x5,
	NI_GPCT_FILTER_2x_TIMEBASE_3 = 0x6
};
/* PFI digital filtering options for ni m-series for use with
 * INSN_CONFIG_FILTER. */
enum ni_pfi_filter_select {
	NI_PFI_FILTER_OFF = 0x0,
	NI_PFI_FILTER_125ns = 0x1,
	NI_PFI_FILTER_6425ns = 0x2,
	NI_PFI_FILTER_2550us = 0x3
};
/*
 * Master clock sources for NI MIO boards, used with
 * INSN_CONFIG_SET_CLOCK_SRC.
 */
enum ni_mio_clock_source {
	NI_MIO_INTERNAL_CLOCK = 0,
	/* RTSI clock: doesn't work for m-series, use NI_MIO_PLL_RTSI_CLOCK() */
	NI_MIO_RTSI_CLOCK = 1,
	/* the NI_MIO_PLL_* sources are m-series only */
	NI_MIO_PLL_PXI_STAR_TRIGGER_CLOCK = 2,
	NI_MIO_PLL_PXI10_CLOCK = 3,
	NI_MIO_PLL_RTSI0_CLOCK = 4
};
/* Clock-source code for phase-locking to RTSI line <rtsi_channel>. */
static inline unsigned NI_MIO_PLL_RTSI_CLOCK(unsigned rtsi_channel)
{
	return rtsi_channel + NI_MIO_PLL_RTSI0_CLOCK;
}
/*
 * Signals routable to an NI RTSI pin with INSN_CONFIG_SET_ROUTING.
 * The values are not arbitrary: they match the bits that must be
 * programmed into the board.
 */
enum ni_rtsi_routing {
	NI_RTSI_OUTPUT_ADR_START1 = 0,
	NI_RTSI_OUTPUT_ADR_START2 = 1,
	NI_RTSI_OUTPUT_SCLKG = 2,
	NI_RTSI_OUTPUT_DACUPDN = 3,
	NI_RTSI_OUTPUT_DA_START1 = 4,
	NI_RTSI_OUTPUT_G_SRC0 = 5,
	NI_RTSI_OUTPUT_G_GATE0 = 6,
	NI_RTSI_OUTPUT_RGOUT0 = 7,
	NI_RTSI_OUTPUT_RTSI_BRD_0 = 8,
	/* pre-m-series boards always have the RTSI clock on line 7 */
	NI_RTSI_OUTPUT_RTSI_OSC = 12
};
/* Routing code for RTSI board line <n>, relative to RTSI_BRD_0. */
static inline unsigned NI_RTSI_OUTPUT_RTSI_BRD(unsigned n)
{
	return n + NI_RTSI_OUTPUT_RTSI_BRD_0;
}
/*
 * Signals routable to an NI PFI pin on an m-series board via
 * INSN_CONFIG_SET_ROUTING.  INSN_CONFIG_GET_ROUTING also returns these
 * values on pre-m-series boards, whose routing is fixed.  The values
 * are not arbitrary: they match the bits programmed into the board.
 */
enum ni_pfi_routing {
	NI_PFI_OUTPUT_PFI_DEFAULT = 0,
	NI_PFI_OUTPUT_AI_START1 = 1,
	NI_PFI_OUTPUT_AI_START2 = 2,
	NI_PFI_OUTPUT_AI_CONVERT = 3,
	NI_PFI_OUTPUT_G_SRC1 = 4,
	NI_PFI_OUTPUT_G_GATE1 = 5,
	NI_PFI_OUTPUT_AO_UPDATE_N = 6,
	NI_PFI_OUTPUT_AO_START1 = 7,
	NI_PFI_OUTPUT_AI_START_PULSE = 8,
	NI_PFI_OUTPUT_G_SRC0 = 9,
	NI_PFI_OUTPUT_G_GATE0 = 10,
	NI_PFI_OUTPUT_EXT_STROBE = 11,
	NI_PFI_OUTPUT_AI_EXT_MUX_CLK = 12,
	NI_PFI_OUTPUT_GOUT0 = 13,
	NI_PFI_OUTPUT_GOUT1 = 14,
	NI_PFI_OUTPUT_FREQ_OUT = 15,
	NI_PFI_OUTPUT_PFI_DO = 16,
	NI_PFI_OUTPUT_I_ATRIG = 17,
	NI_PFI_OUTPUT_RTSI0 = 18,
	NI_PFI_OUTPUT_PXI_STAR_TRIGGER_IN = 26,
	NI_PFI_OUTPUT_SCXI_TRIG1 = 27,
	NI_PFI_OUTPUT_DIO_CHANGE_DETECT_RTSI = 28,
	NI_PFI_OUTPUT_CDI_SAMPLE = 29,
	NI_PFI_OUTPUT_CDO_UPDATE = 30
};
/* PFI routing code for RTSI line <rtsi_channel>, relative to RTSI0. */
static inline unsigned NI_PFI_OUTPUT_RTSI(unsigned rtsi_channel)
{
	return rtsi_channel + NI_PFI_OUTPUT_RTSI0;
}
/* Signals which can be routed to output on a NI PFI pin on a 660x board
   with INSN_CONFIG_SET_ROUTING. The numbers assigned are
   not arbitrary, they correspond to the bits required
   to program the board. Lines 0 to 7 can only be set to
   NI_660X_PFI_OUTPUT_DIO. Lines 32 to 39 can only be set to
   NI_660X_PFI_OUTPUT_COUNTER. */
enum ni_660x_pfi_routing {
	NI_660X_PFI_OUTPUT_COUNTER = 1,	/* counter */
	NI_660X_PFI_OUTPUT_DIO = 2,	/* static digital output */
};
/* NI External Trigger lines. These values are not arbitrary, but are related
 * to the bits required to program the board (offset by 1 for historical
 * reasons). */
static inline unsigned NI_EXT_PFI(unsigned pfi_channel)
{
	return NI_USUAL_PFI_SELECT(pfi_channel) - 1;
}
static inline unsigned NI_EXT_RTSI(unsigned rtsi_channel)
{
	return NI_USUAL_RTSI_SELECT(rtsi_channel) - 1;
}
/* status bits for INSN_CONFIG_GET_COUNTER_STATUS; these are flag bits
 * and may be combined */
enum comedi_counter_status_flags {
	COMEDI_COUNTER_ARMED = 0x1,
	COMEDI_COUNTER_COUNTING = 0x2,
	COMEDI_COUNTER_TERMINAL_COUNT = 0x4,
};
/* Clock sources for CDIO subdevice on NI m-series boards. Used as the
 * scan_begin_arg for a comedi_command. These sources may also be bitwise-or'd
 * with CR_INVERT to change polarity. */
enum ni_m_series_cdio_scan_begin_src {
	NI_CDIO_SCAN_BEGIN_SRC_GROUND = 0,
	NI_CDIO_SCAN_BEGIN_SRC_AI_START = 18,
	NI_CDIO_SCAN_BEGIN_SRC_AI_CONVERT = 19,
	NI_CDIO_SCAN_BEGIN_SRC_PXI_STAR_TRIGGER = 20,
	NI_CDIO_SCAN_BEGIN_SRC_G0_OUT = 28,
	NI_CDIO_SCAN_BEGIN_SRC_G1_OUT = 29,
	NI_CDIO_SCAN_BEGIN_SRC_ANALOG_TRIGGER = 30,
	NI_CDIO_SCAN_BEGIN_SRC_AO_UPDATE = 31,
	NI_CDIO_SCAN_BEGIN_SRC_FREQ_OUT = 32,
	NI_CDIO_SCAN_BEGIN_SRC_DIO_CHANGE_DETECT_IRQ = 33
};
/* CDIO scan-begin source for PFI line <pfi_channel>; uses the common
 * NI_USUAL_PFI_SELECT() encoding. */
static inline unsigned NI_CDIO_SCAN_BEGIN_SRC_PFI(unsigned pfi_channel)
{
	return NI_USUAL_PFI_SELECT(pfi_channel);
}
/* CDIO scan-begin source for RTSI line <rtsi_channel>. */
static inline unsigned NI_CDIO_SCAN_BEGIN_SRC_RTSI(unsigned rtsi_channel)
{
	return NI_USUAL_RTSI_SELECT(rtsi_channel);
}
/* scan_begin_src for scan_begin_arg==TRIG_EXT with analog output command on NI
 * boards. These scan begin sources can also be bitwise-or'd with CR_INVERT to
 * change polarity. */
static inline unsigned NI_AO_SCAN_BEGIN_SRC_PFI(unsigned pfi_channel)
{
	return NI_USUAL_PFI_SELECT(pfi_channel);
}
static inline unsigned NI_AO_SCAN_BEGIN_SRC_RTSI(unsigned rtsi_channel)
{
	return NI_USUAL_RTSI_SELECT(rtsi_channel);
}
/* Bits for setting a clock source with
 * INSN_CONFIG_SET_CLOCK_SRC when using NI frequency output subdevice. */
enum ni_freq_out_clock_source_bits {
	NI_FREQ_OUT_TIMEBASE_1_DIV_2_CLOCK_SRC,	/* 10 MHz */
	NI_FREQ_OUT_TIMEBASE_2_CLOCK_SRC	/* 100 KHz */
};
/* Values for setting a clock source with INSN_CONFIG_SET_CLOCK_SRC for
 * 8254 counter subdevices on Amplicon DIO boards (amplc_dio200 driver). */
enum amplc_dio_clock_source {
	AMPLC_DIO_CLK_CLKN,	/* per channel external clock
				   input/output pin (pin is only an
				   input when clock source set to this
				   value, otherwise it is an output) */
	AMPLC_DIO_CLK_10MHZ,	/* 10 MHz internal clock */
	AMPLC_DIO_CLK_1MHZ,	/* 1 MHz internal clock */
	AMPLC_DIO_CLK_100KHZ,	/* 100 kHz internal clock */
	AMPLC_DIO_CLK_10KHZ,	/* 10 kHz internal clock */
	AMPLC_DIO_CLK_1KHZ,	/* 1 kHz internal clock */
	AMPLC_DIO_CLK_OUTNM1,	/* output of preceding counter channel
				   (for channel 0, preceding counter
				   channel is channel 2 on preceding
				   counter subdevice, for first counter
				   subdevice, preceding counter
				   subdevice is the last counter
				   subdevice) */
	AMPLC_DIO_CLK_EXT	/* per chip external input pin */
};
/* Values for setting a gate source with INSN_CONFIG_SET_GATE_SRC for
 * 8254 counter subdevices on Amplicon DIO boards (amplc_dio200 driver). */
enum amplc_dio_gate_source {
	AMPLC_DIO_GAT_VCC,	/* internal high logic level */
	AMPLC_DIO_GAT_GND,	/* internal low logic level */
	AMPLC_DIO_GAT_GATN,	/* per channel external gate input */
	AMPLC_DIO_GAT_NOUTNM2,	/* negated output of counter channel
				   minus 2 (for channels 0 or 1,
				   channel minus 2 is channel 1 or 2 on
				   the preceding counter subdevice, for
				   the first counter subdevice the
				   preceding counter subdevice is the
				   last counter subdevice) */
	/* values 4..7 are reserved for future gate sources */
	AMPLC_DIO_GAT_RESERVED4,
	AMPLC_DIO_GAT_RESERVED5,
	AMPLC_DIO_GAT_RESERVED6,
	AMPLC_DIO_GAT_RESERVED7
};
#ifdef __cplusplus
}
#endif
#endif /* _COMEDI_H */

View File

@ -0,0 +1,597 @@
/*
comedi/comedi_compat32.c
32-bit ioctl compatibility for 64-bit comedi kernel module.
Author: Ian Abbott, MEV Ltd. <abbotti@mev.co.uk>
Copyright (C) 2007 MEV Ltd. <http://www.mev.co.uk/>
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 1997-2007 David A. Schleef <ds@schleef.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define __NO_VERSION__
#include "comedi.h"
#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include "comedi_compat32.h"
#ifdef CONFIG_COMPAT
#ifndef HAVE_COMPAT_IOCTL
#include <linux/ioctl32.h>	/* for (un)register_ioctl32_conversion */
#endif
/* 32-bit variants of the COMEDI_* ioctl codes whose argument structures
 * embed pointers and therefore differ in layout between 32-bit and
 * 64-bit userspace. */
#define COMEDI32_CHANINFO _IOR(CIO,3,comedi32_chaninfo)
#define COMEDI32_RANGEINFO _IOR(CIO,8,comedi32_rangeinfo)
/* N.B. COMEDI32_CMD and COMEDI_CMD ought to use _IOWR, not _IOR.
 * It's too late to change it now, but it only affects the command number. */
#define COMEDI32_CMD _IOR(CIO,9,comedi32_cmd)
/* N.B. COMEDI32_CMDTEST and COMEDI_CMDTEST ought to use _IOWR, not _IOR.
 * It's too late to change it now, but it only affects the command number. */
#define COMEDI32_CMDTEST _IOR(CIO,10,comedi32_cmd)
#define COMEDI32_INSNLIST _IOR(CIO,11,comedi32_insnlist)
#define COMEDI32_INSN _IOR(CIO,12,comedi32_insn)
/* 32-bit layout of comedi_chaninfo: pointer members are compat_uptr_t. */
typedef struct comedi32_chaninfo_struct {
	unsigned int subdev;
	compat_uptr_t maxdata_list;	/* 32-bit 'lsampl_t *' */
	compat_uptr_t flaglist;	/* 32-bit 'unsigned int *' */
	compat_uptr_t rangelist;	/* 32-bit 'unsigned int *' */
	unsigned int unused[4];
} comedi32_chaninfo;
/* 32-bit layout of comedi_rangeinfo. */
typedef struct comedi32_rangeinfo_struct {
	unsigned int range_type;
	compat_uptr_t range_ptr;	/* 32-bit 'void *' */
} comedi32_rangeinfo;
/* 32-bit layout of comedi_cmd; field order must mirror the native
 * structure so get_compat_cmd()/put_compat_cmd() can translate 1:1. */
typedef struct comedi32_cmd_struct {
	unsigned int subdev;
	unsigned int flags;
	unsigned int start_src;
	unsigned int start_arg;
	unsigned int scan_begin_src;
	unsigned int scan_begin_arg;
	unsigned int convert_src;
	unsigned int convert_arg;
	unsigned int scan_end_src;
	unsigned int scan_end_arg;
	unsigned int stop_src;
	unsigned int stop_arg;
	compat_uptr_t chanlist;	/* 32-bit 'unsigned int *' */
	unsigned int chanlist_len;
	compat_uptr_t data;	/* 32-bit 'sampl_t *' */
	unsigned int data_len;
} comedi32_cmd;
/* 32-bit layout of comedi_insn. */
typedef struct comedi32_insn_struct {
	unsigned int insn;
	unsigned int n;
	compat_uptr_t data;	/* 32-bit 'lsampl_t *' */
	unsigned int subdev;
	unsigned int chanspec;
	unsigned int unused[3];
} comedi32_insn;
/* 32-bit layout of comedi_insnlist. */
typedef struct comedi32_insnlist_struct {
	unsigned int n_insns;
	compat_uptr_t insns;	/* 32-bit 'comedi_insn *' */
} comedi32_insnlist;
/* Handle translated ioctl. */
/* Dispatches an ioctl (already translated to native argument layout) to
 * the file's own ioctl handler, preferring ->unlocked_ioctl when the
 * kernel provides it and falling back to the BKL-protected ->ioctl op. */
static int translated_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	if (!file->f_op) {
		return -ENOTTY;
	}
#ifdef HAVE_UNLOCKED_IOCTL
	if (file->f_op->unlocked_ioctl) {
		int rc = (int)(*file->f_op->unlocked_ioctl)(file, cmd, arg);
		/* -ENOIOCTLCMD is kernel-internal; don't leak it to user */
		if (rc == -ENOIOCTLCMD) {
			rc = -ENOTTY;
		}
		return rc;
	}
#endif
	if (file->f_op->ioctl) {
		int rc;
		/* legacy ->ioctl op must run under the big kernel lock */
		lock_kernel();
		rc = (*file->f_op->ioctl)(file->f_dentry->d_inode,
				file, cmd, arg);
		unlock_kernel();
		return rc;
	}
	return -ENOTTY;
}
/* Handle 32-bit COMEDI_CHANINFO ioctl. */
/* Builds a native comedi_chaninfo in the compat user-space scratch area,
 * widening each compat_uptr_t member with compat_ptr(), then re-issues
 * the ioctl as a native COMEDI_CHANINFO. */
static int compat_chaninfo(struct file *file, unsigned long arg)
{
	comedi_chaninfo __user *chaninfo;
	comedi32_chaninfo __user *chaninfo32;
	int err;
	union {
		unsigned int uint;
		compat_uptr_t uptr;
	} temp;
	chaninfo32 = compat_ptr(arg);
	chaninfo = compat_alloc_user_space(sizeof(*chaninfo));
	/* Copy chaninfo structure. Ignore unused members. */
	if (!access_ok(VERIFY_READ, chaninfo32, sizeof(*chaninfo32))
			|| !access_ok(VERIFY_WRITE, chaninfo,
				sizeof(*chaninfo))) {
		return -EFAULT;
	}
	/* accumulate __get_user/__put_user failures; any non-zero -> EFAULT */
	err = 0;
	err |= __get_user(temp.uint, &chaninfo32->subdev);
	err |= __put_user(temp.uint, &chaninfo->subdev);
	err |= __get_user(temp.uptr, &chaninfo32->maxdata_list);
	err |= __put_user(compat_ptr(temp.uptr), &chaninfo->maxdata_list);
	err |= __get_user(temp.uptr, &chaninfo32->flaglist);
	err |= __put_user(compat_ptr(temp.uptr), &chaninfo->flaglist);
	err |= __get_user(temp.uptr, &chaninfo32->rangelist);
	err |= __put_user(compat_ptr(temp.uptr), &chaninfo->rangelist);
	if (err) {
		return -EFAULT;
	}
	return translated_ioctl(file, COMEDI_CHANINFO, (unsigned long)chaninfo);
}
/* Handle 32-bit COMEDI_RANGEINFO ioctl. */
/* Same pattern as compat_chaninfo(): translate the 32-bit structure into
 * a native one in compat user space, then re-issue the native ioctl. */
static int compat_rangeinfo(struct file *file, unsigned long arg)
{
	comedi_rangeinfo __user *rangeinfo;
	comedi32_rangeinfo __user *rangeinfo32;
	int err;
	union {
		unsigned int uint;
		compat_uptr_t uptr;
	} temp;
	rangeinfo32 = compat_ptr(arg);
	rangeinfo = compat_alloc_user_space(sizeof(*rangeinfo));
	/* Copy rangeinfo structure. */
	if (!access_ok(VERIFY_READ, rangeinfo32, sizeof(*rangeinfo32))
			|| !access_ok(VERIFY_WRITE, rangeinfo,
				sizeof(*rangeinfo))) {
		return -EFAULT;
	}
	err = 0;
	err |= __get_user(temp.uint, &rangeinfo32->range_type);
	err |= __put_user(temp.uint, &rangeinfo->range_type);
	err |= __get_user(temp.uptr, &rangeinfo32->range_ptr);
	err |= __put_user(compat_ptr(temp.uptr), &rangeinfo->range_ptr);
	if (err) {
		return -EFAULT;
	}
	return translated_ioctl(file, COMEDI_RANGEINFO,
			(unsigned long)rangeinfo);
}
/* Copy 32-bit cmd structure to native cmd structure. */
/* Field-by-field user-to-user translation; integer members are copied
 * unchanged, the two compat_uptr_t members (chanlist, data) are widened
 * with compat_ptr().  Returns 0 or -EFAULT. */
static int get_compat_cmd(comedi_cmd __user *cmd,
		comedi32_cmd __user *cmd32)
{
	int err;
	union {
		unsigned int uint;
		compat_uptr_t uptr;
	} temp;
	/* Copy cmd structure. */
	if (!access_ok(VERIFY_READ, cmd32, sizeof(*cmd32))
			|| !access_ok(VERIFY_WRITE, cmd, sizeof(*cmd))) {
		return -EFAULT;
	}
	err = 0;
	err |= __get_user(temp.uint, &cmd32->subdev);
	err |= __put_user(temp.uint, &cmd->subdev);
	err |= __get_user(temp.uint, &cmd32->flags);
	err |= __put_user(temp.uint, &cmd->flags);
	err |= __get_user(temp.uint, &cmd32->start_src);
	err |= __put_user(temp.uint, &cmd->start_src);
	err |= __get_user(temp.uint, &cmd32->start_arg);
	err |= __put_user(temp.uint, &cmd->start_arg);
	err |= __get_user(temp.uint, &cmd32->scan_begin_src);
	err |= __put_user(temp.uint, &cmd->scan_begin_src);
	err |= __get_user(temp.uint, &cmd32->scan_begin_arg);
	err |= __put_user(temp.uint, &cmd->scan_begin_arg);
	err |= __get_user(temp.uint, &cmd32->convert_src);
	err |= __put_user(temp.uint, &cmd->convert_src);
	err |= __get_user(temp.uint, &cmd32->convert_arg);
	err |= __put_user(temp.uint, &cmd->convert_arg);
	err |= __get_user(temp.uint, &cmd32->scan_end_src);
	err |= __put_user(temp.uint, &cmd->scan_end_src);
	err |= __get_user(temp.uint, &cmd32->scan_end_arg);
	err |= __put_user(temp.uint, &cmd->scan_end_arg);
	err |= __get_user(temp.uint, &cmd32->stop_src);
	err |= __put_user(temp.uint, &cmd->stop_src);
	err |= __get_user(temp.uint, &cmd32->stop_arg);
	err |= __put_user(temp.uint, &cmd->stop_arg);
	err |= __get_user(temp.uptr, &cmd32->chanlist);
	err |= __put_user(compat_ptr(temp.uptr), &cmd->chanlist);
	err |= __get_user(temp.uint, &cmd32->chanlist_len);
	err |= __put_user(temp.uint, &cmd->chanlist_len);
	err |= __get_user(temp.uptr, &cmd32->data);
	err |= __put_user(compat_ptr(temp.uptr), &cmd->data);
	err |= __get_user(temp.uint, &cmd32->data_len);
	err |= __put_user(temp.uint, &cmd->data_len);
	return err ? -EFAULT : 0;
}
/* Copy native cmd structure to 32-bit cmd structure. */
/* Reverse of get_compat_cmd() for the integer members only; the pointer
 * members are deliberately left untouched (see comments below). */
static int put_compat_cmd(comedi32_cmd __user *cmd32, comedi_cmd __user *cmd)
{
	int err;
	unsigned int temp;
	/* Copy back most of cmd structure. */
	/* Assume the pointer values are already valid. */
	/* (Could use ptr_to_compat() to set them, but that wasn't implemented
	 * until kernel version 2.6.11.) */
	if (!access_ok(VERIFY_READ, cmd, sizeof(*cmd))
			|| !access_ok(VERIFY_WRITE, cmd32, sizeof(*cmd32))) {
		return -EFAULT;
	}
	err = 0;
	err |= __get_user(temp, &cmd->subdev);
	err |= __put_user(temp, &cmd32->subdev);
	err |= __get_user(temp, &cmd->flags);
	err |= __put_user(temp, &cmd32->flags);
	err |= __get_user(temp, &cmd->start_src);
	err |= __put_user(temp, &cmd32->start_src);
	err |= __get_user(temp, &cmd->start_arg);
	err |= __put_user(temp, &cmd32->start_arg);
	err |= __get_user(temp, &cmd->scan_begin_src);
	err |= __put_user(temp, &cmd32->scan_begin_src);
	err |= __get_user(temp, &cmd->scan_begin_arg);
	err |= __put_user(temp, &cmd32->scan_begin_arg);
	err |= __get_user(temp, &cmd->convert_src);
	err |= __put_user(temp, &cmd32->convert_src);
	err |= __get_user(temp, &cmd->convert_arg);
	err |= __put_user(temp, &cmd32->convert_arg);
	err |= __get_user(temp, &cmd->scan_end_src);
	err |= __put_user(temp, &cmd32->scan_end_src);
	err |= __get_user(temp, &cmd->scan_end_arg);
	err |= __put_user(temp, &cmd32->scan_end_arg);
	err |= __get_user(temp, &cmd->stop_src);
	err |= __put_user(temp, &cmd32->stop_src);
	err |= __get_user(temp, &cmd->stop_arg);
	err |= __put_user(temp, &cmd32->stop_arg);
	/* Assume chanlist pointer is unchanged. */
	err |= __get_user(temp, &cmd->chanlist_len);
	err |= __put_user(temp, &cmd32->chanlist_len);
	/* Assume data pointer is unchanged. */
	err |= __get_user(temp, &cmd->data_len);
	err |= __put_user(temp, &cmd32->data_len);
	return err ? -EFAULT : 0;
}
/* Handle 32-bit COMEDI_CMD ioctl. */
/* Translates the 32-bit cmd into a native one and re-issues the ioctl;
 * no copy-back is needed because COMEDI_CMD does not return results in
 * the cmd structure (compare compat_cmdtest()). */
static int compat_cmd(struct file *file, unsigned long arg)
{
	comedi_cmd __user *cmd;
	comedi32_cmd __user *cmd32;
	int rc;
	cmd32 = compat_ptr(arg);
	cmd = compat_alloc_user_space(sizeof(*cmd));
	rc = get_compat_cmd(cmd, cmd32);
	if (rc) {
		return rc;
	}
	return translated_ioctl(file, COMEDI_CMD, (unsigned long)cmd);
}
/* Handle 32-bit COMEDI_CMDTEST ioctl. */
/* Like compat_cmd(), but COMEDI_CMDTEST modifies the cmd structure, so
 * the (possibly adjusted) native cmd is copied back to the 32-bit one
 * even on a successful non-zero test result. */
static int compat_cmdtest(struct file *file, unsigned long arg)
{
	comedi_cmd __user *cmd;
	comedi32_cmd __user *cmd32;
	int rc, err;
	cmd32 = compat_ptr(arg);
	cmd = compat_alloc_user_space(sizeof(*cmd));
	rc = get_compat_cmd(cmd, cmd32);
	if (rc) {
		return rc;
	}
	rc = translated_ioctl(file, COMEDI_CMDTEST, (unsigned long)cmd);
	if (rc < 0) {
		return rc;
	}
	/* copy-back failure overrides the (non-negative) test result */
	err = put_compat_cmd(cmd32, cmd);
	if (err) {
		rc = err;
	}
	return rc;
}
/* Copy 32-bit insn structure to native insn structure. */
/* Field-by-field user-to-user translation; the data member is widened
 * with compat_ptr().  Returns 0 or -EFAULT. */
static int get_compat_insn(comedi_insn __user *insn,
		comedi32_insn __user *insn32)
{
	int err;
	union {
		unsigned int uint;
		compat_uptr_t uptr;
	} temp;
	/* Copy insn structure. Ignore the unused members. */
	err = 0;
	if (!access_ok(VERIFY_READ, insn32, sizeof(*insn32))
			|| !access_ok(VERIFY_WRITE, insn, sizeof(*insn))) {
		return -EFAULT;
	}
	err |= __get_user(temp.uint, &insn32->insn);
	err |= __put_user(temp.uint, &insn->insn);
	err |= __get_user(temp.uint, &insn32->n);
	err |= __put_user(temp.uint, &insn->n);
	err |= __get_user(temp.uptr, &insn32->data);
	err |= __put_user(compat_ptr(temp.uptr), &insn->data);
	err |= __get_user(temp.uint, &insn32->subdev);
	err |= __put_user(temp.uint, &insn->subdev);
	err |= __get_user(temp.uint, &insn32->chanspec);
	err |= __put_user(temp.uint, &insn->chanspec);
	return err ? -EFAULT : 0;
}
/* Handle 32-bit COMEDI_INSNLIST ioctl. */
/* Builds a native insnlist plus its insn array in one allocation in the
 * compat user-space scratch area, translating each 32-bit insn, then
 * re-issues the ioctl as a native COMEDI_INSNLIST.
 *
 * NOTE(review): n_insns comes from userspace unvalidated here, so
 * offsetof(..., insn[n_insns]) could overflow or exceed what
 * compat_alloc_user_space() can provide for very large values — confirm
 * whether the native ioctl handler bounds n_insns before this matters. */
static int compat_insnlist(struct file *file, unsigned long arg)
{
	struct combined_insnlist {
		comedi_insnlist insnlist;
		comedi_insn insn[1];
	} __user *s;
	comedi32_insnlist __user *insnlist32;
	comedi32_insn __user *insn32;
	compat_uptr_t uptr;
	unsigned int n_insns, n;
	int err, rc;
	insnlist32 = compat_ptr(arg);
	/* Get 32-bit insnlist structure. */
	if (!access_ok(VERIFY_READ, insnlist32, sizeof(*insnlist32))) {
		return -EFAULT;
	}
	err = 0;
	err |= __get_user(n_insns, &insnlist32->n_insns);
	err |= __get_user(uptr, &insnlist32->insns);
	insn32 = compat_ptr(uptr);
	if (err) {
		return -EFAULT;
	}
	/* Allocate user memory to copy insnlist and insns into. */
	s = compat_alloc_user_space(offsetof(struct combined_insnlist,
			insn[n_insns]));
	/* Set native insnlist structure. */
	if (!access_ok(VERIFY_WRITE, &s->insnlist, sizeof(s->insnlist))) {
		return -EFAULT;
	}
	err |= __put_user(n_insns, &s->insnlist.n_insns);
	err |= __put_user(&s->insn[0], &s->insnlist.insns);
	if (err) {
		return -EFAULT;
	}
	/* Copy insn structures. */
	for (n = 0; n < n_insns; n++) {
		rc = get_compat_insn(&s->insn[n], &insn32[n]);
		if (rc) {
			return rc;
		}
	}
	return translated_ioctl(file, COMEDI_INSNLIST,
			(unsigned long)&s->insnlist);
}
/* Handle 32-bit COMEDI_INSN ioctl. */
/* Translates a single 32-bit insn into a native one in compat user
 * space, then re-issues the ioctl as a native COMEDI_INSN. */
static int compat_insn(struct file *file, unsigned long arg)
{
	comedi_insn __user *insn;
	comedi32_insn __user *insn32;
	int rc;
	insn32 = compat_ptr(arg);
	insn = compat_alloc_user_space(sizeof(*insn));
	rc = get_compat_insn(insn, insn32);
	if (rc) {
		return rc;
	}
	return translated_ioctl(file, COMEDI_INSN, (unsigned long)insn);
}
/* Process untranslated ioctl. */
/* Returns -ENOIOCTLCMD for unrecognised ioctl codes. */
/* Central 32-bit dispatch: ioctls whose argument is layout-compatible
 * are forwarded directly (with at most a pointer widening); the ones
 * with pointer-bearing structures go through the compat_* translators. */
static inline int raw_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	int rc;
	switch (cmd) {
	case COMEDI_DEVCONFIG:
	case COMEDI_DEVINFO:
	case COMEDI_SUBDINFO:
	case COMEDI_BUFCONFIG:
	case COMEDI_BUFINFO:
		/* Just need to translate the pointer argument. */
		arg = (unsigned long)compat_ptr(arg);
		rc = translated_ioctl(file, cmd, arg);
		break;
	case COMEDI_LOCK:
	case COMEDI_UNLOCK:
	case COMEDI_CANCEL:
	case COMEDI_POLL:
		/* No translation needed. */
		rc = translated_ioctl(file, cmd, arg);
		break;
	case COMEDI32_CHANINFO:
		rc = compat_chaninfo(file, arg);
		break;
	case COMEDI32_RANGEINFO:
		rc = compat_rangeinfo(file, arg);
		break;
	case COMEDI32_CMD:
		rc = compat_cmd(file, arg);
		break;
	case COMEDI32_CMDTEST:
		rc = compat_cmdtest(file, arg);
		break;
	case COMEDI32_INSNLIST:
		rc = compat_insnlist(file, arg);
		break;
	case COMEDI32_INSN:
		rc = compat_insn(file, arg);
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}
	return rc;
}
#ifdef HAVE_COMPAT_IOCTL /* defined in <linux/fs.h> 2.6.11 onwards */
/* compat_ioctl file operation. */
/* Returns -ENOIOCTLCMD for unrecognised ioctl codes. */
/* Thin wrapper installed as the file_operations.compat_ioctl entry on
 * kernels that support it (2.6.11 onwards). */
long comedi_compat_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	return raw_ioctl(file, cmd, arg);
}
#else /* HAVE_COMPAT_IOCTL */
/*
* Brain-dead ioctl compatibility for 2.6.10 and earlier.
*
* It's brain-dead because cmd numbers need to be unique system-wide!
* The comedi driver could end up attempting to execute ioctls for non-Comedi
* devices because it registered the system-wide cmd code first. Similarly,
* another driver could end up attempting to execute ioctls for a Comedi
* device because it registered the cmd code first. Chaos ensues.
*/
/* Handler for all 32-bit ioctl codes registered by this driver. */
/* Pre-2.6.11 path: registered system-wide, so it must first verify the
 * target really is a comedi device before dispatching. */
static int mapped_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg,
		struct file *file)
{
	int rc;
	/* Make sure we are dealing with a Comedi device. */
	if (imajor(file->f_dentry->d_inode) != COMEDI_MAJOR) {
		return -ENOTTY;
	}
	rc = raw_ioctl(file, cmd, arg);
	/* Do not return -ENOIOCTLCMD. */
	if (rc == -ENOIOCTLCMD) {
		rc = -ENOTTY;
	}
	return rc;
}
/* One entry per 32-bit ioctl code registered system-wide. */
struct ioctl32_map {
	unsigned int cmd;	/* ioctl command number */
	int (*handler)(unsigned int, unsigned int, unsigned long,
			struct file *);
	int registered;	/* non-zero once registration succeeded */
};
/* All ioctl codes handled by this driver; registered/unregistered in
 * bulk by comedi_(un)register_ioctl32() below. */
static struct ioctl32_map comedi_ioctl32_map[] = {
	{ COMEDI_DEVCONFIG, mapped_ioctl, 0 },
	{ COMEDI_DEVINFO, mapped_ioctl, 0 },
	{ COMEDI_SUBDINFO, mapped_ioctl, 0 },
	{ COMEDI_BUFCONFIG, mapped_ioctl, 0 },
	{ COMEDI_BUFINFO, mapped_ioctl, 0 },
	{ COMEDI_LOCK, mapped_ioctl, 0 },
	{ COMEDI_UNLOCK, mapped_ioctl, 0 },
	{ COMEDI_CANCEL, mapped_ioctl, 0 },
	{ COMEDI_POLL, mapped_ioctl, 0 },
	{ COMEDI32_CHANINFO, mapped_ioctl, 0 },
	{ COMEDI32_RANGEINFO, mapped_ioctl, 0 },
	{ COMEDI32_CMD, mapped_ioctl, 0 },
	{ COMEDI32_CMDTEST, mapped_ioctl, 0 },
	{ COMEDI32_INSNLIST, mapped_ioctl, 0 },
	{ COMEDI32_INSN, mapped_ioctl, 0 },
};
#define NUM_IOCTL32_MAPS ARRAY_SIZE(comedi_ioctl32_map)
/* Register system-wide 32-bit ioctl handlers. */
/* Best-effort: a failed registration is logged and recorded so that
 * comedi_unregister_ioctl32() only unregisters what actually succeeded. */
void comedi_register_ioctl32(void)
{
	int n, rc;
	for (n = 0; n < NUM_IOCTL32_MAPS; n++) {
		rc = register_ioctl32_conversion(comedi_ioctl32_map[n].cmd,
				comedi_ioctl32_map[n].handler);
		if (rc) {
			printk(KERN_WARNING
					"comedi: failed to register 32-bit "
					"compatible ioctl handler for 0x%X - "
					"expect bad things to happen!\n",
					comedi_ioctl32_map[n].cmd);
		}
		comedi_ioctl32_map[n].registered = !rc;
	}
}
/* Unregister system-wide 32-bit ioctl translations. */
/* Only entries marked registered are removed; a failed unregistration
 * keeps the flag set (the handler is still live in the kernel's table). */
void comedi_unregister_ioctl32(void)
{
	int n, rc;
	for (n = 0; n < NUM_IOCTL32_MAPS; n++) {
		if (comedi_ioctl32_map[n].registered) {
			rc = unregister_ioctl32_conversion(
					comedi_ioctl32_map[n].cmd,
					comedi_ioctl32_map[n].handler);
			if (rc) {
				printk(KERN_ERR
						"comedi: failed to unregister 32-bit "
						"compatible ioctl handler for 0x%X - "
						"expect kernel Oops!\n",
						comedi_ioctl32_map[n].cmd);
			} else {
				comedi_ioctl32_map[n].registered = 0;
			}
		}
	}
}
#endif /* HAVE_COMPAT_IOCTL */
#endif /* CONFIG_COMPAT */

View File

@ -0,0 +1,58 @@
/*
comedi/comedi_compat32.h
32-bit ioctl compatibility for 64-bit comedi kernel module.
Author: Ian Abbott, MEV Ltd. <abbotti@mev.co.uk>
Copyright (C) 2007 MEV Ltd. <http://www.mev.co.uk/>
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 1997-2007 David A. Schleef <ds@schleef.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _COMEDI_COMPAT32_H
#define _COMEDI_COMPAT32_H
#include <linux/compat.h>
#include <linux/fs.h>	/* For HAVE_COMPAT_IOCTL and HAVE_UNLOCKED_IOCTL */
#ifdef CONFIG_COMPAT
#ifdef HAVE_COMPAT_IOCTL
extern long comedi_compat_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
/* compat_ioctl file op is used directly, so no system-wide ioctl32
 * registration is needed on these kernels */
#define comedi_register_ioctl32() do {} while (0)
#define comedi_unregister_ioctl32() do {} while (0)
#else /* HAVE_COMPAT_IOCTL */
/* no compat_ioctl file op; fall back to system-wide registration */
#define comedi_compat_ioctl 0	/* NULL */
extern void comedi_register_ioctl32(void);
extern void comedi_unregister_ioctl32(void);
#endif /* HAVE_COMPAT_IOCTL */
#else /* CONFIG_COMPAT */
/* no 32-bit compatibility needed at all */
#define comedi_compat_ioctl 0	/* NULL */
#define comedi_register_ioctl32() do {} while (0)
#define comedi_unregister_ioctl32() do {} while (0)
#endif /* CONFIG_COMPAT */
#endif /* _COMEDI_COMPAT32_H */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,8 @@
#ifndef _COMEDI_FOPS_H
#define _COMEDI_FOPS_H
/* defined in comedi_fops.c and shared with the rest of the core */
extern struct class *comedi_class;
extern const struct file_operations comedi_fops;
#endif /* _COMEDI_FOPS_H */

View File

@ -0,0 +1,77 @@
/*
module/exp_ioctl.c
exported comedi functions
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define __NO_VERSION__
#ifndef EXPORT_SYMTAB
#define EXPORT_SYMTAB
#endif
#include "comedidev.h"
/* for drivers */
EXPORT_SYMBOL(comedi_driver_register);
EXPORT_SYMBOL(comedi_driver_unregister);
/* NOTE(review): the commented-out exports below appear to be retained
 * as a record of removed/optional interfaces — confirm before deleting. */
//EXPORT_SYMBOL(comedi_bufcheck);
//EXPORT_SYMBOL(comedi_done);
//EXPORT_SYMBOL(comedi_error_done);
EXPORT_SYMBOL(comedi_error);
//EXPORT_SYMBOL(comedi_eobuf);
//EXPORT_SYMBOL(comedi_eos);
EXPORT_SYMBOL(comedi_event);
EXPORT_SYMBOL(comedi_get_subdevice_runflags);
EXPORT_SYMBOL(comedi_set_subdevice_runflags);
/* common range tables */
EXPORT_SYMBOL(range_bipolar10);
EXPORT_SYMBOL(range_bipolar5);
EXPORT_SYMBOL(range_bipolar2_5);
EXPORT_SYMBOL(range_unipolar10);
EXPORT_SYMBOL(range_unipolar5);
EXPORT_SYMBOL(range_unknown);
/* real-time support, only built with CONFIG_COMEDI_RT */
#ifdef CONFIG_COMEDI_RT
EXPORT_SYMBOL(comedi_free_irq);
EXPORT_SYMBOL(comedi_request_irq);
EXPORT_SYMBOL(comedi_switch_to_rt);
EXPORT_SYMBOL(comedi_switch_to_non_rt);
EXPORT_SYMBOL(rt_pend_call);
#endif
#ifdef CONFIG_COMEDI_DEBUG
EXPORT_SYMBOL(comedi_debug);
#endif
EXPORT_SYMBOL_GPL(comedi_alloc_board_minor);
EXPORT_SYMBOL_GPL(comedi_free_board_minor);
EXPORT_SYMBOL_GPL(comedi_pci_auto_config);
EXPORT_SYMBOL_GPL(comedi_pci_auto_unconfig);
/* for kcomedilib */
EXPORT_SYMBOL(check_chanlist);
EXPORT_SYMBOL_GPL(comedi_get_device_file_info);
EXPORT_SYMBOL(comedi_buf_put);
EXPORT_SYMBOL(comedi_buf_get);
EXPORT_SYMBOL(comedi_buf_read_n_available);
EXPORT_SYMBOL(comedi_buf_write_free);
EXPORT_SYMBOL(comedi_buf_write_alloc);
EXPORT_SYMBOL(comedi_buf_read_free);
EXPORT_SYMBOL(comedi_buf_read_alloc);
EXPORT_SYMBOL(comedi_buf_memcpy_to);
EXPORT_SYMBOL(comedi_buf_memcpy_from);
EXPORT_SYMBOL(comedi_reset_async_buf);

View File

@ -0,0 +1,150 @@
/*
module/comedi_rt.h
header file for real-time structures, variables, and constants
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _COMEDI_RT_H
#define _COMEDI_RT_H
#ifndef _COMEDIDEV_H
#error comedi_rt.h should only be included by comedidev.h
#endif
#include <linux/version.h>
#include <linux/kdev_t.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#ifdef CONFIG_COMEDI_RT
#ifdef CONFIG_COMEDI_RTAI
#include <rtai.h>
#include <rtai_sched.h>
#include <rtai_version.h>
#endif
#ifdef CONFIG_COMEDI_RTL
#include <rtl_core.h>
#include <rtl_time.h>
/* #ifdef RTLINUX_VERSION_CODE */
#include <rtl_sync.h>
/* #endif */
#define rt_printk rtl_printf
#endif
#ifdef CONFIG_COMEDI_FUSION
#define rt_printk(format, args...) printk(format , ## args)
#endif /* CONFIG_COMEDI_FUSION */
#ifdef CONFIG_PRIORITY_IRQ
#define rt_printk printk
#endif
int comedi_request_irq(unsigned int irq, irqreturn_t(*handler) (int,
void *PT_REGS_ARG), unsigned long flags, const char *device,
comedi_device *dev_id);
void comedi_free_irq(unsigned int irq, comedi_device *dev_id);
void comedi_rt_init(void);
void comedi_rt_cleanup(void);
int comedi_switch_to_rt(comedi_device *dev);
void comedi_switch_to_non_rt(comedi_device *dev);
void comedi_rt_pend_wakeup(wait_queue_head_t *q);
extern int rt_pend_call(void (*func) (int arg1, void *arg2), int arg1,
void *arg2);
#else
#define comedi_request_irq(a, b, c, d, e) request_irq(a, b, c, d, e)
#define comedi_free_irq(a, b) free_irq(a, b)
#define comedi_rt_init() do {} while (0)
#define comedi_rt_cleanup() do {} while (0)
#define comedi_switch_to_rt(a) (-1)
#define comedi_switch_to_non_rt(a) do {} while (0)
#define comedi_rt_pend_wakeup(a) do {} while (0)
#define rt_printk(format, args...) printk(format, ##args)
#endif
/* Spin-lock-irqsave that works with or without a real-time framework.
 * The macro mirrors the regular spin_lock_irqsave() calling convention
 * (flags is an lvalue, not a pointer); the inline helper exists to get
 * some type checking on lock_ptr instead of being a bare macro. */
#define comedi_spin_lock_irqsave(lock_ptr, flags) \
	(flags = __comedi_spin_lock_irqsave(lock_ptr))
static inline unsigned long __comedi_spin_lock_irqsave(spinlock_t *lock_ptr)
{
	unsigned long flags;
	/* Each configured RT backend provides its own locking primitive;
	 * plain spin_lock_irqsave() is the non-RT fallback. */
#if defined(CONFIG_COMEDI_RTAI)
	/* RTAI returns the flags rather than writing through an lvalue */
	flags = rt_spin_lock_irqsave(lock_ptr);
#elif defined(CONFIG_COMEDI_RTL)
	rtl_spin_lock_irqsave(lock_ptr, flags);
#elif defined(CONFIG_COMEDI_RTL_V1)
	rtl_spin_lock_irqsave(lock_ptr, flags);
#elif defined(CONFIG_COMEDI_FUSION)
	rthal_spin_lock_irqsave(lock_ptr, flags);
#else
	spin_lock_irqsave(lock_ptr, flags);
#endif
	return flags;
}
/* Counterpart to comedi_spin_lock_irqsave(): releases the lock and
 * restores the saved interrupt state for whichever framework is active.
 * Note that the RTAI primitive takes its arguments in the opposite
 * order (flags first). */
static inline void comedi_spin_unlock_irqrestore(spinlock_t *lock_ptr,
	unsigned long flags)
{
#if defined(CONFIG_COMEDI_RTAI)
	rt_spin_unlock_irqrestore(flags, lock_ptr);
#elif defined(CONFIG_COMEDI_RTL)
	rtl_spin_unlock_irqrestore(lock_ptr, flags);
#elif defined(CONFIG_COMEDI_RTL_V1)
	rtl_spin_unlock_irqrestore(lock_ptr, flags);
#elif defined(CONFIG_COMEDI_FUSION)
	rthal_spin_unlock_irqrestore(lock_ptr, flags);
#else
	spin_unlock_irqrestore(lock_ptr, flags);
#endif
}
/* RT-safe microsecond busy-wait: uses the active RT framework's delay
 * primitive when one is configured, otherwise falls back to udelay(). */
static inline void comedi_udelay(unsigned int usec)
{
#if defined(CONFIG_COMEDI_RTAI)
	static const int nanosec_per_usec = 1000;
	rt_busy_sleep(usec * nanosec_per_usec);
#elif defined(CONFIG_COMEDI_RTL)
	static const int nanosec_per_usec = 1000;
	rtl_delay(usec * nanosec_per_usec);
#else
	udelay(usec);
#endif
}
#endif

View File

@ -0,0 +1,537 @@
/*
include/linux/comedidev.h
header file for kernel-only structures, variables, and constants
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _COMEDIDEV_H
#define _COMEDIDEV_H
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/kdev_t.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include "interrupt.h"
#include <linux/dma-mapping.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include "comedi.h"
#define DPRINTK(format, args...) do { \
if (comedi_debug) \
printk(KERN_DEBUG "comedi: " format , ## args); \
} while (0)
#define COMEDI_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
#define COMEDI_VERSION_CODE COMEDI_VERSION(COMEDI_MAJORVERSION, COMEDI_MINORVERSION, COMEDI_MICROVERSION)
#define COMEDI_RELEASE VERSION
#define COMEDI_INITCLEANUP_NOMODULE(x) \
static int __init x ## _init_module(void) \
{return comedi_driver_register(&(x));} \
static void __exit x ## _cleanup_module(void) \
{comedi_driver_unregister(&(x));} \
module_init(x ## _init_module); \
module_exit(x ## _cleanup_module); \
#define COMEDI_MODULE_MACROS \
MODULE_AUTHOR("Comedi http://www.comedi.org"); \
MODULE_DESCRIPTION("Comedi low-level driver"); \
MODULE_LICENSE("GPL"); \
#define COMEDI_INITCLEANUP(x) \
COMEDI_MODULE_MACROS \
COMEDI_INITCLEANUP_NOMODULE(x)
#define COMEDI_PCI_INITCLEANUP_NOMODULE(comedi_driver, pci_id_table) \
static int __devinit comedi_driver ## _pci_probe(struct pci_dev *dev, \
const struct pci_device_id *ent) \
{ \
return comedi_pci_auto_config(dev, comedi_driver.driver_name); \
} \
static void __devexit comedi_driver ## _pci_remove(struct pci_dev *dev) \
{ \
comedi_pci_auto_unconfig(dev); \
} \
static struct pci_driver comedi_driver ## _pci_driver = \
{ \
.id_table = pci_id_table, \
.probe = &comedi_driver ## _pci_probe, \
.remove = __devexit_p(&comedi_driver ## _pci_remove) \
}; \
static int __init comedi_driver ## _init_module(void) \
{ \
int retval; \
retval = comedi_driver_register(&comedi_driver); \
if (retval < 0) \
return retval; \
comedi_driver ## _pci_driver.name = (char *)comedi_driver.driver_name; \
return pci_register_driver(&comedi_driver ## _pci_driver); \
} \
static void __exit comedi_driver ## _cleanup_module(void) \
{ \
pci_unregister_driver(&comedi_driver ## _pci_driver); \
comedi_driver_unregister(&comedi_driver); \
} \
module_init(comedi_driver ## _init_module); \
module_exit(comedi_driver ## _cleanup_module);
#define COMEDI_PCI_INITCLEANUP(comedi_driver, pci_id_table) \
COMEDI_MODULE_MACROS \
COMEDI_PCI_INITCLEANUP_NOMODULE(comedi_driver, pci_id_table)
#define PCI_VENDOR_ID_INOVA 0x104c
#define PCI_VENDOR_ID_NATINST 0x1093
#define PCI_VENDOR_ID_DATX 0x1116
#define PCI_VENDOR_ID_COMPUTERBOARDS 0x1307
#define PCI_VENDOR_ID_ADVANTECH 0x13fe
#define PCI_VENDOR_ID_RTD 0x1435
#define PCI_VENDOR_ID_AMPLICON 0x14dc
#define PCI_VENDOR_ID_ADLINK 0x144a
#define PCI_VENDOR_ID_ICP 0x104c
#define PCI_VENDOR_ID_CONTEC 0x1221
#define PCI_VENDOR_ID_MEILHAUS 0x1402
#define COMEDI_NUM_MINORS 0x100
#define COMEDI_NUM_LEGACY_MINORS 0x10
#define COMEDI_NUM_BOARD_MINORS 0x30
#define COMEDI_FIRST_SUBDEVICE_MINOR COMEDI_NUM_BOARD_MINORS
typedef struct comedi_device_struct comedi_device;
typedef struct comedi_subdevice_struct comedi_subdevice;
typedef struct comedi_async_struct comedi_async;
typedef struct comedi_driver_struct comedi_driver;
typedef struct comedi_lrange_struct comedi_lrange;
typedef struct device device_create_result_type;
#define COMEDI_DEVICE_CREATE(cs, parent, devt, drvdata, device, fmt...) \
device_create(cs, ((parent) ? (parent) : (device)), devt, drvdata, fmt)
struct comedi_subdevice_struct {
comedi_device *device;
int type;
int n_chan;
volatile int subdev_flags;
int len_chanlist; /* maximum length of channel/gain list */
void *private;
comedi_async *async;
void *lock;
void *busy;
unsigned runflags;
spinlock_t spin_lock;
int io_bits;
lsampl_t maxdata; /* if maxdata==0, use list */
const lsampl_t *maxdata_list; /* list is channel specific */
unsigned int flags;
const unsigned int *flaglist;
unsigned int settling_time_0;
const comedi_lrange *range_table;
const comedi_lrange *const *range_table_list;
unsigned int *chanlist; /* driver-owned chanlist (not used) */
int (*insn_read) (comedi_device *, comedi_subdevice *, comedi_insn *,
lsampl_t *);
int (*insn_write) (comedi_device *, comedi_subdevice *, comedi_insn *,
lsampl_t *);
int (*insn_bits) (comedi_device *, comedi_subdevice *, comedi_insn *,
lsampl_t *);
int (*insn_config) (comedi_device *, comedi_subdevice *, comedi_insn *,
lsampl_t *);
int (*do_cmd) (comedi_device *, comedi_subdevice *);
int (*do_cmdtest) (comedi_device *, comedi_subdevice *, comedi_cmd *);
int (*poll) (comedi_device *, comedi_subdevice *);
int (*cancel) (comedi_device *, comedi_subdevice *);
/* int (*do_lock)(comedi_device *,comedi_subdevice *); */
/* int (*do_unlock)(comedi_device *,comedi_subdevice *); */
/* called when the buffer changes */
int (*buf_change) (comedi_device *dev, comedi_subdevice *s,
unsigned long new_size);
void (*munge) (comedi_device *dev, comedi_subdevice *s, void *data,
unsigned int num_bytes, unsigned int start_chan_index);
enum dma_data_direction async_dma_dir;
unsigned int state;
device_create_result_type *class_dev;
int minor;
};
/* One page of an async acquisition buffer: its kernel virtual address
 * plus the bus address handed to DMA-capable hardware. */
struct comedi_buf_page {
	void *virt_addr;
	dma_addr_t dma_addr;
};
struct comedi_async_struct {
comedi_subdevice *subdevice;
void *prealloc_buf; /* pre-allocated buffer */
unsigned int prealloc_bufsz; /* buffer size, in bytes */
struct comedi_buf_page *buf_page_list; /* virtual and dma address of each page */
unsigned n_buf_pages; /* num elements in buf_page_list */
unsigned int max_bufsize; /* maximum buffer size, bytes */
unsigned int mmap_count; /* current number of mmaps of prealloc_buf */
unsigned int buf_write_count; /* byte count for writer (write completed) */
unsigned int buf_write_alloc_count; /* byte count for writer (allocated for writing) */
unsigned int buf_read_count; /* byte count for reader (read completed) */
unsigned int buf_read_alloc_count; /* byte count for reader (allocated for reading) */
unsigned int buf_write_ptr; /* buffer marker for writer */
unsigned int buf_read_ptr; /* buffer marker for reader */
unsigned int cur_chan; /* useless channel marker for interrupt */
/* number of bytes that have been received for current scan */
unsigned int scan_progress;
/* keeps track of where we are in chanlist as for munging */
unsigned int munge_chan;
/* number of bytes that have been munged */
unsigned int munge_count;
/* buffer marker for munging */
unsigned int munge_ptr;
unsigned int events; /* events that have occurred */
comedi_cmd cmd;
wait_queue_head_t wait_head;
/* callback stuff */
unsigned int cb_mask;
int (*cb_func) (unsigned int flags, void *);
void *cb_arg;
int (*inttrig) (comedi_device *dev, comedi_subdevice *s,
unsigned int x);
};
/* A registered low-level board driver.  Drivers form a singly linked
 * global list (headed by comedi_drivers) threaded through ->next. */
struct comedi_driver_struct {
	struct comedi_driver_struct *next;
	const char *driver_name;
	struct module *module;
	int (*attach) (comedi_device *, comedi_devconfig *);
	int (*detach) (comedi_device *);
	/* number of elements in board_name and board_id arrays */
	unsigned int num_names;
	const char *const *board_name;
	/* offset in bytes from one board name pointer to the next */
	int offset;
};
struct comedi_device_struct {
int use_count;
comedi_driver *driver;
void *private;
device_create_result_type *class_dev;
int minor;
/* hw_dev is passed to dma_alloc_coherent when allocating async buffers
* for subdevices that have async_dma_dir set to something other than
* DMA_NONE */
struct device *hw_dev;
const char *board_name;
const void *board_ptr;
int attached;
int rt;
spinlock_t spinlock;
struct mutex mutex;
int in_request_module;
int n_subdevices;
comedi_subdevice *subdevices;
/* dumb */
unsigned long iobase;
unsigned int irq;
comedi_subdevice *read_subdev;
comedi_subdevice *write_subdev;
struct fasync_struct *async_queue;
void (*open) (comedi_device *dev);
void (*close) (comedi_device *dev);
};
/* Per-minor bookkeeping: the device behind a /dev/comedi* node and the
 * subdevices (if any) that its read()/write() should be routed to. */
struct comedi_device_file_info {
	comedi_device *device;
	comedi_subdevice *read_subdevice;
	comedi_subdevice *write_subdevice;
};
#ifdef CONFIG_COMEDI_DEBUG
extern int comedi_debug;
#else
static const int comedi_debug;
#endif
/*
* function prototypes
*/
void comedi_event(comedi_device *dev, comedi_subdevice *s);
void comedi_error(const comedi_device *dev, const char *s);
/* we can expand the number of bits used to encode devices/subdevices into
the minor number soon, after more distros support > 8 bit minor numbers
(like after Debian Etch gets released) */
enum comedi_minor_bits {
COMEDI_DEVICE_MINOR_MASK = 0xf,
COMEDI_SUBDEVICE_MINOR_MASK = 0xf0
};
static const unsigned COMEDI_SUBDEVICE_MINOR_SHIFT = 4;
static const unsigned COMEDI_SUBDEVICE_MINOR_OFFSET = 1;
struct comedi_device_file_info *comedi_get_device_file_info(unsigned minor);
/* Resolve the subdevice that services read() on this minor: an explicit
 * per-minor read subdevice wins, otherwise fall back to the device-wide
 * default.  Returns NULL if neither exists (or there is no device). */
static inline comedi_subdevice *comedi_get_read_subdevice(
	const struct comedi_device_file_info *info)
{
	comedi_subdevice *s = info->read_subdevice;

	if (s == NULL && info->device != NULL)
		s = info->device->read_subdev;
	return s;
}
/* Resolve the subdevice that services write() on this minor: an explicit
 * per-minor write subdevice wins, otherwise fall back to the device-wide
 * default.  Returns NULL if neither exists (or there is no device). */
static inline comedi_subdevice *comedi_get_write_subdevice(
	const struct comedi_device_file_info *info)
{
	comedi_subdevice *s = info->write_subdevice;

	if (s == NULL && info->device != NULL)
		s = info->device->write_subdev;
	return s;
}
void comedi_device_detach(comedi_device *dev);
int comedi_device_attach(comedi_device *dev, comedi_devconfig *it);
int comedi_driver_register(comedi_driver *);
int comedi_driver_unregister(comedi_driver *);
void init_polling(void);
void cleanup_polling(void);
void start_polling(comedi_device *);
void stop_polling(comedi_device *);
int comedi_buf_alloc(comedi_device *dev, comedi_subdevice *s, unsigned long
new_size);
#ifdef CONFIG_PROC_FS
void comedi_proc_init(void);
void comedi_proc_cleanup(void);
#else
/* /proc support compiled out: provide empty stand-ins so callers do not
 * need #ifdefs of their own. */
static inline void comedi_proc_init(void)
{
}
static inline void comedi_proc_cleanup(void)
{
}
#endif
/* subdevice runflags */
enum subdevice_runflags {
SRF_USER = 0x00000001,
SRF_RT = 0x00000002,
/* indicates an COMEDI_CB_ERROR event has occurred since the last
* command was started */
SRF_ERROR = 0x00000004,
SRF_RUNNING = 0x08000000
};
/*
various internal comedi functions
*/
int do_rangeinfo_ioctl(comedi_device *dev, comedi_rangeinfo *arg);
int check_chanlist(comedi_subdevice *s, int n, unsigned int *chanlist);
void comedi_set_subdevice_runflags(comedi_subdevice *s, unsigned mask,
unsigned bits);
unsigned comedi_get_subdevice_runflags(comedi_subdevice *s);
int insn_inval(comedi_device *dev, comedi_subdevice *s,
comedi_insn *insn, lsampl_t *data);
/* range stuff */
#define RANGE(a, b) {(a)*1e6, (b)*1e6, 0}
#define RANGE_ext(a, b) {(a)*1e6, (b)*1e6, RF_EXTERNAL}
#define RANGE_mA(a, b) {(a)*1e6, (b)*1e6, UNIT_mA}
#define RANGE_unitless(a, b) {(a)*1e6, (b)*1e6, 0} /* XXX */
#define BIP_RANGE(a) {-(a)*1e6, (a)*1e6, 0}
#define UNI_RANGE(a) {0, (a)*1e6, 0}
extern const comedi_lrange range_bipolar10;
extern const comedi_lrange range_bipolar5;
extern const comedi_lrange range_bipolar2_5;
extern const comedi_lrange range_unipolar10;
extern const comedi_lrange range_unipolar5;
extern const comedi_lrange range_unknown;
#define range_digital range_unipolar5
#if __GNUC__ >= 3
#define GCC_ZERO_LENGTH_ARRAY
#else
#define GCC_ZERO_LENGTH_ARRAY 0
#endif
/* A table of ranges for a subdevice/channel; length counts the entries
 * stored in the trailing (zero-length) range array. */
struct comedi_lrange_struct {
	int length;
	comedi_krange range[GCC_ZERO_LENGTH_ARRAY];
};
/* some silly little inline functions */
/* Allocate and initialize num_subdevices zeroed comedi_subdevice
 * structures for a board being attached.  Each subdevice gets its
 * back-pointer, a DMA direction of DMA_NONE, an initialized spinlock
 * and an invalid (-1) minor number.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.  On failure
 * dev->n_subdevices is left at 0 so that cleanup code iterating
 * n_subdevices never walks a NULL dev->subdevices array (the original
 * set the count before allocating, leaving the two inconsistent on
 * failure). */
static inline int alloc_subdevices(comedi_device *dev,
	unsigned int num_subdevices)
{
	unsigned i;

	dev->subdevices =
		kcalloc(num_subdevices, sizeof(comedi_subdevice), GFP_KERNEL);
	if (!dev->subdevices) {
		dev->n_subdevices = 0;	/* keep count consistent with array */
		return -ENOMEM;
	}
	dev->n_subdevices = num_subdevices;
	for (i = 0; i < num_subdevices; ++i) {
		dev->subdevices[i].device = dev;
		dev->subdevices[i].async_dma_dir = DMA_NONE;
		spin_lock_init(&dev->subdevices[i].spin_lock);
		dev->subdevices[i].minor = -1;
	}
	return 0;
}
/* Allocate zeroed driver-private state of the given size and hang it on
 * dev->private.  Returns 0 on success, -ENOMEM on allocation failure
 * (dev->private is then NULL). */
static inline int alloc_private(comedi_device *dev, int size)
{
	dev->private = kzalloc(size, GFP_KERNEL);
	return dev->private ? 0 : -ENOMEM;
}
/* Size in bytes of one sample on this subdevice: wide-sample (SDF_LSAMPL)
 * subdevices store lsampl_t, everything else stores sampl_t. */
static inline unsigned int bytes_per_sample(const comedi_subdevice *subd)
{
	return (subd->subdev_flags & SDF_LSAMPL) ?
		sizeof(lsampl_t) : sizeof(sampl_t);
}
/* Must be used in attach to set dev->hw_dev if you wish to DMA directly
 * into comedi's buffer.  Drops the reference held on any previous
 * hardware device and takes a reference on the new one (which may be
 * NULL to simply clear the association). */
static inline void comedi_set_hw_dev(comedi_device *dev, struct device *hw_dev)
{
	/* release the reference on the old hardware device, if any */
	if (dev->hw_dev != NULL)
		put_device(dev->hw_dev);

	if (hw_dev != NULL) {
		/* get_device() returns its argument after bumping the count */
		dev->hw_dev = get_device(hw_dev);
		BUG_ON(dev->hw_dev == NULL);
	} else {
		dev->hw_dev = NULL;
	}
}
int comedi_buf_put(comedi_async *async, sampl_t x);
int comedi_buf_get(comedi_async *async, sampl_t *x);
unsigned int comedi_buf_write_n_available(comedi_async *async);
unsigned int comedi_buf_write_alloc(comedi_async *async, unsigned int nbytes);
unsigned int comedi_buf_write_alloc_strict(comedi_async *async,
unsigned int nbytes);
unsigned comedi_buf_write_free(comedi_async *async, unsigned int nbytes);
unsigned comedi_buf_read_alloc(comedi_async *async, unsigned nbytes);
unsigned comedi_buf_read_free(comedi_async *async, unsigned int nbytes);
unsigned int comedi_buf_read_n_available(comedi_async *async);
void comedi_buf_memcpy_to(comedi_async *async, unsigned int offset,
const void *source, unsigned int num_bytes);
void comedi_buf_memcpy_from(comedi_async *async, unsigned int offset,
void *destination, unsigned int num_bytes);
/* Bytes the writer has reserved with comedi_buf_write_alloc() but not
 * yet committed with comedi_buf_write_free().  Unsigned wraparound of
 * the free-running counters makes the subtraction safe. */
static inline unsigned comedi_buf_write_n_allocated(comedi_async *async)
{
	unsigned reserved = async->buf_write_alloc_count;

	return reserved - async->buf_write_count;
}
/* Bytes the reader has reserved with comedi_buf_read_alloc() but not
 * yet released with comedi_buf_read_free().  Unsigned wraparound of the
 * free-running counters makes the subtraction safe. */
static inline unsigned comedi_buf_read_n_allocated(comedi_async *async)
{
	unsigned reserved = async->buf_read_alloc_count;

	return reserved - async->buf_read_count;
}
void comedi_reset_async_buf(comedi_async *async);
/* Reassemble the auxiliary-data pointer passed in through the device
 * configuration options array.  When a pointer does not fit in an int,
 * userspace split the address into HI/LO int halves and they are glued
 * back together here.  n selects which of up to four consecutive
 * aux-data regions to return: the lengths of the preceding regions are
 * added onto the base address. */
static inline void *comedi_aux_data(int options[], int n)
{
	unsigned long address;
	unsigned long addressLow;
	int bit_shift;
	if (sizeof(int) >= sizeof(void *))
		address = options[COMEDI_DEVCONF_AUX_DATA_LO];
	else {
		/* pointer wider than int: combine the two 32-bit halves;
		 * the mask strips sign-extension from the low word */
		address = options[COMEDI_DEVCONF_AUX_DATA_HI];
		bit_shift = sizeof(int) * 8;
		address <<= bit_shift;
		addressLow = options[COMEDI_DEVCONF_AUX_DATA_LO];
		addressLow &= (1UL << bit_shift) - 1;
		address |= addressLow;
	}
	/* skip over the earlier regions to reach region n */
	if (n >= 1)
		address += options[COMEDI_DEVCONF_AUX_DATA0_LENGTH];
	if (n >= 2)
		address += options[COMEDI_DEVCONF_AUX_DATA1_LENGTH];
	if (n >= 3)
		address += options[COMEDI_DEVCONF_AUX_DATA2_LENGTH];
	BUG_ON(n > 3);
	return (void *)address;
}
int comedi_alloc_board_minor(struct device *hardware_device);
void comedi_free_board_minor(unsigned minor);
int comedi_alloc_subdevice_minor(comedi_device *dev, comedi_subdevice *s);
void comedi_free_subdevice_minor(comedi_subdevice *s);
int comedi_pci_auto_config(struct pci_dev *pcidev, const char *board_name);
void comedi_pci_auto_unconfig(struct pci_dev *pcidev);
#include "comedi_rt.h"
#endif /* _COMEDIDEV_H */

View File

@ -0,0 +1,192 @@
/*
linux/include/comedilib.h
header file for kcomedilib
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 1998-2001 David A. Schleef <ds@schleef.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _LINUX_COMEDILIB_H
#define _LINUX_COMEDILIB_H
#include "comedi.h"
/* Kernel internal stuff. Needed by real-time modules and such. */
#ifndef __KERNEL__
#error linux/comedilib.h should not be included by non-kernel-space code
#endif
/* exported functions */
#ifndef KCOMEDILIB_DEPRECATED
typedef void comedi_t;
/* these functions may not be called at real-time priority */
comedi_t *comedi_open(const char *path);
int comedi_close(comedi_t *dev);
/* these functions may be called at any priority, but may fail at
real-time priority */
int comedi_lock(comedi_t *dev, unsigned int subdev);
int comedi_unlock(comedi_t *dev, unsigned int subdev);
/* these functions may be called at any priority, but you must hold
the lock for the subdevice */
int comedi_loglevel(int loglevel);
void comedi_perror(const char *s);
char *comedi_strerror(int errnum);
int comedi_errno(void);
int comedi_fileno(comedi_t *dev);
int comedi_cancel(comedi_t *dev, unsigned int subdev);
int comedi_register_callback(comedi_t *dev, unsigned int subdev,
unsigned int mask, int (*cb) (unsigned int, void *), void *arg);
int comedi_command(comedi_t *dev, comedi_cmd *cmd);
int comedi_command_test(comedi_t *dev, comedi_cmd *cmd);
int comedi_trigger(comedi_t *dev, unsigned int subdev, comedi_trig *it);
int __comedi_trigger(comedi_t *dev, unsigned int subdev, comedi_trig *it);
int comedi_data_write(comedi_t *dev, unsigned int subdev, unsigned int chan,
unsigned int range, unsigned int aref, lsampl_t data);
int comedi_data_read(comedi_t *dev, unsigned int subdev, unsigned int chan,
unsigned int range, unsigned int aref, lsampl_t *data);
int comedi_data_read_hint(comedi_t *dev, unsigned int subdev,
unsigned int chan, unsigned int range, unsigned int aref);
int comedi_data_read_delayed(comedi_t *dev, unsigned int subdev,
unsigned int chan, unsigned int range, unsigned int aref,
lsampl_t *data, unsigned int nano_sec);
int comedi_dio_config(comedi_t *dev, unsigned int subdev, unsigned int chan,
unsigned int io);
int comedi_dio_read(comedi_t *dev, unsigned int subdev, unsigned int chan,
unsigned int *val);
int comedi_dio_write(comedi_t *dev, unsigned int subdev, unsigned int chan,
unsigned int val);
int comedi_dio_bitfield(comedi_t *dev, unsigned int subdev, unsigned int mask,
unsigned int *bits);
int comedi_get_n_subdevices(comedi_t *dev);
int comedi_get_version_code(comedi_t *dev);
const char *comedi_get_driver_name(comedi_t *dev);
const char *comedi_get_board_name(comedi_t *dev);
int comedi_get_subdevice_type(comedi_t *dev, unsigned int subdevice);
int comedi_find_subdevice_by_type(comedi_t *dev, int type, unsigned int subd);
int comedi_get_n_channels(comedi_t *dev, unsigned int subdevice);
lsampl_t comedi_get_maxdata(comedi_t *dev, unsigned int subdevice, unsigned
int chan);
int comedi_get_n_ranges(comedi_t *dev, unsigned int subdevice, unsigned int
chan);
int comedi_do_insn(comedi_t *dev, comedi_insn *insn);
int comedi_poll(comedi_t *dev, unsigned int subdev);
/* DEPRECATED functions */
int comedi_get_rangetype(comedi_t *dev, unsigned int subdevice,
unsigned int chan);
/* ALPHA functions */
unsigned int comedi_get_subdevice_flags(comedi_t *dev, unsigned int subdevice);
int comedi_get_len_chanlist(comedi_t *dev, unsigned int subdevice);
int comedi_get_krange(comedi_t *dev, unsigned int subdevice, unsigned int
chan, unsigned int range, comedi_krange *krange);
unsigned int comedi_get_buf_head_pos(comedi_t *dev, unsigned int subdevice);
int comedi_set_user_int_count(comedi_t *dev, unsigned int subdevice,
unsigned int buf_user_count);
int comedi_map(comedi_t *dev, unsigned int subdev, void *ptr);
int comedi_unmap(comedi_t *dev, unsigned int subdev);
int comedi_get_buffer_size(comedi_t *dev, unsigned int subdev);
int comedi_mark_buffer_read(comedi_t *dev, unsigned int subdevice,
unsigned int num_bytes);
int comedi_mark_buffer_written(comedi_t *d, unsigned int subdevice,
unsigned int num_bytes);
int comedi_get_buffer_contents(comedi_t *dev, unsigned int subdevice);
int comedi_get_buffer_offset(comedi_t *dev, unsigned int subdevice);
#else
/* these functions may not be called at real-time priority */
int comedi_open(unsigned int minor);
void comedi_close(unsigned int minor);
/* these functions may be called at any priority, but may fail at
real-time priority */
int comedi_lock(unsigned int minor, unsigned int subdev);
int comedi_unlock(unsigned int minor, unsigned int subdev);
/* these functions may be called at any priority, but you must hold
the lock for the subdevice */
int comedi_cancel(unsigned int minor, unsigned int subdev);
int comedi_register_callback(unsigned int minor, unsigned int subdev,
unsigned int mask, int (*cb) (unsigned int, void *), void *arg);
int comedi_command(unsigned int minor, comedi_cmd *cmd);
int comedi_command_test(unsigned int minor, comedi_cmd *cmd);
int comedi_trigger(unsigned int minor, unsigned int subdev, comedi_trig *it);
int __comedi_trigger(unsigned int minor, unsigned int subdev, comedi_trig *it);
int comedi_data_write(unsigned int dev, unsigned int subdev, unsigned int chan,
unsigned int range, unsigned int aref, lsampl_t data);
int comedi_data_read(unsigned int dev, unsigned int subdev, unsigned int chan,
unsigned int range, unsigned int aref, lsampl_t *data);
int comedi_dio_config(unsigned int dev, unsigned int subdev, unsigned int chan,
unsigned int io);
int comedi_dio_read(unsigned int dev, unsigned int subdev, unsigned int chan,
unsigned int *val);
int comedi_dio_write(unsigned int dev, unsigned int subdev, unsigned int chan,
unsigned int val);
int comedi_dio_bitfield(unsigned int dev, unsigned int subdev,
unsigned int mask, unsigned int *bits);
int comedi_get_n_subdevices(unsigned int dev);
int comedi_get_version_code(unsigned int dev);
char *comedi_get_driver_name(unsigned int dev);
char *comedi_get_board_name(unsigned int minor);
int comedi_get_subdevice_type(unsigned int minor, unsigned int subdevice);
int comedi_find_subdevice_by_type(unsigned int minor, int type,
unsigned int subd);
int comedi_get_n_channels(unsigned int minor, unsigned int subdevice);
lsampl_t comedi_get_maxdata(unsigned int minor, unsigned int subdevice, unsigned
int chan);
int comedi_get_n_ranges(unsigned int minor, unsigned int subdevice, unsigned int
chan);
int comedi_do_insn(unsigned int minor, comedi_insn *insn);
int comedi_poll(unsigned int minor, unsigned int subdev);
/* DEPRECATED functions */
int comedi_get_rangetype(unsigned int minor, unsigned int subdevice,
unsigned int chan);
/* ALPHA functions */
unsigned int comedi_get_subdevice_flags(unsigned int minor, unsigned int
subdevice);
int comedi_get_len_chanlist(unsigned int minor, unsigned int subdevice);
int comedi_get_krange(unsigned int minor, unsigned int subdevice, unsigned int
chan, unsigned int range, comedi_krange *krange);
unsigned int comedi_get_buf_head_pos(unsigned int minor, unsigned int
subdevice);
int comedi_set_user_int_count(unsigned int minor, unsigned int subdevice,
unsigned int buf_user_count);
int comedi_map(unsigned int minor, unsigned int subdev, void **ptr);
int comedi_unmap(unsigned int minor, unsigned int subdev);
#endif
#endif

View File

@ -0,0 +1,846 @@
/*
module/drivers.c
functions for manipulating drivers
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define _GNU_SOURCE
#define __NO_VERSION__
#include "comedi_fops.h"
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fcntl.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include "comedidev.h"
#include "wrapper.h"
#include <linux/highmem.h> /* for SuSE brokenness */
#include <linux/vmalloc.h>
#include <linux/cdev.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/system.h>
static int postconfig(comedi_device * dev);
static int insn_rw_emulate_bits(comedi_device * dev, comedi_subdevice * s,
comedi_insn * insn, lsampl_t * data);
static void *comedi_recognize(comedi_driver * driv, const char *name);
static void comedi_report_boards(comedi_driver * driv);
static int poll_invalid(comedi_device * dev, comedi_subdevice * s);
int comedi_buf_alloc(comedi_device * dev, comedi_subdevice * s,
unsigned long new_size);
comedi_driver *comedi_drivers;
/* Stub: auto-loading a low-level driver for an unconfigured minor is not
 * supported, so this unconditionally fails. */
int comedi_modprobe(int minor)
{
	(void)minor;		/* intentionally unused */
	return -EINVAL;
}
/* Free everything a driver attach allocated on dev and reset the device
 * to its pristine unattached state: per-subdevice minors, async buffers
 * and structures, the subdevice array, driver-private data, and all the
 * bookkeeping fields, finally dropping any hardware-device reference. */
static void cleanup_device(comedi_device * dev)
{
	int i;
	comedi_subdevice *s;

	if (dev->subdevices) {
		for (i = 0; i < dev->n_subdevices; i++) {
			s = dev->subdevices + i;
			comedi_free_subdevice_minor(s);
			if (s->async) {
				/* size 0 frees the prealloc buffer */
				comedi_buf_alloc(dev, s, 0);
				kfree(s->async);
			}
		}
		kfree(dev->subdevices);
		dev->subdevices = NULL;
		dev->n_subdevices = 0;
	}
	/* kfree(NULL) is a no-op, so no guard is needed */
	kfree(dev->private);
	dev->private = NULL;
	dev->driver = NULL;	/* was "= 0": use NULL for pointers */
	dev->board_name = NULL;
	dev->board_ptr = NULL;
	dev->iobase = 0;
	dev->irq = 0;
	dev->read_subdev = NULL;
	dev->write_subdev = NULL;
	dev->open = NULL;
	dev->close = NULL;
	comedi_set_hw_dev(dev, NULL);
}
/* Unconditionally detach dev from its driver: clear the attached flag,
 * run the driver's detach hook, then scrub all device state.  A NULL
 * driver here indicates a core bug; log it (with an explicit log level,
 * which the original printk lacked) and still clean up. */
static void __comedi_device_detach(comedi_device * dev)
{
	dev->attached = 0;
	if (dev->driver)
		dev->driver->detach(dev);
	else
		printk(KERN_ERR
			"BUG: dev->driver=NULL in comedi_device_detach()\n");
	cleanup_device(dev);
}
/* Public detach entry point: a no-op when the device was never attached,
 * otherwise delegates to the unconditional worker. */
void comedi_device_detach(comedi_device * dev)
{
	if (dev->attached)
		__comedi_device_detach(dev);
}
/*
 * Bind an unattached comedi device to a low-level driver.
 *
 * Walks the global driver list; a driver matches either by board name
 * (when it publishes a board-name table, via comedi_recognize()) or by
 * an exact driver_name match.  The matching driver's attach() is run,
 * followed by generic postconfig().
 *
 * Returns 0 on success, -EBUSY if already attached, an attach() or
 * postconfig() error code, or -EIO when no driver matched (after
 * listing the valid board names of every driver).
 */
int comedi_device_attach(comedi_device * dev, comedi_devconfig * it)
{
	comedi_driver *driv;
	int ret;
	if (dev->attached)
		return -EBUSY;
	for (driv = comedi_drivers; driv; driv = driv->next) {
		/* pin the driver's module while we probe it */
		if (!try_module_get(driv->module)) {
			printk("comedi: failed to increment module count, skipping\n");
			continue;
		}
		if (driv->num_names) {
			dev->board_ptr = comedi_recognize(driv, it->board_name);
			if (dev->board_ptr == NULL) {
				module_put(driv->module);
				continue;
			}
		} else {
			if (strcmp(driv->driver_name, it->board_name)) {
				module_put(driv->module);
				continue;
			}
		}
		/* initialize dev->driver here so comedi_error() can be
		 * called from attach */
		dev->driver = driv;
		ret = driv->attach(dev, it);
		if (ret < 0) {
			module_put(dev->driver->module);
			__comedi_device_detach(dev);
			return ret;
		}
		goto attached;
	}
	/* recognize has failed if we get here;
	 * report valid board names before returning error */
	for (driv = comedi_drivers; driv; driv = driv->next) {
		if (!try_module_get(driv->module)) {
			printk("comedi: failed to increment module count\n");
			continue;
		}
		comedi_report_boards(driv);
		module_put(driv->module);
	}
	return -EIO;
attached:
	/* do a little post-config cleanup */
	ret = postconfig(dev);
	module_put(dev->driver->module);
	if (ret < 0) {
		__comedi_device_detach(dev);
		return ret;
	}
	if (!dev->board_name) {
		printk("BUG: dev->board_name=<%p>\n", dev->board_name);
		dev->board_name = "BUG";
	}
	/* make all of the above visible before attached is observed set */
	smp_wmb();
	dev->attached = 1;
	return 0;
}
/* Add driver to the head of the global singly-linked comedi_drivers list.
 * Always returns 0.
 * NOTE(review): no locking guards comedi_drivers here — presumably
 * registration only happens at module init; confirm before assuming
 * concurrent registration is safe. */
int comedi_driver_register(comedi_driver * driver)
{
	driver->next = comedi_drivers;
	comedi_drivers = driver;
	return 0;
}
/* Remove driver from the global driver list, first detaching any board
 * minors still bound to it.  Returns 0 on success, -EINVAL if the driver
 * was not found on the list.
 *
 * Fix: the original unlink loop started at comedi_drivers and tested
 * prev->next without checking prev itself, dereferencing NULL when the
 * list was empty.  A pointer-to-pointer walk handles the empty list and
 * the head element uniformly. */
int comedi_driver_unregister(comedi_driver * driver)
{
	comedi_driver **link;
	int i;

	/* check for devices using this driver */
	for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) {
		struct comedi_device_file_info *dev_file_info =
			comedi_get_device_file_info(i);
		comedi_device *dev;

		if (dev_file_info == NULL)
			continue;
		dev = dev_file_info->device;

		mutex_lock(&dev->mutex);
		if (dev->attached && dev->driver == driver) {
			if (dev->use_count)
				printk("BUG! detaching device with use_count=%d\n",
					dev->use_count);
			comedi_device_detach(dev);
		}
		mutex_unlock(&dev->mutex);
	}

	/* unlink the driver from the global list */
	for (link = &comedi_drivers; *link; link = &(*link)->next) {
		if (*link == driver) {
			*link = driver->next;
			return 0;
		}
	}
	return -EINVAL;
}
/* Finish configuring a freshly attached device: for each used subdevice,
 * allocate async/command state where commands are supported and install
 * fallback handlers for any optional hooks the driver left NULL.
 * Returns 0 on success or a negative errno; on error the caller detaches
 * the device, which releases anything allocated here. */
static int postconfig(comedi_device * dev)
{
	int i;
	comedi_subdevice *s;
	comedi_async *async = NULL;
	int ret;

	for (i = 0; i < dev->n_subdevices; i++) {
		s = dev->subdevices + i;

		if (s->type == COMEDI_SUBD_UNUSED)
			continue;

		/* a chanlist must be able to hold at least one channel */
		if (s->len_chanlist == 0)
			s->len_chanlist = 1;

		if (s->do_cmd) {
			/* command support requires a direction flag and a cmdtest hook */
			BUG_ON((s->subdev_flags & (SDF_CMD_READ |
						SDF_CMD_WRITE)) == 0);
			BUG_ON(!s->do_cmdtest);
			async = kzalloc(sizeof(comedi_async), GFP_KERNEL);
			if (async == NULL) {
				printk("failed to allocate async struct\n");
				return -ENOMEM;
			}
			init_waitqueue_head(&async->wait_head);
			async->subdevice = s;
			s->async = async;

#define DEFAULT_BUF_MAXSIZE (64*1024)
#define DEFAULT_BUF_SIZE (64*1024)

			async->max_bufsize = DEFAULT_BUF_MAXSIZE;
			async->prealloc_buf = NULL;
			async->prealloc_bufsz = 0;
			if (comedi_buf_alloc(dev, s, DEFAULT_BUF_SIZE) < 0) {
				printk("Buffer allocation failed\n");
				return -ENOMEM;
			}
			/* let the driver react to the newly sized buffer */
			if (s->buf_change) {
				ret = s->buf_change(dev, s, DEFAULT_BUF_SIZE);
				if (ret < 0)
					return ret;
			}
			comedi_alloc_subdevice_minor(dev, s);
		}

		/* install safe defaults for anything the driver omitted */
		if (!s->range_table && !s->range_table_list)
			s->range_table = &range_unknown;

		/* emulate single-channel read/write via insn_bits when possible */
		if (!s->insn_read && s->insn_bits)
			s->insn_read = insn_rw_emulate_bits;
		if (!s->insn_write && s->insn_bits)
			s->insn_write = insn_rw_emulate_bits;

		if (!s->insn_read)
			s->insn_read = insn_inval;
		if (!s->insn_write)
			s->insn_write = insn_inval;
		if (!s->insn_bits)
			s->insn_bits = insn_inval;
		if (!s->insn_config)
			s->insn_config = insn_inval;

		if (!s->poll)
			s->poll = poll_invalid;
	}
	return 0;
}
/* Generic recognize function for drivers that register a board-name table.
 * Returns a pointer to the matching table entry (suitable for board_ptr),
 * or NULL when no entry matches. */
void *comedi_recognize(comedi_driver * driv, const char *name)
{
	const char *const *entry = driv->board_name;
	unsigned i;

	for (i = 0; i < driv->num_names; i++) {
		if (strcmp(*entry, name) == 0)
			return (void *)entry;
		/* table entries are driv->offset bytes apart */
		entry = (const char *const *)((const char *)entry +
			driv->offset);
	}
	return NULL;
}
/* Log every board name a driver supports; falls back to the driver name
 * itself when the driver registers no board table. */
void comedi_report_boards(comedi_driver * driv)
{
	const char *const *entry = driv->board_name;
	unsigned int i;

	printk("comedi: valid board names for %s driver are:\n",
		driv->driver_name);
	for (i = 0; i < driv->num_names; i++) {
		printk(" %s\n", *entry);
		entry = (const char **)((char *)entry + driv->offset);
	}

	if (driv->num_names == 0)
		printk(" %s\n", driv->driver_name);
}
/* Default poll handler installed by postconfig() for subdevices that
 * do not implement polling. */
static int poll_invalid(comedi_device * dev, comedi_subdevice * s)
{
	return -EINVAL;
}
/* Default instruction handler installed by postconfig(): rejects every
 * instruction the driver did not implement. */
int insn_inval(comedi_device * dev, comedi_subdevice * s,
	comedi_insn * insn, lsampl_t * data)
{
	return -EINVAL;
}
/* Emulate single-channel INSN_READ/INSN_WRITE on top of the subdevice's
 * INSN_BITS handler, for DIO subdevices that only implement bitfield I/O.
 * Returns 1 on success or a negative error. */
static int insn_rw_emulate_bits(comedi_device * dev, comedi_subdevice * s,
	comedi_insn * insn, lsampl_t * data)
{
	static const unsigned channels_per_bitfield = 32;
	unsigned chan = CR_CHAN(insn->chanspec);
	const unsigned base_bitfield_channel =
		(chan < channels_per_bitfield) ? 0 : chan;
	const unsigned shift = chan - base_bitfield_channel;
	lsampl_t bits_data[2];
	comedi_insn bits_insn;
	int ret;

	memset(bits_data, 0, sizeof(bits_data));
	memset(&bits_insn, 0, sizeof(bits_insn));
	bits_insn.insn = INSN_BITS;
	bits_insn.chanspec = base_bitfield_channel;
	bits_insn.n = 2;
	bits_insn.data = bits_data;
	bits_insn.subdev = insn->subdev;

	if (insn->insn == INSN_WRITE) {
		if (!(s->subdev_flags & SDF_WRITABLE))
			return -EINVAL;
		bits_data[0] = 1 << shift;	/* mask */
		bits_data[1] = data[0] ? (1 << shift) : 0;	/* bits */
	}

	ret = s->insn_bits(dev, s, &bits_insn, bits_data);
	if (ret < 0)
		return ret;

	if (insn->insn == INSN_READ)
		data[0] = (bits_data[1] >> shift) & 1;

	return 1;
}
/* Walk the kernel page tables starting at pgd to translate adr into the
 * corresponding direct-mapped kernel virtual address.  Returns 0 when the
 * address is not mapped or not present.
 * NOTE(review): the walk goes pgd -> pud -> pmd without a pud_none()
 * check between levels — presumably safe for the addresses used by this
 * driver; confirm. */
static inline unsigned long uvirt_to_kva(pgd_t * pgd, unsigned long adr)
{
	unsigned long ret = 0UL;
	pmd_t *pmd;
	pte_t *ptep, pte;
	pud_t *pud;

	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, adr);
		pmd = pmd_offset(pud, adr);
		if (!pmd_none(*pmd)) {
			ptep = pte_offset_kernel(pmd, adr);
			pte = *ptep;
			if (pte_present(pte)) {
				/* page base address plus offset within the page */
				ret = (unsigned long)
					page_address(pte_page(pte));
				ret |= (adr & (PAGE_SIZE - 1));
			}
		}
	}
	return ret;
}
/* Translate a kernel virtual address to its direct-mapped address by
 * walking the kernel page tables; returns 0 if unmapped. */
static inline unsigned long kvirt_to_kva(unsigned long adr)
{
	return uvirt_to_kva(pgd_offset_k(adr), adr);
}
/* (Re)allocate the subdevice's preallocated acquisition buffer to new_size
 * bytes (rounded up to whole pages).  The buffer is built from individual
 * pages — DMA-coherent pages when the subdevice uses DMA, plain zeroed
 * pages otherwise — and stitched into one contiguous kernel mapping with
 * vmap() so it can also be mmap'ed to userspace page by page.
 * Passing new_size == 0 just frees the existing buffer.
 * Returns 0 on success, -ENOMEM on allocation failure (with everything
 * partially allocated rolled back). */
int comedi_buf_alloc(comedi_device * dev, comedi_subdevice * s,
	unsigned long new_size)
{
	comedi_async *async = s->async;

	/* Round up new_size to multiple of PAGE_SIZE */
	new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;

	/* if no change is required, do nothing */
	if (async->prealloc_buf && async->prealloc_bufsz == new_size) {
		return 0;
	}
	// deallocate old buffer
	if (async->prealloc_buf) {
		vunmap(async->prealloc_buf);
		async->prealloc_buf = NULL;
		async->prealloc_bufsz = 0;
	}
	if (async->buf_page_list) {
		unsigned i;
		for (i = 0; i < async->n_buf_pages; ++i) {
			if (async->buf_page_list[i].virt_addr) {
				/* undo the reservation made at allocation time */
				mem_map_unreserve(virt_to_page(async->
						buf_page_list[i].virt_addr));
				if (s->async_dma_dir != DMA_NONE) {
					dma_free_coherent(dev->hw_dev,
						PAGE_SIZE,
						async->buf_page_list[i].
						virt_addr,
						async->buf_page_list[i].
						dma_addr);
				} else {
					free_page((unsigned long)async->
						buf_page_list[i].virt_addr);
				}
			}
		}
		vfree(async->buf_page_list);
		async->buf_page_list = NULL;
		async->n_buf_pages = 0;
	}
	// allocate new buffer
	if (new_size) {
		unsigned i = 0;
		unsigned n_pages = new_size >> PAGE_SHIFT;
		struct page **pages = NULL;

		async->buf_page_list =
			vmalloc(sizeof(struct comedi_buf_page) * n_pages);
		if (async->buf_page_list) {
			memset(async->buf_page_list, 0,
				sizeof(struct comedi_buf_page) * n_pages);
			/* temporary page array handed to vmap() below */
			pages = vmalloc(sizeof(struct page *) * n_pages);
		}
		if (pages) {
			for (i = 0; i < n_pages; i++) {
				if (s->async_dma_dir != DMA_NONE) {
					async->buf_page_list[i].virt_addr =
						dma_alloc_coherent(dev->hw_dev,
						PAGE_SIZE,
						&async->buf_page_list[i].
						dma_addr,
						GFP_KERNEL | __GFP_COMP);
				} else {
					async->buf_page_list[i].virt_addr =
						(void *)
						get_zeroed_page(GFP_KERNEL);
				}
				if (async->buf_page_list[i].virt_addr == NULL) {
					break;	/* partial failure; rolled back below */
				}
				/* reserve the page so it can be mmap'ed */
				mem_map_reserve(virt_to_page(async->
						buf_page_list[i].virt_addr));
				pages[i] =
					virt_to_page(async->buf_page_list[i].
					virt_addr);
			}
		}
		/* only map when every page was obtained */
		if (i == n_pages) {
			async->prealloc_buf =
				vmap(pages, n_pages, VM_MAP,
				PAGE_KERNEL_NOCACHE);
		}
		if (pages) {
			vfree(pages);
		}
		if (async->prealloc_buf == NULL) {
			/* Some allocation failed above. */
			if (async->buf_page_list) {
				for (i = 0; i < n_pages; i++) {
					if (async->buf_page_list[i].virt_addr ==
						NULL) {
						break;
					}
					mem_map_unreserve(virt_to_page(async->
							buf_page_list[i].
							virt_addr));
					if (s->async_dma_dir != DMA_NONE) {
						dma_free_coherent(dev->hw_dev,
							PAGE_SIZE,
							async->buf_page_list[i].
							virt_addr,
							async->buf_page_list[i].
							dma_addr);
					} else {
						free_page((unsigned long)async->
							buf_page_list[i].
							virt_addr);
					}
				}
				vfree(async->buf_page_list);
				async->buf_page_list = NULL;
			}
			return -ENOMEM;
		}
		async->n_buf_pages = n_pages;
	}
	async->prealloc_bufsz = new_size;

	return 0;
}
/* munging is applied to data by core as it passes between user
 * and kernel space */
/* Apply the subdevice's munge() hook to up to num_bytes of freshly
 * written ring-buffer data, advancing munge_count/munge_ptr/munge_chan.
 * If the subdevice has no munge hook (or CMDF_RAWDATA is set) the bytes
 * are accounted as munged without modification.  Only whole samples are
 * munged; returns the number of bytes actually accounted. */
unsigned int comedi_buf_munge(comedi_async * async, unsigned int num_bytes)
{
	comedi_subdevice *s = async->subdevice;
	unsigned int count = 0;
	const unsigned num_sample_bytes = bytes_per_sample(s);

	if (s->munge == NULL || (async->cmd.flags & CMDF_RAWDATA)) {
		async->munge_count += num_bytes;
		/* munge_count may never pass buf_write_count */
		if ((int)(async->munge_count - async->buf_write_count) > 0)
			BUG();
		return num_bytes;
	}
	/* don't munge partial samples */
	num_bytes -= num_bytes % num_sample_bytes;
	while (count < num_bytes) {
		int block_size;

		block_size = num_bytes - count;
		if (block_size < 0) {
			rt_printk("%s: %s: bug! block_size is negative\n",
				__FILE__, __FUNCTION__);
			break;
		}
		/* clamp to the end of the ring buffer (no wrap mid-block) */
		if ((int)(async->munge_ptr + block_size -
				async->prealloc_bufsz) > 0)
			block_size = async->prealloc_bufsz - async->munge_ptr;

		s->munge(s->device, s, async->prealloc_buf + async->munge_ptr,
			block_size, async->munge_chan);

		smp_wmb();	//barrier insures data is munged in buffer before munge_count is incremented

		async->munge_chan += block_size / num_sample_bytes;
		async->munge_chan %= async->cmd.chanlist_len;
		async->munge_count += block_size;
		async->munge_ptr += block_size;
		async->munge_ptr %= async->prealloc_bufsz;
		count += block_size;
	}
	/* munge_count may never pass buf_write_count */
	if ((int)(async->munge_count - async->buf_write_count) > 0)
		BUG();
	return count;
}
/* Number of bytes (whole samples only) still free for write-allocation. */
unsigned int comedi_buf_write_n_available(comedi_async * async)
{
	unsigned int nbytes;

	if (!async)
		return 0;

	nbytes = async->buf_read_count + async->prealloc_bufsz -
		async->buf_write_alloc_count;
	nbytes -= nbytes % bytes_per_sample(async->subdevice);
	/* barrier insures the read of buf_read_count in this
	   query occurs before any following writes to the buffer which
	   might be based on the return value from this query.
	 */
	smp_mb();
	return nbytes;
}
/* allocates chunk for the writer from free buffer space; the request is
 * clamped to however much space is actually free */
unsigned int comedi_buf_write_alloc(comedi_async * async, unsigned int nbytes)
{
	unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;
	unsigned int available = free_end - async->buf_write_alloc_count;

	if ((int)(nbytes - available) > 0)
		nbytes = available;
	async->buf_write_alloc_count += nbytes;
	/* barrier insures the read of buf_read_count above occurs before
	   we write data to the write-alloc'ed buffer space */
	smp_mb();
	return nbytes;
}
/* allocates nothing unless it can completely fulfill the request */
unsigned int comedi_buf_write_alloc_strict(comedi_async * async,
	unsigned int nbytes)
{
	unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;
	unsigned int available = free_end - async->buf_write_alloc_count;

	/* all or nothing */
	if ((int)(nbytes - available) > 0)
		nbytes = 0;
	async->buf_write_alloc_count += nbytes;
	/* barrier insures the read of buf_read_count above occurs before
	   we write data to the write-alloc'ed buffer space */
	smp_mb();
	return nbytes;
}
/* transfers a chunk from writer to filled buffer space; freeing more
 * than was write-allocated is a driver bug and is clamped with a log */
unsigned comedi_buf_write_free(comedi_async * async, unsigned int nbytes)
{
	unsigned int allocated =
		async->buf_write_alloc_count - async->buf_write_count;

	if ((int)(nbytes - allocated) > 0) {
		rt_printk
			("comedi: attempted to write-free more bytes than have been write-allocated.\n");
		nbytes = allocated;
	}
	async->buf_write_count += nbytes;
	async->buf_write_ptr += nbytes;
	/* munge the newly filled bytes before readers may claim them */
	comedi_buf_munge(async, async->buf_write_count - async->munge_count);
	if (async->buf_write_ptr >= async->prealloc_bufsz)
		async->buf_write_ptr %= async->prealloc_bufsz;
	return nbytes;
}
/* allocates a chunk for the reader from filled (and munged) buffer space;
 * the request is clamped to the munged bytes not yet read-allocated */
unsigned comedi_buf_read_alloc(comedi_async * async, unsigned nbytes)
{
	unsigned available = async->munge_count - async->buf_read_alloc_count;

	if ((int)(nbytes - available) > 0)
		nbytes = available;
	async->buf_read_alloc_count += nbytes;
	/* barrier insures read of munge_count occurs before we actually read
	   data out of buffer */
	smp_rmb();
	return nbytes;
}
/* transfers control of a chunk from reader to free buffer space; freeing
 * more than was read-allocated is a bug and is clamped with a log */
unsigned comedi_buf_read_free(comedi_async * async, unsigned int nbytes)
{
	unsigned int allocated;

	// barrier insures data has been read out of buffer before read count is incremented
	smp_mb();
	allocated = async->buf_read_alloc_count - async->buf_read_count;
	if ((int)(nbytes - allocated) > 0) {
		rt_printk
			("comedi: attempted to read-free more bytes than have been read-allocated.\n");
		nbytes = allocated;
	}
	async->buf_read_count += nbytes;
	async->buf_read_ptr += nbytes;
	async->buf_read_ptr %= async->prealloc_bufsz;
	return nbytes;
}
/* Copy num_bytes from data into the ring buffer starting offset bytes
 * past the current write pointer, wrapping at the end of the buffer. */
void comedi_buf_memcpy_to(comedi_async * async, unsigned int offset,
	const void *data, unsigned int num_bytes)
{
	unsigned int write_ptr = async->buf_write_ptr + offset;

	if (write_ptr >= async->prealloc_bufsz)
		write_ptr %= async->prealloc_bufsz;

	while (num_bytes) {
		/* copy at most up to the end of the ring, then wrap to 0 */
		unsigned int chunk = async->prealloc_bufsz - write_ptr;

		if (chunk > num_bytes)
			chunk = num_bytes;
		memcpy(async->prealloc_buf + write_ptr, data, chunk);
		data += chunk;
		num_bytes -= chunk;
		write_ptr = 0;
	}
}
/* Copy nbytes out of the ring buffer into dest, starting offset bytes
 * past the current read pointer, wrapping at the end of the buffer. */
void comedi_buf_memcpy_from(comedi_async * async, unsigned int offset,
	void *dest, unsigned int nbytes)
{
	unsigned int read_ptr = async->buf_read_ptr + offset;

	if (read_ptr >= async->prealloc_bufsz)
		read_ptr %= async->prealloc_bufsz;

	while (nbytes) {
		/* copy at most up to the end of the ring, then wrap to 0 */
		unsigned int chunk = async->prealloc_bufsz - read_ptr;

		if (chunk > nbytes)
			chunk = nbytes;
		memcpy(dest, async->prealloc_buf + read_ptr, chunk);
		dest += chunk;
		nbytes -= chunk;
		read_ptr = 0;
	}
}
/* Number of munged bytes available for the reader to consume. */
unsigned int comedi_buf_read_n_available(comedi_async * async)
{
	unsigned num_bytes;

	if (!async)
		return 0;

	num_bytes = async->munge_count - async->buf_read_count;
	/* barrier insures the read of munge_count in this
	   query occurs before any following reads of the buffer which
	   might be based on the return value from this query.
	 */
	smp_rmb();
	return num_bytes;
}
/* Pop one sample from the ring buffer into *x.
 * Returns 1 on success, 0 when no whole sample is available. */
int comedi_buf_get(comedi_async * async, sampl_t * x)
{
	if (comedi_buf_read_n_available(async) < sizeof(sampl_t))
		return 0;
	comedi_buf_read_alloc(async, sizeof(sampl_t));
	*x = *(sampl_t *) (async->prealloc_buf + async->buf_read_ptr);
	comedi_buf_read_free(async, sizeof(sampl_t));
	return 1;
}
/* Push one sample into the ring buffer.
 * Returns 1 on success; on overflow sets COMEDI_CB_ERROR and returns 0. */
int comedi_buf_put(comedi_async * async, sampl_t x)
{
	if (comedi_buf_write_alloc_strict(async, sizeof(sampl_t)) <
		sizeof(sampl_t)) {
		async->events |= COMEDI_CB_ERROR;
		return 0;
	}
	*(sampl_t *) (async->prealloc_buf + async->buf_write_ptr) = x;
	comedi_buf_write_free(async, sizeof(sampl_t));
	return 1;
}
/* Reset all ring-buffer bookkeeping to the empty state. */
void comedi_reset_async_buf(comedi_async * async)
{
	/* writer side */
	async->buf_write_alloc_count = 0;
	async->buf_write_count = 0;
	async->buf_write_ptr = 0;

	/* reader side */
	async->buf_read_alloc_count = 0;
	async->buf_read_count = 0;
	async->buf_read_ptr = 0;

	/* scan and munge bookkeeping */
	async->cur_chan = 0;
	async->scan_progress = 0;
	async->munge_chan = 0;
	async->munge_count = 0;
	async->munge_ptr = 0;

	/* pending event flags */
	async->events = 0;
}
/* Allocate a board minor for hardware_device and attach it using the
 * given board name and config options.  The minor is stashed in the
 * device's drvdata so comedi_auto_unconfig() can find it later.
 * Returns 0 on success or a negative errno (the minor is released on
 * attach failure). */
int comedi_auto_config(struct device *hardware_device, const char *board_name,
	const int *options, unsigned num_options)
{
	comedi_devconfig it;
	struct comedi_device_file_info *dev_file_info;
	int minor;
	int retval;

	minor = comedi_alloc_board_minor(hardware_device);
	if (minor < 0)
		return minor;
	dev_set_drvdata(hardware_device, (void *)(unsigned long)minor);
	dev_file_info = comedi_get_device_file_info(minor);

	/* build a devconfig equivalent to a manual COMEDI_DEVCONFIG ioctl */
	memset(&it, 0, sizeof(it));
	strncpy(it.board_name, board_name, COMEDI_NAMELEN);
	it.board_name[COMEDI_NAMELEN - 1] = '\0';
	BUG_ON(num_options > COMEDI_NDEVCONFOPTS);
	memcpy(it.options, options, num_options * sizeof(int));

	mutex_lock(&dev_file_info->device->mutex);
	retval = comedi_device_attach(dev_file_info->device, &it);
	mutex_unlock(&dev_file_info->device->mutex);

	if (retval < 0)
		comedi_free_board_minor(minor);
	return retval;
}
/* Release the board minor that comedi_auto_config() stashed in the
 * device's drvdata. */
void comedi_auto_unconfig(struct device *hardware_device)
{
	unsigned long minor = (unsigned long)dev_get_drvdata(hardware_device);

	BUG_ON(minor >= COMEDI_NUM_BOARD_MINORS);
	comedi_free_board_minor(minor);
}
/* PCI convenience wrapper: auto-configure with bus/slot as options. */
int comedi_pci_auto_config(struct pci_dev *pcidev, const char *board_name)
{
	int options[2];

	options[0] = pcidev->bus->number;	/* pci bus */
	options[1] = PCI_SLOT(pcidev->devfn);	/* pci slot */

	return comedi_auto_config(&pcidev->dev, board_name, options,
		sizeof(options) / sizeof(options[0]));
}
/* Undo comedi_pci_auto_config() for the given PCI device. */
void comedi_pci_auto_unconfig(struct pci_dev *pcidev)
{
	comedi_auto_unconfig(&pcidev->dev);
}

Some files were not shown because too many files have changed in this diff Show More