Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  niu: panic on reset
  netlink: fix overrun in attribute iteration
  [Bluetooth] Fix regression from using default link policy
  ath9k: Assign seq# when mac80211 requests this

commit c19e80808b
@@ -5984,6 +5984,56 @@ static void niu_netif_start(struct niu *np)
 	niu_enable_interrupts(np, 1);
 }
 
+static void niu_reset_buffers(struct niu *np)
+{
+	int i, j, k, err;
+
+	if (np->rx_rings) {
+		for (i = 0; i < np->num_rx_rings; i++) {
+			struct rx_ring_info *rp = &np->rx_rings[i];
+
+			for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
+				struct page *page;
+
+				page = rp->rxhash[j];
+				while (page) {
+					struct page *next =
+						(struct page *) page->mapping;
+					u64 base = page->index;
+					base = base >> RBR_DESCR_ADDR_SHIFT;
+					rp->rbr[k++] = cpu_to_le32(base);
+					page = next;
+				}
+			}
+			for (; k < MAX_RBR_RING_SIZE; k++) {
+				err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
+				if (unlikely(err))
+					break;
+			}
+
+			rp->rbr_index = rp->rbr_table_size - 1;
+			rp->rcr_index = 0;
+			rp->rbr_pending = 0;
+			rp->rbr_refill_pending = 0;
+		}
+	}
+	if (np->tx_rings) {
+		for (i = 0; i < np->num_tx_rings; i++) {
+			struct tx_ring_info *rp = &np->tx_rings[i];
+
+			for (j = 0; j < MAX_TX_RING_SIZE; j++) {
+				if (rp->tx_buffs[j].skb)
+					(void) release_tx_packet(np, rp, j);
+			}
+
+			rp->pending = MAX_TX_RING_SIZE;
+			rp->prod = 0;
+			rp->cons = 0;
+			rp->wrap_bit = 0;
+		}
+	}
+}
+
 static void niu_reset_task(struct work_struct *work)
 {
 	struct niu *np = container_of(work, struct niu, reset_task);
@@ -6006,6 +6056,12 @@ static void niu_reset_task(struct work_struct *work)
 
 	niu_stop_hw(np);
 
+	spin_unlock_irqrestore(&np->lock, flags);
+
+	niu_reset_buffers(np);
+
+	spin_lock_irqsave(&np->lock, flags);
+
 	err = niu_init_hw(np);
 	if (!err) {
 		np->timer.expires = jiffies + HZ;
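
A note on the RBR rebuild introduced above: the niu driver chains its receive pages through page->mapping and keeps each page's buffer base address in page->index, so after a reset it can walk every rxhash bucket and re-emit one ring descriptor per surviving page before topping the ring up via niu_rbr_add_page(). Below is a minimal userspace sketch of that walk, not driver code; fake_page, HASH_SIZE, RING_SIZE and RBR_SHIFT are illustrative stand-ins, and the little-endian conversion done by cpu_to_le32() is omitted.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the real driver uses struct page, page->mapping,
 * page->index and RBR_DESCR_ADDR_SHIFT. */
#define HASH_SIZE 4
#define RING_SIZE 8
#define RBR_SHIFT 12

struct fake_page {
	struct fake_page *next;   /* plays the role of page->mapping */
	uint64_t base;            /* plays the role of page->index   */
};

/* Walk every hash bucket and re-emit one descriptor per chained page,
 * mirroring the first loop of niu_reset_buffers(). */
static int rebuild_rbr(struct fake_page *hash[HASH_SIZE], uint32_t rbr[RING_SIZE])
{
	int j, k = 0;

	for (j = 0; j < HASH_SIZE; j++) {
		struct fake_page *p = hash[j];

		while (p && k < RING_SIZE) {
			rbr[k++] = (uint32_t)(p->base >> RBR_SHIFT);
			p = p->next;
		}
	}
	return k;  /* the driver refills the remaining slots with fresh pages */
}

int main(void)
{
	uint32_t rbr[RING_SIZE] = { 0 };
	struct fake_page a = { NULL, 0x1000 }, b = { &a, 0x2000 };
	struct fake_page *hash[HASH_SIZE] = { &b, NULL, NULL, NULL };

	printf("descriptors rebuilt: %d\n", rebuild_rbr(hash, rbr));
	return 0;
}
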
@@ -209,6 +209,7 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
 	unsigned int curlen;
 	struct ath_txq *cabq;
 	struct ath_txq *mcastq;
+	struct ieee80211_tx_info *info;
 	avp = sc->sc_vaps[if_id];
 
 	mcastq = &avp->av_mcastq;
@@ -232,6 +233,18 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
 	 */
 	curlen = skb->len;
 
+	info = IEEE80211_SKB_CB(skb);
+	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+		/*
+		 * TODO: make sure the seq# gets assigned properly (vs. other
+		 * TX frames)
+		 */
+		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+		sc->seq_no += 0x10;
+		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+		hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
+	}
+
 	/* XXX: spin_lock_bh should not be used here, but sparse bitches
 	 * otherwise. We should fix sparse :) */
 	spin_lock_bh(&mcastq->axq_lock);
@@ -992,6 +992,7 @@ struct ath_softc {
 	u32 sc_txintrperiod;	/* tx interrupt batching */
 	int sc_haltype2q[ATH9K_WME_AC_VO+1]; /* HAL WME AC -> h/w qnum */
 	u32 sc_ant_tx[8];	/* recent tx frames/antenna */
+	u16 seq_no;		/* TX sequence number */
 
 	/* Beacon */
 	struct ath9k_tx_queue_info sc_beacon_qi;
@@ -369,6 +369,20 @@ static int ath9k_tx(struct ieee80211_hw *hw,
 {
 	struct ath_softc *sc = hw->priv;
 	int hdrlen, padsize;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+	/*
+	 * As a temporary workaround, assign seq# here; this will likely need
+	 * to be cleaned up to work better with Beacon transmission and virtual
+	 * BSSes.
+	 */
+	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
+			sc->seq_no += 0x10;
+		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+		hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
+	}
 
 	/* Add the padding after the header if this is not already done */
 	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
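
A note on the seq_ctrl arithmetic used in both ath9k hunks: the low 4 bits of an 802.11 sequence-control field hold the fragment number and the upper 12 bits the sequence number (IEEE80211_SCTL_FRAG is 0x000F and IEEE80211_SCTL_SEQ is 0xFFF0 in linux/ieee80211.h), so adding 0x10 to sc->seq_no advances the sequence by one, while the mask preserves only the fragment bits before the new sequence is OR-ed in. A small host-byte-order sketch follows, with the two constants copied in rather than pulled from kernel headers; the driver itself goes through cpu_to_le16() because the field is little-endian on the wire.

#include <stdint.h>
#include <stdio.h>

/* Constants as defined in linux/ieee80211.h. */
#define IEEE80211_SCTL_FRAG 0x000F
#define IEEE80211_SCTL_SEQ  0xFFF0

int main(void)
{
	uint16_t seq_no = 0;        /* driver-maintained counter, like sc->seq_no */
	uint16_t seq_ctrl = 0x0003; /* frame already carries fragment number 3 */

	seq_no += 0x10;                  /* next sequence number (bit 4 and up) */
	seq_ctrl &= IEEE80211_SCTL_FRAG; /* keep only the fragment bits */
	seq_ctrl |= seq_no;              /* install the new sequence number */

	printf("seq=%d frag=%d\n",
	       (seq_ctrl & IEEE80211_SCTL_SEQ) >> 4,
	       seq_ctrl & IEEE80211_SCTL_FRAG);
	return 0;
}
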
@@ -702,7 +702,7 @@ static inline int nla_len(const struct nlattr *nla)
  */
 static inline int nla_ok(const struct nlattr *nla, int remaining)
 {
-	return remaining >= sizeof(*nla) &&
+	return remaining >= (int) sizeof(*nla) &&
 	       nla->nla_len >= sizeof(*nla) &&
 	       nla->nla_len <= remaining;
 }
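
The single cast above is the whole overrun fix: remaining is a signed int, and without the cast the comparison against sizeof(*nla) is done in size_t, so once remaining goes negative (which can happen when the aligned length of the last attribute overshoots the end of the buffer) it compares as a huge unsigned value and nla_ok() keeps reporting attributes past the end of the message. A standalone sketch of that promotion, using a stub struct with the same 4-byte layout instead of the real struct nlattr:

#include <stdio.h>

/* Stub with the same 4-byte header size as struct nlattr; only here to
 * make sizeof() concrete, not the kernel definition. */
struct nlattr_stub {
	unsigned short nla_len;
	unsigned short nla_type;
};

/* Old check: 'remaining' is promoted to size_t, so -4 compares as a huge
 * unsigned number and the bounds test wrongly passes. */
static int ok_unsigned(int remaining)
{
	return remaining >= sizeof(struct nlattr_stub);
}

/* Fixed check: the cast keeps the comparison signed, so a negative
 * 'remaining' terminates the iteration. */
static int ok_signed(int remaining)
{
	return remaining >= (int) sizeof(struct nlattr_stub);
}

int main(void)
{
	int remaining = -4;  /* value left over after an attribute overshoots */

	printf("unsigned compare: %d (overrun)\n", ok_unsigned(remaining));
	printf("signed compare:   %d (stops)\n", ok_signed(remaining));
	return 0;
}
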
@@ -164,6 +164,9 @@ static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *
 {
 	int ret;
 
+	if (!test_bit(HCI_UP, &hdev->flags))
+		return -ENETDOWN;
+
 	/* Serialize all requests */
 	hci_req_lock(hdev);
 	ret = __hci_request(hdev, req, opt, timeout);