mISDN: Early confirm for transparent data
It is better to send a confirm for transparent data as early as possible to avoid TX underruns.

Signed-off-by: Karsten Keil <kkeil@linux-pingi.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 8bfddfbe21
parent 1368112c07
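Before the individual hunks, a note on the shape of the change: the PH_DATA_CNF that layer 2 waits for is now generated centrally by the mISDN core when an skb is accepted for transmission, instead of by each driver after its FIFO has been filled. The sketch below condenses the new-side driver code from the hunks that follow into one hypothetical handler; the xxx_ prefix, the explicit lock parameter, and xxx_fill_fifo() are illustrative placeholders, not names from the patch.

/*
 * Sketch of a B-channel PH_DATA_REQ handler after this patch, condensed
 * from the avm_l2l1B/hfcpci_l2l1B/nj_l2l1B/... hunks below.  The confirm
 * is no longer queued here; the core sends it from bchannel_senddata()
 * (direct TX) or from get_next_bframe() (queued frame).
 */
static int
xxx_ph_data_req(struct bchannel *bch, struct sk_buff *skb, spinlock_t *lock)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(lock, flags);
	ret = bchannel_senddata(bch, skb);
	if (ret > 0) {			/* direct TX: the channel was idle */
		xxx_fill_fifo(bch);	/* driver-specific FIFO fill */
		ret = 0;
	}
	spin_unlock_irqrestore(lock, flags);
	return ret;
}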
@@ -488,15 +488,11 @@ hdlc_fill_fifo(struct bchannel *bch)
 static void
 HDLC_irq_xpr(struct bchannel *bch)
 {
-	if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len)
+	if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len) {
 		hdlc_fill_fifo(bch);
-	else {
-		if (bch->tx_skb) {
-			/* send confirm, on trans, free on hdlc. */
-			if (test_bit(FLG_TRANSPARENT, &bch->Flags))
-				confirm_Bsend(bch);
+	} else {
+		if (bch->tx_skb)
 			dev_kfree_skb(bch->tx_skb);
-		}
 		if (get_next_bframe(bch))
 			hdlc_fill_fifo(bch);
 	}
@@ -659,22 +655,17 @@ avm_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
 	struct fritzcard *fc = bch->hw;
 	int ret = -EINVAL;
 	struct mISDNhead *hh = mISDN_HEAD_P(skb);
-	u32 id;
-	u_long flags;
+	unsigned long flags;
 
 	switch (hh->prim) {
 	case PH_DATA_REQ:
 		spin_lock_irqsave(&fc->lock, flags);
 		ret = bchannel_senddata(bch, skb);
 		if (ret > 0) { /* direct TX */
-			id = hh->id; /* skb can be freed */
 			hdlc_fill_fifo(bch);
 			ret = 0;
-			spin_unlock_irqrestore(&fc->lock, flags);
-			if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
-				queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
-		} else
-			spin_unlock_irqrestore(&fc->lock, flags);
+		}
+		spin_unlock_irqrestore(&fc->lock, flags);
 		return ret;
 	case PH_ACTIVATE_REQ:
 		spin_lock_irqsave(&fc->lock, flags);
@@ -2166,13 +2166,9 @@ next_frame:
 		HFC_wait_nodebug(hc);
 	}
 
-	/* send confirm, since get_net_bframe will not do it with trans */
-	if (bch && test_bit(FLG_TRANSPARENT, &bch->Flags))
-		confirm_Bsend(bch);
-
-	/* check for next frame */
 	dev_kfree_skb(*sp);
-	if (bch && get_next_bframe(bch)) { /* hdlc is confirmed here */
+	/* check for next frame */
+	if (bch && get_next_bframe(bch)) {
 		len = (*sp)->len;
 		goto next_frame;
 	}
@@ -3482,8 +3478,7 @@ handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb)
 	struct hfc_multi *hc = bch->hw;
 	int ret = -EINVAL;
 	struct mISDNhead *hh = mISDN_HEAD_P(skb);
-	unsigned int id;
-	u_long flags;
+	unsigned long flags;
 
 	switch (hh->prim) {
 	case PH_DATA_REQ:
@@ -3492,19 +3487,13 @@ handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb)
 		spin_lock_irqsave(&hc->lock, flags);
 		ret = bchannel_senddata(bch, skb);
 		if (ret > 0) { /* direct TX */
-			id = hh->id; /* skb can be freed */
 			hfcmulti_tx(hc, bch->slot);
 			ret = 0;
 			/* start fifo */
 			HFC_outb_nodebug(hc, R_FIFO, 0);
 			HFC_wait_nodebug(hc);
-			if (!test_bit(FLG_TRANSPARENT, &bch->Flags)) {
-				spin_unlock_irqrestore(&hc->lock, flags);
-				queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
-			} else
-				spin_unlock_irqrestore(&hc->lock, flags);
-		} else
-			spin_unlock_irqrestore(&hc->lock, flags);
+		}
+		spin_unlock_irqrestore(&hc->lock, flags);
 		return ret;
 	case PH_ACTIVATE_REQ:
 		if (debug & DEBUG_HFCMULTI_MSG)
@@ -849,9 +849,6 @@ hfcpci_fill_fifo(struct bchannel *bch)
 		*z1t = cpu_to_le16(new_z1);	/* now send data */
 		if (bch->tx_idx < bch->tx_skb->len)
 			return;
-		/* send confirm, on trans, free on hdlc. */
-		if (test_bit(FLG_TRANSPARENT, &bch->Flags))
-			confirm_Bsend(bch);
 		dev_kfree_skb(bch->tx_skb);
 		if (get_next_bframe(bch))
 			goto next_t_frame;
@@ -1691,22 +1688,17 @@ hfcpci_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
 	struct hfc_pci *hc = bch->hw;
 	int ret = -EINVAL;
 	struct mISDNhead *hh = mISDN_HEAD_P(skb);
-	unsigned int id;
-	u_long flags;
+	unsigned long flags;
 
 	switch (hh->prim) {
 	case PH_DATA_REQ:
 		spin_lock_irqsave(&hc->lock, flags);
 		ret = bchannel_senddata(bch, skb);
 		if (ret > 0) { /* direct TX */
-			id = hh->id; /* skb can be freed */
 			hfcpci_fill_fifo(bch);
 			ret = 0;
-			spin_unlock_irqrestore(&hc->lock, flags);
-			if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
-				queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
-		} else
-			spin_unlock_irqrestore(&hc->lock, flags);
+		}
+		spin_unlock_irqrestore(&hc->lock, flags);
 		return ret;
 	case PH_ACTIVATE_REQ:
 		spin_lock_irqsave(&hc->lock, flags);
@@ -226,15 +226,8 @@ hfcusb_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
 		if (debug & DBG_HFC_CALL_TRACE)
 			printk(KERN_DEBUG "%s: %s PH_DATA_REQ ret(%i)\n",
 			       hw->name, __func__, ret);
-		if (ret > 0) {
-			/*
-			 * other l1 drivers don't send early confirms on
-			 * transp data, but hfcsusb does because tx_next
-			 * skb is needed in tx_iso_complete()
-			 */
-			queue_ch_frame(ch, PH_DATA_CNF, hh->id, NULL);
+		if (ret > 0)
 			ret = 0;
-		}
 		return ret;
 	case PH_ACTIVATE_REQ:
 		if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags)) {
@@ -1365,12 +1358,8 @@ tx_iso_complete(struct urb *urb)
 				if (fifo->dch && get_next_dframe(fifo->dch))
 					tx_skb = fifo->dch->tx_skb;
 				else if (fifo->bch &&
-					 get_next_bframe(fifo->bch)) {
-					if (test_bit(FLG_TRANSPARENT,
-						     &fifo->bch->Flags))
-						confirm_Bsend(fifo->bch);
+					 get_next_bframe(fifo->bch))
 					tx_skb = fifo->bch->tx_skb;
-				}
 			}
 		}
 		errcode = usb_submit_urb(urb, GFP_ATOMIC);
@@ -1011,15 +1011,11 @@ hscx_fill_fifo(struct hscx_hw *hscx)
 static void
 hscx_xpr(struct hscx_hw *hx)
 {
-	if (hx->bch.tx_skb && hx->bch.tx_idx < hx->bch.tx_skb->len)
+	if (hx->bch.tx_skb && hx->bch.tx_idx < hx->bch.tx_skb->len) {
 		hscx_fill_fifo(hx);
-	else {
-		if (hx->bch.tx_skb) {
-			/* send confirm, on trans, free on hdlc. */
-			if (test_bit(FLG_TRANSPARENT, &hx->bch.Flags))
-				confirm_Bsend(&hx->bch);
+	} else {
+		if (hx->bch.tx_skb)
 			dev_kfree_skb(hx->bch.tx_skb);
-		}
 		if (get_next_bframe(&hx->bch))
 			hscx_fill_fifo(hx);
 	}
@@ -1342,22 +1338,17 @@ hscx_l2l1(struct mISDNchannel *ch, struct sk_buff *skb)
 	struct hscx_hw *hx = container_of(bch, struct hscx_hw, bch);
 	int ret = -EINVAL;
 	struct mISDNhead *hh = mISDN_HEAD_P(skb);
-	u32 id;
-	u_long flags;
+	unsigned long flags;
 
 	switch (hh->prim) {
 	case PH_DATA_REQ:
 		spin_lock_irqsave(hx->ip->hwlock, flags);
 		ret = bchannel_senddata(bch, skb);
 		if (ret > 0) { /* direct TX */
-			id = hh->id; /* skb can be freed */
 			ret = 0;
 			hscx_fill_fifo(hx);
-			spin_unlock_irqrestore(hx->ip->hwlock, flags);
-			if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
-				queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
-		} else
-			spin_unlock_irqrestore(hx->ip->hwlock, flags);
+		}
+		spin_unlock_irqrestore(hx->ip->hwlock, flags);
 		return ret;
 	case PH_ACTIVATE_REQ:
 		spin_lock_irqsave(hx->ip->hwlock, flags);
@@ -702,15 +702,11 @@ send_next(struct isar_ch *ch)
 			}
 		}
 	}
-	if (ch->bch.tx_skb) {
-		/* send confirm, on trans, free on hdlc. */
-		if (test_bit(FLG_TRANSPARENT, &ch->bch.Flags))
-			confirm_Bsend(&ch->bch);
+	if (ch->bch.tx_skb)
 		dev_kfree_skb(ch->bch.tx_skb);
-	}
-	if (get_next_bframe(&ch->bch))
+	if (get_next_bframe(&ch->bch)) {
 		isar_fill_fifo(ch);
-	else {
+	} else {
 		if (test_and_clear_bit(FLG_DLEETX, &ch->bch.Flags)) {
 			if (test_and_clear_bit(FLG_LASTDATA,
 					       &ch->bch.Flags)) {
@@ -1487,14 +1483,10 @@ isar_l2l1(struct mISDNchannel *ch, struct sk_buff *skb)
 		spin_lock_irqsave(ich->is->hwlock, flags);
 		ret = bchannel_senddata(bch, skb);
 		if (ret > 0) { /* direct TX */
-			id = hh->id; /* skb can be freed */
 			ret = 0;
 			isar_fill_fifo(ich);
-			spin_unlock_irqrestore(ich->is->hwlock, flags);
-			if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
-				queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
-		} else
-			spin_unlock_irqrestore(ich->is->hwlock, flags);
+		}
+		spin_unlock_irqrestore(ich->is->hwlock, flags);
 		return ret;
 	case PH_ACTIVATE_REQ:
 		spin_lock_irqsave(ich->is->hwlock, flags);
@@ -595,15 +595,11 @@ fill_dma(struct tiger_ch *bc)
 static int
 bc_next_frame(struct tiger_ch *bc)
 {
-	if (bc->bch.tx_skb && bc->bch.tx_idx < bc->bch.tx_skb->len)
+	if (bc->bch.tx_skb && bc->bch.tx_idx < bc->bch.tx_skb->len) {
 		fill_dma(bc);
-	else {
-		if (bc->bch.tx_skb) {
-			/* send confirm, on trans, free on hdlc. */
-			if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags))
-				confirm_Bsend(&bc->bch);
+	} else {
+		if (bc->bch.tx_skb)
 			dev_kfree_skb(bc->bch.tx_skb);
-		}
 		if (get_next_bframe(&bc->bch))
 			fill_dma(bc);
 		else
@@ -732,22 +728,17 @@ nj_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
 	struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch);
 	struct tiger_hw *card = bch->hw;
 	struct mISDNhead *hh = mISDN_HEAD_P(skb);
-	u32 id;
-	u_long flags;
+	unsigned long flags;
 
 	switch (hh->prim) {
 	case PH_DATA_REQ:
 		spin_lock_irqsave(&card->lock, flags);
 		ret = bchannel_senddata(bch, skb);
 		if (ret > 0) { /* direct TX */
-			id = hh->id; /* skb can be freed */
 			fill_dma(bc);
 			ret = 0;
-			spin_unlock_irqrestore(&card->lock, flags);
-			if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
-				queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
-		} else
-			spin_unlock_irqrestore(&card->lock, flags);
+		}
+		spin_unlock_irqrestore(&card->lock, flags);
 		return ret;
 	case PH_ACTIVATE_REQ:
 		spin_lock_irqsave(&card->lock, flags);
@@ -638,15 +638,11 @@ w6692_mode(struct w6692_ch *wch, u32 pr)
 static void
 send_next(struct w6692_ch *wch)
 {
-	if (wch->bch.tx_skb && wch->bch.tx_idx < wch->bch.tx_skb->len)
+	if (wch->bch.tx_skb && wch->bch.tx_idx < wch->bch.tx_skb->len) {
 		W6692_fill_Bfifo(wch);
-	else {
-		if (wch->bch.tx_skb) {
-			/* send confirm, on trans, free on hdlc. */
-			if (test_bit(FLG_TRANSPARENT, &wch->bch.Flags))
-				confirm_Bsend(&wch->bch);
+	} else {
+		if (wch->bch.tx_skb)
 			dev_kfree_skb(wch->bch.tx_skb);
-		}
 		if (get_next_bframe(&wch->bch))
 			W6692_fill_Bfifo(wch);
 	}
@@ -944,22 +940,17 @@ w6692_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
 	struct w6692_hw *card = bch->hw;
 	int ret = -EINVAL;
 	struct mISDNhead *hh = mISDN_HEAD_P(skb);
-	u32 id;
-	u_long flags;
+	unsigned long flags;
 
 	switch (hh->prim) {
 	case PH_DATA_REQ:
 		spin_lock_irqsave(&card->lock, flags);
 		ret = bchannel_senddata(bch, skb);
 		if (ret > 0) { /* direct TX */
-			id = hh->id; /* skb can be freed */
 			ret = 0;
 			W6692_fill_Bfifo(bc);
-			spin_unlock_irqrestore(&card->lock, flags);
-			if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
-				queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
-		} else
-			spin_unlock_irqrestore(&card->lock, flags);
+		}
+		spin_unlock_irqrestore(&card->lock, flags);
 		return ret;
 	case PH_ACTIVATE_REQ:
 		spin_lock_irqsave(&card->lock, flags);
@@ -272,7 +272,7 @@ get_next_dframe(struct dchannel *dch)
 }
 EXPORT_SYMBOL(get_next_dframe);
 
-void
+static void
 confirm_Bsend(struct bchannel *bch)
 {
 	struct sk_buff *skb;
@@ -294,7 +294,6 @@ confirm_Bsend(struct bchannel *bch)
 	skb_queue_tail(&bch->rqueue, skb);
 	schedule_event(bch, FLG_RECVQUEUE);
 }
-EXPORT_SYMBOL(confirm_Bsend);
 
 int
 get_next_bframe(struct bchannel *bch)
@@ -305,8 +304,8 @@ get_next_bframe(struct bchannel *bch)
 		if (bch->tx_skb) {
 			bch->next_skb = NULL;
 			test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
-			if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
-				confirm_Bsend(bch); /* not for transparent */
+			/* confirm imediately to allow next data */
+			confirm_Bsend(bch);
 			return 1;
 		} else {
 			test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
@@ -395,6 +394,7 @@ bchannel_senddata(struct bchannel *ch, struct sk_buff *skb)
 		/* write to fifo */
 		ch->tx_skb = skb;
 		ch->tx_idx = 0;
+		confirm_Bsend(ch);
 		return 1;
 	}
 }
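The hunk above is the heart of the patch: bchannel_senddata() now issues the confirm itself as soon as the skb becomes the active tx_skb, so layer 2 can hand over the next frame before the hardware has drained the current one, which is what avoids the transparent-mode TX underruns mentioned in the commit message. For orientation, a rough sketch of what confirm_Bsend() does follows; the two queueing calls are visible in the -294,7 hunk above, while the allocation helper and the use of the tx_skb's id are assumptions of this sketch, not part of this diff.

/*
 * Rough sketch of the now-static confirm_Bsend() in the mISDN core: it
 * queues an empty PH_DATA_CNF carrying the id of the skb currently held
 * as tx_skb onto the channel's receive queue, from where the recv-queue
 * work delivers it upstream.  Allocation details are assumed, not quoted.
 */
static void
confirm_Bsend_sketch(struct bchannel *bch)
{
	struct sk_buff *skb;

	/* assumed helper from mISDNif.h; id taken from the active tx_skb */
	skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(bch->tx_skb),
			       0, NULL, GFP_ATOMIC);
	if (!skb)
		return;
	skb_queue_tail(&bch->rqueue, skb);	/* as in the -294,7 hunk */
	schedule_event(bch, FLG_RECVQUEUE);
}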
@@ -182,7 +182,6 @@ extern void recv_Echannel(struct dchannel *, struct dchannel *);
 extern void recv_Bchannel(struct bchannel *, unsigned int id);
 extern void recv_Dchannel_skb(struct dchannel *, struct sk_buff *);
 extern void recv_Bchannel_skb(struct bchannel *, struct sk_buff *);
-extern void confirm_Bsend(struct bchannel *bch);
 extern int get_next_bframe(struct bchannel *);
 extern int get_next_dframe(struct dchannel *);
 