Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Don't OOPS on socket AIO, from Christoph Hellwig.

 2) Scheduled scans should be aborted upon RFKILL, from Emmanuel
    Grumbach.

 3) Fix sleep in atomic context in kvaser_usb, from Ahmed S Darwish.

 4) Fix RCU locking across copy_to_user() in bpf code, from Alexei
    Starovoitov.

 5) Lots of crash, memory leak, short TX packet, et al. bug fixes in
    sh_eth from Ben Hutchings.

 6) Fix memory corruption in SCTP wrt. INIT collisions, from Daniel
    Borkmann.

 7) Fix return value logic for poll handlers in netxen, enic, and
    bnx2x.  From Eric Dumazet and Govindarajulu Varadarajan.

 8) Header length calculation fix in mac80211 from Fred Chou.

 9) mv643xx_eth doesn't handle highmem correctly in non-TSO code
    paths.  From Ezequiel Garcia.

10) udp_diag has bogus logic in its hash chain skipping; copy the same
    fix tcp diag used.  From Herbert Xu.

11) amd-xgbe programs the wrong rx flow control register, from Thomas
    Lendacky.

12) Fix race leading to use after free in ping receive path, from
    Subash Abhinov Kasiviswanathan.

13) Cache redirect routes, otherwise we can get a heavy backlog of RCU
    jobs liberating DST_NOCACHE entries.  From Hannes Frederic Sowa.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (48 commits)
  net: don't OOPS on socket aio
  stmmac: prevent probe drivers to crash kernel
  bnx2x: fix napi poll return value for repoll
  ipv6: replacing a rt6_info needs to purge possible propagated rt6_infos too
  sh_eth: Fix DMA-API usage for RX buffers
  sh_eth: Check for DMA mapping errors on transmit
  sh_eth: Ensure DMA engines are stopped before freeing buffers
  sh_eth: Remove RX overflow log messages
  ping: Fix race in free in receive path
  udp_diag: Fix socket skipping within chain
  can: kvaser_usb: Fix state handling upon BUS_ERROR events
  can: kvaser_usb: Retry the first bulk transfer on -ETIMEDOUT
  can: kvaser_usb: Send correct context to URB completion
  can: kvaser_usb: Do not sleep in atomic context
  ipv4: try to cache dst_entries which would cause a redirect
  samples: bpf: relax test_maps check
  bpf: rcu lock must not be held when calling copy_to_user()
  net: sctp: fix slab corruption from use after free on INIT collisions
  net: mv643xx_eth: Fix highmem support in non-TSO egress path
  sh_eth: Fix serialisation of interrupt disable with interrupt & NAPI handlers
  ...
commit 59343cd7c4
@@ -166,12 +166,12 @@
 			#address-cells = <1>;
 			#size-cells = <0>;
 
-			ethphy1: ethernet-phy@0 {
-				reg = <0>;
+			ethphy1: ethernet-phy@1 {
+				reg = <1>;
 			};
 
-			ethphy2: ethernet-phy@1 {
-				reg = <1>;
+			ethphy2: ethernet-phy@2 {
+				reg = <2>;
 			};
 		};
 	};
@@ -615,6 +615,9 @@ static void c_can_stop(struct net_device *dev)
 
 	c_can_irq_control(priv, false);
 
+	/* put ctrl to init on stop to end ongoing transmission */
+	priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_INIT);
+
 	/* deactivate pins */
 	pinctrl_pm_select_sleep_state(dev->dev.parent);
 	priv->can.state = CAN_STATE_STOPPED;
@@ -587,7 +587,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
 			  usb_sndbulkpipe(dev->udev,
 					  dev->bulk_out->bEndpointAddress),
 			  buf, msg->len,
-			  kvaser_usb_simple_msg_callback, priv);
+			  kvaser_usb_simple_msg_callback, netdev);
 	usb_anchor_urb(urb, &priv->tx_submitted);
 
 	err = usb_submit_urb(urb, GFP_ATOMIC);
@@ -662,11 +662,6 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
 	priv = dev->nets[channel];
 	stats = &priv->netdev->stats;
 
-	if (status & M16C_STATE_BUS_RESET) {
-		kvaser_usb_unlink_tx_urbs(priv);
-		return;
-	}
-
 	skb = alloc_can_err_skb(priv->netdev, &cf);
 	if (!skb) {
 		stats->rx_dropped++;
@@ -677,7 +672,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
 
 	netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status);
 
-	if (status & M16C_STATE_BUS_OFF) {
+	if (status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
 		cf->can_id |= CAN_ERR_BUSOFF;
 
 		priv->can.can_stats.bus_off++;
@@ -703,9 +698,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
 		}
 
 		new_state = CAN_STATE_ERROR_PASSIVE;
-	}
-
-	if (status == M16C_STATE_BUS_ERROR) {
+	} else if (status & M16C_STATE_BUS_ERROR) {
 		if ((priv->can.state < CAN_STATE_ERROR_WARNING) &&
 		    ((txerr >= 96) || (rxerr >= 96))) {
 			cf->can_id |= CAN_ERR_CRTL;
@@ -715,7 +708,8 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
 
 			priv->can.can_stats.error_warning++;
 			new_state = CAN_STATE_ERROR_WARNING;
-		} else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) {
+		} else if ((priv->can.state > CAN_STATE_ERROR_ACTIVE) &&
+			   ((txerr < 96) && (rxerr < 96))) {
 			cf->can_id |= CAN_ERR_PROT;
 			cf->data[2] = CAN_ERR_PROT_ACTIVE;
 
@@ -1590,7 +1584,7 @@ static int kvaser_usb_probe(struct usb_interface *intf,
 {
 	struct kvaser_usb *dev;
 	int err = -ENOMEM;
-	int i;
+	int i, retry = 3;
 
 	dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
 	if (!dev)
@@ -1608,7 +1602,15 @@ static int kvaser_usb_probe(struct usb_interface *intf,
 
 	usb_set_intfdata(intf, dev);
 
-	err = kvaser_usb_get_software_info(dev);
+	/* On some x86 laptops, plugging a Kvaser device again after
+	 * an unplug makes the firmware always ignore the very first
+	 * command. For such a case, provide some room for retries
+	 * instead of completely exiting the driver.
+	 */
+	do {
+		err = kvaser_usb_get_software_info(dev);
+	} while (--retry && err == -ETIMEDOUT);
+
 	if (err) {
 		dev_err(&intf->dev,
 			"Cannot get software infos, error %d\n", err);
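Two of the kvaser_usb fixes above are about execution context: URB completion callbacks run in atomic (interrupt) context, so code reached from them must not sleep, and any URB submitted there needs GFP_ATOMIC. A minimal sketch of the rule (the callback below is an illustrative stand-in, not a function from the driver):

static void example_bulk_complete(struct urb *urb)
{
	int err;

	/* Interrupt context: no GFP_KERNEL allocations, no msleep(),
	 * no wait_for_completion().
	 */
	if (urb->status)
		return;

	/* Resubmitting from a completion handler is fine, but only
	 * with GFP_ATOMIC; a sleeping allocation here is exactly the
	 * class of bug "Do not sleep in atomic context" removes.
	 */
	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err)
		dev_err(&urb->dev->dev, "resubmit failed: %d\n", err);
}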
@@ -767,16 +767,17 @@
 #define MTL_Q_RQOMR				0x40
 #define MTL_Q_RQMPOCR				0x44
 #define MTL_Q_RQDR				0x4c
+#define MTL_Q_RQFCR				0x50
 #define MTL_Q_IER				0x70
 #define MTL_Q_ISR				0x74
 
 /* MTL queue register entry bit positions and sizes */
+#define MTL_Q_RQFCR_RFA_INDEX			1
+#define MTL_Q_RQFCR_RFA_WIDTH			6
+#define MTL_Q_RQFCR_RFD_INDEX			17
+#define MTL_Q_RQFCR_RFD_WIDTH			6
 #define MTL_Q_RQOMR_EHFC_INDEX			7
 #define MTL_Q_RQOMR_EHFC_WIDTH			1
-#define MTL_Q_RQOMR_RFA_INDEX			8
-#define MTL_Q_RQOMR_RFA_WIDTH			3
-#define MTL_Q_RQOMR_RFD_INDEX			13
-#define MTL_Q_RQOMR_RFD_WIDTH			3
 #define MTL_Q_RQOMR_RQS_INDEX			16
 #define MTL_Q_RQOMR_RQS_WIDTH			9
 #define MTL_Q_RQOMR_RSF_INDEX			5
@@ -2079,10 +2079,10 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
 
 	for (i = 0; i < pdata->rx_q_count; i++) {
 		/* Activate flow control when less than 4k left in fifo */
-		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2);
+		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2);
 
 		/* De-activate flow control when more than 6k left in fifo */
-		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFD, 4);
+		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4);
 	}
 }
@@ -3175,7 +3175,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
 		}
 #endif
 		if (!bnx2x_fp_lock_napi(fp))
-			return work_done;
+			return budget;
 
 		for_each_cos_in_tx_queue(fp, cos)
 			if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
@@ -1335,7 +1335,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
 	int err;
 
 	if (!enic_poll_lock_napi(&enic->rq[rq]))
-		return work_done;
+		return budget;
 	/* Service RQ
 	 */
 
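The bnx2x and enic hunks above, and the netxen hunk further down, fix the same NAPI contract violation: a poll handler may only call napi_complete() when it returns a value smaller than its budget, and a handler that bails out early must claim the whole budget so the core keeps polling. A minimal sketch of the convention (the trylock/clean/irq helpers are hypothetical, not from these drivers):

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done;

	/* Cannot make progress (e.g. a per-queue lock is busy):
	 * returning the full budget asks the core to poll again.
	 * Returning less here would stall the queue, which is what
	 * these fixes correct.
	 */
	if (!example_trylock(napi))
		return budget;

	work_done = example_clean_rx(napi, budget);	/* driver RX work */

	if (work_done < budget) {
		napi_complete(napi);		/* done: leave poll mode */
		example_enable_irq(napi);	/* re-arm the interrupt */
	}

	example_unlock(napi);
	return work_done;
}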
@@ -192,6 +192,10 @@ static char mv643xx_eth_driver_version[] = "1.4";
 #define IS_TSO_HEADER(txq, addr) \
 	((addr >= txq->tso_hdrs_dma) && \
 	 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
 
+#define DESC_DMA_MAP_SINGLE 0
+#define DESC_DMA_MAP_PAGE 1
+
 /*
  * RX/TX descriptors.
  */
@@ -362,6 +366,7 @@ struct tx_queue {
 	dma_addr_t tso_hdrs_dma;
 
 	struct tx_desc *tx_desc_area;
+	char *tx_desc_mapping; /* array to track the type of the dma mapping */
 	dma_addr_t tx_desc_dma;
 	int tx_desc_area_size;
 
@@ -750,6 +755,7 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
 	if (txq->tx_curr_desc == txq->tx_ring_size)
 		txq->tx_curr_desc = 0;
 	desc = &txq->tx_desc_area[tx_index];
+	txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
 
 	desc->l4i_chk = 0;
 	desc->byte_cnt = length;
@@ -879,14 +885,13 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 		skb_frag_t *this_frag;
 		int tx_index;
 		struct tx_desc *desc;
-		void *addr;
 
 		this_frag = &skb_shinfo(skb)->frags[frag];
-		addr = page_address(this_frag->page.p) + this_frag->page_offset;
 		tx_index = txq->tx_curr_desc++;
 		if (txq->tx_curr_desc == txq->tx_ring_size)
 			txq->tx_curr_desc = 0;
 		desc = &txq->tx_desc_area[tx_index];
+		txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE;
 
 		/*
 		 * The last fragment will generate an interrupt
@@ -902,8 +907,9 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 
 		desc->l4i_chk = 0;
 		desc->byte_cnt = skb_frag_size(this_frag);
-		desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr,
-					       desc->byte_cnt, DMA_TO_DEVICE);
+		desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
+						 this_frag, 0, desc->byte_cnt,
+						 DMA_TO_DEVICE);
 	}
 }
@@ -936,6 +942,7 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
 	if (txq->tx_curr_desc == txq->tx_ring_size)
 		txq->tx_curr_desc = 0;
 	desc = &txq->tx_desc_area[tx_index];
+	txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
 
 	if (nr_frags) {
 		txq_submit_frag_skb(txq, skb);
@@ -1047,9 +1054,12 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
 		int tx_index;
 		struct tx_desc *desc;
 		u32 cmd_sts;
+		char desc_dma_map;
 
 		tx_index = txq->tx_used_desc;
 		desc = &txq->tx_desc_area[tx_index];
+		desc_dma_map = txq->tx_desc_mapping[tx_index];
+
 		cmd_sts = desc->cmd_sts;
 
 		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
@@ -1065,9 +1075,19 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
 		reclaimed++;
 		txq->tx_desc_count--;
 
-		if (!IS_TSO_HEADER(txq, desc->buf_ptr))
-			dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
-					 desc->byte_cnt, DMA_TO_DEVICE);
+		if (!IS_TSO_HEADER(txq, desc->buf_ptr)) {
+			if (desc_dma_map == DESC_DMA_MAP_PAGE)
+				dma_unmap_page(mp->dev->dev.parent,
+					       desc->buf_ptr,
+					       desc->byte_cnt,
+					       DMA_TO_DEVICE);
+			else
+				dma_unmap_single(mp->dev->dev.parent,
+						 desc->buf_ptr,
+						 desc->byte_cnt,
+						 DMA_TO_DEVICE);
+		}
 
 		if (cmd_sts & TX_ENABLE_INTERRUPT) {
 			struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
@@ -1996,6 +2016,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
 	struct tx_queue *txq = mp->txq + index;
 	struct tx_desc *tx_desc;
 	int size;
+	int ret;
 	int i;
 
 	txq->index = index;
@@ -2048,18 +2069,34 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
 			nexti * sizeof(struct tx_desc);
 	}
 
+	txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char),
+				       GFP_KERNEL);
+	if (!txq->tx_desc_mapping) {
+		ret = -ENOMEM;
+		goto err_free_desc_area;
+	}
+
 	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
 	txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
 					   txq->tx_ring_size * TSO_HEADER_SIZE,
 					   &txq->tso_hdrs_dma, GFP_KERNEL);
 	if (txq->tso_hdrs == NULL) {
-		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
-				  txq->tx_desc_area, txq->tx_desc_dma);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto err_free_desc_mapping;
 	}
 	skb_queue_head_init(&txq->tx_skb);
 
 	return 0;
+
+err_free_desc_mapping:
+	kfree(txq->tx_desc_mapping);
+err_free_desc_area:
+	if (index == 0 && size <= mp->tx_desc_sram_size)
+		iounmap(txq->tx_desc_area);
+	else
+		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
+				  txq->tx_desc_area, txq->tx_desc_dma);
+	return ret;
 }
 
 static void txq_deinit(struct tx_queue *txq)
@@ -2077,6 +2114,8 @@ static void txq_deinit(struct tx_queue *txq)
 	else
 		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
 				  txq->tx_desc_area, txq->tx_desc_dma);
+	kfree(txq->tx_desc_mapping);
+
 	if (txq->tso_hdrs)
 		dma_free_coherent(mp->dev->dev.parent,
 				  txq->tx_ring_size * TSO_HEADER_SIZE,
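The mv643xx_eth series works because the DMA API requires every unmap to use the same flavour as the map: buffers mapped with skb_frag_dma_map()/dma_map_page() must be released with dma_unmap_page(), and dma_map_single() mappings with dma_unmap_single(); that is the entire reason for the per-descriptor tx_desc_mapping byte. Reduced to a sketch (the field names mirror the driver's, the helper itself is hypothetical):

static void example_unmap_tx_desc(struct device *dma_dev,
				  struct tx_desc *desc, char map_type)
{
	/* Highmem fragments are page mappings; linear skb data is a
	 * single mapping.  Mixing the two up can appear to work on
	 * some configurations but breaks once HIGHMEM pages are
	 * actually in play.
	 */
	if (map_type == DESC_DMA_MAP_PAGE)
		dma_unmap_page(dma_dev, desc->buf_ptr,
			       desc->byte_cnt, DMA_TO_DEVICE);
	else
		dma_unmap_single(dma_dev, desc->buf_ptr,
				 desc->byte_cnt, DMA_TO_DEVICE);
}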
@@ -2388,7 +2388,10 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
 
 	work_done = netxen_process_rcv_ring(sds_ring, budget);
 
-	if ((work_done < budget) && tx_complete) {
+	if (!tx_complete)
+		work_done = budget;
+
+	if (work_done < budget) {
 		napi_complete(&sds_ring->napi);
 		if (test_bit(__NX_DEV_UP, &adapter->state))
 			netxen_nic_enable_int(sds_ring);
@@ -396,6 +396,9 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
 	[TSU_ADRL31]	= 0x01fc,
 };
 
+static void sh_eth_rcv_snd_disable(struct net_device *ndev);
+static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
+
 static bool sh_eth_is_gether(struct sh_eth_private *mdp)
 {
 	return mdp->reg_offset == sh_eth_offset_gigabit;
@@ -1120,6 +1123,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
 	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
 	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
 	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+	dma_addr_t dma_addr;
 
 	mdp->cur_rx = 0;
 	mdp->cur_tx = 0;
@@ -1133,7 +1137,6 @@ static void sh_eth_ring_format(struct net_device *ndev)
 		/* skb */
 		mdp->rx_skbuff[i] = NULL;
 		skb = netdev_alloc_skb(ndev, skbuff_size);
-		mdp->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
 		sh_eth_set_receive_align(skb);
@@ -1142,9 +1145,15 @@ static void sh_eth_ring_format(struct net_device *ndev)
 		rxdesc = &mdp->rx_ring[i];
 		/* The size of the buffer is a multiple of 16 bytes. */
 		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
-		dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
-			       DMA_FROM_DEVICE);
-		rxdesc->addr = virt_to_phys(skb->data);
+		dma_addr = dma_map_single(&ndev->dev, skb->data,
+					  rxdesc->buffer_length,
+					  DMA_FROM_DEVICE);
+		if (dma_mapping_error(&ndev->dev, dma_addr)) {
+			kfree_skb(skb);
+			break;
+		}
+		mdp->rx_skbuff[i] = skb;
+		rxdesc->addr = dma_addr;
 		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
 
 		/* Rx descriptor address set */
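Both sh_eth receive paths now follow the canonical DMA-API refill pattern: map the buffer, check dma_mapping_error(), and only then publish the skb pointer and descriptor address. A generic sketch of that pattern (the parameter types here are illustrative, not the driver's):

static int example_refill_one(struct device *dev, struct sk_buff *skb,
			      struct sk_buff **slot, dma_addr_t *desc_addr,
			      size_t buf_len)
{
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, skb->data, buf_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		kfree_skb(skb);		/* never hand the device an
					 * unchecked address */
		return -ENOMEM;
	}

	*slot = skb;			/* publish only after the mapping
					 * is known to be good */
	*desc_addr = dma_addr;
	return 0;
}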
@@ -1316,8 +1325,10 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
 		     RFLR);
 
 	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
-	if (start)
+	if (start) {
+		mdp->irq_enabled = true;
 		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+	}
 
 	/* PAUSE Prohibition */
 	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
@@ -1356,6 +1367,33 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
 	return ret;
 }
 
+static void sh_eth_dev_exit(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	int i;
+
+	/* Deactivate all TX descriptors, so DMA should stop at next
+	 * packet boundary if it's currently running
+	 */
+	for (i = 0; i < mdp->num_tx_ring; i++)
+		mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT);
+
+	/* Disable TX FIFO egress to MAC */
+	sh_eth_rcv_snd_disable(ndev);
+
+	/* Stop RX DMA at next packet boundary */
+	sh_eth_write(ndev, 0, EDRRR);
+
+	/* Aside from TX DMA, we can't tell when the hardware is
+	 * really stopped, so we need to reset to make sure.
+	 * Before doing that, wait for long enough to *probably*
+	 * finish transmitting the last packet and poll stats.
+	 */
+	msleep(2); /* max frame time at 10 Mbps < 1250 us */
+	sh_eth_get_stats(ndev);
+	sh_eth_reset(ndev);
+}
+
 /* free Tx skb function */
 static int sh_eth_txfree(struct net_device *ndev)
 {
@@ -1400,6 +1438,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 	u16 pkt_len = 0;
 	u32 desc_status;
 	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+	dma_addr_t dma_addr;
 
 	boguscnt = min(boguscnt, *quota);
 	limit = boguscnt;
@@ -1447,9 +1486,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 			mdp->rx_skbuff[entry] = NULL;
 			if (mdp->cd->rpadir)
 				skb_reserve(skb, NET_IP_ALIGN);
-			dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
-						ALIGN(mdp->rx_buf_sz, 16),
-						DMA_FROM_DEVICE);
+			dma_unmap_single(&ndev->dev, rxdesc->addr,
+					 ALIGN(mdp->rx_buf_sz, 16),
+					 DMA_FROM_DEVICE);
 			skb_put(skb, pkt_len);
 			skb->protocol = eth_type_trans(skb, ndev);
 			netif_receive_skb(skb);
@@ -1469,15 +1508,20 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 
 		if (mdp->rx_skbuff[entry] == NULL) {
 			skb = netdev_alloc_skb(ndev, skbuff_size);
-			mdp->rx_skbuff[entry] = skb;
 			if (skb == NULL)
 				break;	/* Better luck next round. */
 			sh_eth_set_receive_align(skb);
-			dma_map_single(&ndev->dev, skb->data,
-				       rxdesc->buffer_length, DMA_FROM_DEVICE);
+			dma_addr = dma_map_single(&ndev->dev, skb->data,
+						  rxdesc->buffer_length,
+						  DMA_FROM_DEVICE);
+			if (dma_mapping_error(&ndev->dev, dma_addr)) {
+				kfree_skb(skb);
+				break;
+			}
+			mdp->rx_skbuff[entry] = skb;
 
 			skb_checksum_none_assert(skb);
-			rxdesc->addr = virt_to_phys(skb->data);
+			rxdesc->addr = dma_addr;
 		}
 		if (entry >= mdp->num_rx_ring - 1)
 			rxdesc->status |=
@@ -1573,7 +1617,6 @@ ignore_link:
 		if (intr_status & EESR_RFRMER) {
 			/* Receive Frame Overflow int */
 			ndev->stats.rx_frame_errors++;
-			netif_err(mdp, rx_err, ndev, "Receive Abort\n");
 		}
 	}
@@ -1592,13 +1635,11 @@ ignore_link:
 	if (intr_status & EESR_RDE) {
 		/* Receive Descriptor Empty int */
 		ndev->stats.rx_over_errors++;
-		netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n");
 	}
 
 	if (intr_status & EESR_RFE) {
 		/* Receive FIFO Overflow int */
 		ndev->stats.rx_fifo_errors++;
-		netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");
 	}
 
 	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
@@ -1653,7 +1694,12 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
 	if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
 		ret = IRQ_HANDLED;
 	else
-		goto other_irq;
+		goto out;
+
+	if (!likely(mdp->irq_enabled)) {
+		sh_eth_write(ndev, 0, EESIPR);
+		goto out;
+	}
 
 	if (intr_status & EESR_RX_CHECK) {
 		if (napi_schedule_prep(&mdp->napi)) {
@@ -1684,7 +1730,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
 		sh_eth_error(ndev, intr_status);
 	}
 
-other_irq:
+out:
 	spin_unlock(&mdp->lock);
 
 	return ret;
@@ -1712,7 +1758,8 @@ static int sh_eth_poll(struct napi_struct *napi, int budget)
 	napi_complete(napi);
 
 	/* Reenable Rx interrupts */
-	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+	if (mdp->irq_enabled)
+		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
 out:
 	return budget - quota;
 }
@@ -1968,40 +2015,50 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
 		return -EINVAL;
 
 	if (netif_running(ndev)) {
+		netif_device_detach(ndev);
 		netif_tx_disable(ndev);
-		/* Disable interrupts by clearing the interrupt mask. */
-		sh_eth_write(ndev, 0x0000, EESIPR);
-		/* Stop the chip's Tx and Rx processes. */
-		sh_eth_write(ndev, 0, EDTRR);
-		sh_eth_write(ndev, 0, EDRRR);
-		synchronize_irq(ndev->irq);
-	}
 
-	/* Free all the skbuffs in the Rx queue. */
-	sh_eth_ring_free(ndev);
-	/* Free DMA buffer */
-	sh_eth_free_dma_buffer(mdp);
+		/* Serialise with the interrupt handler and NAPI, then
+		 * disable interrupts.  We have to clear the
+		 * irq_enabled flag first to ensure that interrupts
+		 * won't be re-enabled.
+		 */
+		mdp->irq_enabled = false;
+		synchronize_irq(ndev->irq);
+		napi_synchronize(&mdp->napi);
+		sh_eth_write(ndev, 0x0000, EESIPR);
+
+		sh_eth_dev_exit(ndev);
+
+		/* Free all the skbuffs in the Rx queue. */
+		sh_eth_ring_free(ndev);
+		/* Free DMA buffer */
+		sh_eth_free_dma_buffer(mdp);
+	}
 
 	/* Set new parameters */
 	mdp->num_rx_ring = ring->rx_pending;
 	mdp->num_tx_ring = ring->tx_pending;
 
-	ret = sh_eth_ring_init(ndev);
-	if (ret < 0) {
-		netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__);
-		return ret;
-	}
-	ret = sh_eth_dev_init(ndev, false);
-	if (ret < 0) {
-		netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__);
-		return ret;
-	}
-
 	if (netif_running(ndev)) {
+		ret = sh_eth_ring_init(ndev);
+		if (ret < 0) {
+			netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
+				   __func__);
+			return ret;
+		}
+		ret = sh_eth_dev_init(ndev, false);
+		if (ret < 0) {
+			netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
+				   __func__);
+			return ret;
+		}
+
+		mdp->irq_enabled = true;
+		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
 		/* Setting the Rx mode will start the Rx process. */
 		sh_eth_write(ndev, EDRRR_R, EDRRR);
-		netif_wake_queue(ndev);
+		netif_device_attach(ndev);
 	}
 
 	return 0;
@@ -2117,6 +2174,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	}
 	spin_unlock_irqrestore(&mdp->lock, flags);
 
+	if (skb_padto(skb, ETH_ZLEN))
+		return NETDEV_TX_OK;
+
 	entry = mdp->cur_tx % mdp->num_tx_ring;
 	mdp->tx_skbuff[entry] = skb;
 	txdesc = &mdp->tx_ring[entry];
@@ -2126,10 +2186,11 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 				 skb->len + 2);
 	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
 				      DMA_TO_DEVICE);
-	if (skb->len < ETH_ZLEN)
-		txdesc->buffer_length = ETH_ZLEN;
-	else
-		txdesc->buffer_length = skb->len;
+	if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+	txdesc->buffer_length = skb->len;
 
 	if (entry >= mdp->num_tx_ring - 1)
 		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
@@ -2181,14 +2242,17 @@ static int sh_eth_close(struct net_device *ndev)
 
 	netif_stop_queue(ndev);
 
-	/* Disable interrupts by clearing the interrupt mask. */
+	/* Serialise with the interrupt handler and NAPI, then disable
+	 * interrupts.  We have to clear the irq_enabled flag first to
+	 * ensure that interrupts won't be re-enabled.
+	 */
+	mdp->irq_enabled = false;
+	synchronize_irq(ndev->irq);
+	napi_disable(&mdp->napi);
 	sh_eth_write(ndev, 0x0000, EESIPR);
 
-	/* Stop the chip's Tx and Rx processes. */
-	sh_eth_write(ndev, 0, EDTRR);
-	sh_eth_write(ndev, 0, EDRRR);
+	sh_eth_dev_exit(ndev);
 
-	sh_eth_get_stats(ndev);
 	/* PHY Disconnect */
 	if (mdp->phydev) {
 		phy_stop(mdp->phydev);
@@ -2198,8 +2262,6 @@ static int sh_eth_close(struct net_device *ndev)
 
 	free_irq(ndev->irq, ndev);
 
-	napi_disable(&mdp->napi);
-
 	/* Free all the skbuffs in the Rx queue. */
 	sh_eth_ring_free(ndev);
 
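The close and set_ringparam hunks share one teardown ordering rule worth stating on its own: clear the driver's irq_enabled flag first (so a racing handler cannot re-arm interrupts), then synchronize with the interrupt handler and NAPI, and only then mask the hardware and stop the DMA engines. Schematically, in the driver's own names:

	mdp->irq_enabled = false;	/* handlers must not re-enable */
	synchronize_irq(ndev->irq);	/* wait out a running handler */
	napi_disable(&mdp->napi);	/* or napi_synchronize() where
					 * NAPI must stay usable */
	sh_eth_write(ndev, 0x0000, EESIPR);	/* now mask the hardware */
	sh_eth_dev_exit(ndev);		/* then stop the DMA engines */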
@@ -513,6 +513,7 @@ struct sh_eth_private {
 	u32 rx_buf_sz;			/* Based on MTU+slack. */
 	int edmac_endian;
 	struct napi_struct napi;
+	bool irq_enabled;
 	/* MII transceiver section. */
 	u32 phy_id;			/* PHY ID */
 	struct mii_bus *mii_bus;	/* MDIO bus control */
@@ -2778,6 +2778,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
  * @addr: iobase memory address
  * Description: this is the main probe function used to
  * call the alloc_etherdev, allocate the priv structure.
+ * Return:
+ * on success the new private structure is returned, otherwise the error
+ * pointer.
  */
 struct stmmac_priv *stmmac_dvr_probe(struct device *device,
 				     struct plat_stmmacenet_data *plat_dat,
@@ -2789,7 +2792,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
 
 	ndev = alloc_etherdev(sizeof(struct stmmac_priv));
 	if (!ndev)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	SET_NETDEV_DEV(ndev, device);
 
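Since stmmac_dvr_probe() now returns ERR_PTR(-ENOMEM) rather than NULL, its callers are expected to use the standard ERR_PTR idiom; a hypothetical caller would check the result like this:

	struct stmmac_priv *priv;

	priv = stmmac_dvr_probe(device, plat_dat, addr);
	if (IS_ERR(priv))
		return PTR_ERR(priv);	/* propagates -ENOMEM etc. */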
@@ -1683,6 +1683,19 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
 	if (vid == priv->data.default_vlan)
 		return 0;
 
+	if (priv->data.dual_emac) {
+		/* In dual EMAC, reserved VLAN id should not be used for
+		 * creating VLAN interfaces as this can break the dual
+		 * EMAC port separation
+		 */
+		int i;
+
+		for (i = 0; i < priv->data.slaves; i++) {
+			if (vid == priv->slaves[i].port_vlan)
+				return -EINVAL;
+		}
+	}
+
 	dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
 	return cpsw_add_vlan_ale_entry(priv, vid);
 }
@@ -1696,6 +1709,15 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
 	if (vid == priv->data.default_vlan)
 		return 0;
 
+	if (priv->data.dual_emac) {
+		int i;
+
+		for (i = 0; i < priv->data.slaves; i++) {
+			if (vid == priv->slaves[i].port_vlan)
+				return -EINVAL;
+		}
+	}
+
 	dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
 	ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
 	if (ret != 0)
@@ -377,9 +377,11 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
 	};
 
 	dst = ip6_route_output(dev_net(dev), NULL, &fl6);
-	if (IS_ERR(dst))
+	if (dst->error) {
+		ret = dst->error;
+		dst_release(dst);
 		goto err;
-
+	}
 	skb_dst_drop(skb);
 	skb_dst_set(skb, dst);
 	err = ip6_local_out(skb);
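The ipvlan fix encodes how ip6_route_output() reports failure: unlike the IPv4 lookup helpers it does not return an error pointer, so IS_ERR() never fires; it always hands back a dst_entry whose dst->error field carries the result, and the caller still owns a reference that must be dropped on failure. The corrected idiom, reduced to its core:

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {		/* error lives inside the dst,
					 * not in the pointer itself */
		ret = dst->error;
		dst_release(dst);	/* we still hold a reference */
		goto err;
	}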
@@ -285,6 +285,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
 
 	__ath_cancel_work(sc);
 
+	disable_irq(sc->irq);
 	tasklet_disable(&sc->intr_tq);
 	tasklet_disable(&sc->bcon_tasklet);
 	spin_lock_bh(&sc->sc_pcu_lock);
@@ -331,6 +332,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
 		r = -EIO;
 
 out:
+	enable_irq(sc->irq);
 	spin_unlock_bh(&sc->sc_pcu_lock);
 	tasklet_enable(&sc->bcon_tasklet);
 	tasklet_enable(&sc->intr_tq);
@@ -512,9 +514,6 @@ irqreturn_t ath_isr(int irq, void *dev)
 	if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags))
 		return IRQ_NONE;
 
-	if (!AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags))
-		return IRQ_NONE;
-
 	/* shared irq, not for us */
 	if (!ath9k_hw_intrpend(ah))
 		return IRQ_NONE;
@@ -529,7 +528,7 @@ irqreturn_t ath_isr(int irq, void *dev)
 	ath9k_debug_sync_cause(sc, sync_cause);
 	status &= ah->imask;	/* discard unasked-for bits */
 
-	if (AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags))
+	if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
 		return IRQ_HANDLED;
 
 	/*
@@ -246,6 +246,7 @@ enum iwl_ucode_tlv_flag {
 * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
 *	regardless of the band or the number of the probes. FW will calculate
 *	the actual dwell time.
+ * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too.
 */
 enum iwl_ucode_tlv_api {
 	IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID	= BIT(0),
@@ -257,6 +258,7 @@ enum iwl_ucode_tlv_api {
 	IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF	= BIT(7),
 	IWL_UCODE_TLV_API_FRAGMENTED_SCAN	= BIT(8),
 	IWL_UCODE_TLV_API_BASIC_DWELL		= BIT(13),
+	IWL_UCODE_TLV_API_SINGLE_SCAN_EBS	= BIT(16),
 };
 
 /**
@@ -653,8 +653,11 @@ enum iwl_scan_channel_flags {
 };
 
 /* iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
- * @flags: enum iwl_scan_channel_flgs
- * @non_ebs_ratio: how many regular scan iteration before EBS
+ * @flags: enum iwl_scan_channel_flags
+ * @non_ebs_ratio: defines the ratio of number of scan iterations where EBS is
+ *	involved.
+ *	1 - EBS is disabled.
+ *	2 - every second scan will be full scan(and so on).
 */
 struct iwl_scan_channel_opt {
 	__le16 flags;
@@ -3343,18 +3343,16 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
 		msk |= mvmsta->tfd_queue_msk;
 	}
 
-	if (drop) {
-		if (iwl_mvm_flush_tx_path(mvm, msk, true))
-			IWL_ERR(mvm, "flush request fail\n");
-		mutex_unlock(&mvm->mutex);
-	} else {
-		mutex_unlock(&mvm->mutex);
+	msk &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]);
 
-		/* this can take a while, and we may need/want other operations
-		 * to succeed while doing this, so do it without the mutex held
-		 */
-		iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
-	}
+	if (iwl_mvm_flush_tx_path(mvm, msk, true))
+		IWL_ERR(mvm, "flush request fail\n");
+	mutex_unlock(&mvm->mutex);
+
+	/* this can take a while, and we may need/want other operations
+	 * to succeed while doing this, so do it without the mutex held
+	 */
+	iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
 }
 
 const struct ieee80211_ops iwl_mvm_hw_ops = {
@@ -72,6 +72,8 @@
 
 #define IWL_PLCP_QUIET_THRESH		1
 #define IWL_ACTIVE_QUIET_TIME		10
+#define IWL_DENSE_EBS_SCAN_RATIO	5
+#define IWL_SPARSE_EBS_SCAN_RATIO	1
 
 struct iwl_mvm_scan_params {
 	u32 max_out_time;
@@ -1105,6 +1107,12 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
 		return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN,
 					  notify);
 
+	if (mvm->scan_status == IWL_MVM_SCAN_NONE)
+		return 0;
+
+	if (iwl_mvm_is_radio_killed(mvm))
+		goto out;
+
 	if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
 	    (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
 	     mvm->scan_status != IWL_MVM_SCAN_OS)) {
@@ -1141,6 +1149,7 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
 	if (mvm->scan_status == IWL_MVM_SCAN_OS)
 		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
 
+out:
 	mvm->scan_status = IWL_MVM_SCAN_NONE;
 
 	if (notify) {
@@ -1297,18 +1306,6 @@ iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm,
 	cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
 	cmd->iter_num = cpu_to_le32(1);
 
-	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
-	    mvm->last_ebs_successful) {
-		cmd->channel_opt[0].flags =
-			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
-				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
-				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
-		cmd->channel_opt[1].flags =
-			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
-				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
-				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
-	}
-
 	if (iwl_mvm_rrm_scan_needed(mvm))
 		cmd->scan_flags |=
 			cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED);
@@ -1383,6 +1380,22 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
 	cmd->schedule[1].iterations = 0;
 	cmd->schedule[1].full_scan_mul = 0;
 
+	if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS &&
+	    mvm->last_ebs_successful) {
+		cmd->channel_opt[0].flags =
+			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+		cmd->channel_opt[0].non_ebs_ratio =
+			cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
+		cmd->channel_opt[1].flags =
+			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+		cmd->channel_opt[1].non_ebs_ratio =
+			cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
+	}
+
 	for (i = 1; i <= req->req.n_ssids; i++)
 		ssid_bitmap |= BIT(i);
 
@@ -1483,6 +1496,22 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
 	cmd->schedule[1].iterations = 0xff;
 	cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
 
+	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
+	    mvm->last_ebs_successful) {
+		cmd->channel_opt[0].flags =
+			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+		cmd->channel_opt[0].non_ebs_ratio =
+			cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
+		cmd->channel_opt[1].flags =
+			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+		cmd->channel_opt[1].non_ebs_ratio =
+			cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
+	}
+
 	iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels,
 				       ssid_bitmap, cmd);
 
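A hedged reading of the two new ratio constants against the fw-api comment above, on the assumption that non_ebs_ratio == N means "every Nth iteration is a full, non-EBS scan" (as the "2 - every second scan will be full scan" example implies):

/*
 *   IWL_DENSE_EBS_SCAN_RATIO  = 5 -> one full scan per 5 iterations
 *   IWL_SPARSE_EBS_SCAN_RATIO = 1 -> "1 - EBS is disabled"
 */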
@@ -90,8 +90,6 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
 
 	if (ieee80211_is_probe_resp(fc))
 		tx_flags |= TX_CMD_FLG_TSF;
-	else if (ieee80211_is_back_req(fc))
-		tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
 
 	if (ieee80211_has_morefrags(fc))
 		tx_flags |= TX_CMD_FLG_MORE_FRAG;
@@ -100,6 +98,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
 		u8 *qc = ieee80211_get_qos_ctl(hdr);
 		tx_cmd->tid_tspec = qc[0] & 0xf;
 		tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
+	} else if (ieee80211_is_back_req(fc)) {
+		struct ieee80211_bar *bar = (void *)skb->data;
+		u16 control = le16_to_cpu(bar->control);
+
+		tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
+		tx_cmd->tid_tspec = (control &
+				     IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
+			IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
+		WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
 	} else {
 		tx_cmd->tid_tspec = IWL_TID_NON_QOS;
 		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
|
||||
QETH_DBF_TEXT(SETUP, 2, "idxanswr");
|
||||
card = CARD_FROM_CDEV(channel->ccwdev);
|
||||
iob = qeth_get_buffer(channel);
|
||||
if (!iob)
|
||||
return -ENOMEM;
|
||||
iob->callback = idx_reply_cb;
|
||||
memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
|
||||
channel->ccw.count = QETH_BUFSIZE;
|
||||
@ -1834,6 +1836,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
|
||||
QETH_DBF_TEXT(SETUP, 2, "idxactch");
|
||||
|
||||
iob = qeth_get_buffer(channel);
|
||||
if (!iob)
|
||||
return -ENOMEM;
|
||||
iob->callback = idx_reply_cb;
|
||||
memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
|
||||
channel->ccw.count = IDX_ACTIVATE_SIZE;
|
||||
@ -2021,10 +2025,36 @@ void qeth_prepare_control_data(struct qeth_card *card, int len,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
|
||||
|
||||
/**
|
||||
* qeth_send_control_data() - send control command to the card
|
||||
* @card: qeth_card structure pointer
|
||||
* @len: size of the command buffer
|
||||
* @iob: qeth_cmd_buffer pointer
|
||||
* @reply_cb: callback function pointer
|
||||
* @cb_card: pointer to the qeth_card structure
|
||||
* @cb_reply: pointer to the qeth_reply structure
|
||||
* @cb_cmd: pointer to the original iob for non-IPA
|
||||
* commands, or to the qeth_ipa_cmd structure
|
||||
* for the IPA commands.
|
||||
* @reply_param: private pointer passed to the callback
|
||||
*
|
||||
* Returns the value of the `return_code' field of the response
|
||||
* block returned from the hardware, or other error indication.
|
||||
* Value of zero indicates successful execution of the command.
|
||||
*
|
||||
* Callback function gets called one or more times, with cb_cmd
|
||||
* pointing to the response returned by the hardware. Callback
|
||||
* function must return non-zero if more reply blocks are expected,
|
||||
* and zero if the last or only reply block is received. Callback
|
||||
* function can get the value of the reply_param pointer from the
|
||||
* field 'param' of the structure qeth_reply.
|
||||
*/
|
||||
|
||||
int qeth_send_control_data(struct qeth_card *card, int len,
|
||||
struct qeth_cmd_buffer *iob,
|
||||
int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
|
||||
unsigned long),
|
||||
int (*reply_cb)(struct qeth_card *cb_card,
|
||||
struct qeth_reply *cb_reply,
|
||||
unsigned long cb_cmd),
|
||||
void *reply_param)
|
||||
{
|
||||
int rc;
|
||||
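The kernel-doc added above pins down the reply-callback contract; a minimal callback honouring it could look like the following (an illustrative example, not a callback taken from the driver):

static int example_reply_cb(struct qeth_card *cb_card,
			    struct qeth_reply *cb_reply,
			    unsigned long cb_cmd)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)cb_cmd;
	int *result = cb_reply->param;	/* the caller's reply_param */

	*result = cmd->hdr.return_code;

	/* Per the contract: non-zero means "more reply blocks are
	 * expected"; zero means this was the last (or only) block.
	 */
	return 0;
}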
@@ -2914,9 +2944,16 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
 	struct qeth_cmd_buffer *iob;
 	struct qeth_ipa_cmd *cmd;
 
-	iob = qeth_wait_for_buffer(&card->write);
-	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
-	qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
+	iob = qeth_get_buffer(&card->write);
+	if (iob) {
+		cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+		qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
+	} else {
+		dev_warn(&card->gdev->dev,
+			 "The qeth driver ran out of channel command buffers\n");
+		QETH_DBF_MESSAGE(1, "%s The qeth driver ran out of channel command buffers",
+				 dev_name(&card->gdev->dev));
+	}
 
 	return iob;
 }
@@ -2932,6 +2969,12 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
 }
 EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
 
+/**
+ * qeth_send_ipa_cmd() - send an IPA command
+ *
+ * See qeth_send_control_data() for explanation of the arguments.
+ */
+
 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
 		int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
 			unsigned long),
@@ -2968,6 +3011,8 @@ int qeth_send_startlan(struct qeth_card *card)
 	QETH_DBF_TEXT(SETUP, 2, "strtlan");
 
 	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
+	if (!iob)
+		return -ENOMEM;
 	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
 	return rc;
 }
@@ -3013,11 +3058,13 @@ static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
 
 	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS,
 				     QETH_PROT_IPV4);
-	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
-	cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
-	cmd->data.setadapterparms.hdr.command_code = command;
-	cmd->data.setadapterparms.hdr.used_total = 1;
-	cmd->data.setadapterparms.hdr.seq_no = 1;
+	if (iob) {
+		cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+		cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
+		cmd->data.setadapterparms.hdr.command_code = command;
+		cmd->data.setadapterparms.hdr.used_total = 1;
+		cmd->data.setadapterparms.hdr.seq_no = 1;
+	}
 
 	return iob;
 }
@@ -3030,6 +3077,8 @@ int qeth_query_setadapterparms(struct qeth_card *card)
 	QETH_CARD_TEXT(card, 3, "queryadp");
 	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
 				   sizeof(struct qeth_ipacmd_setadpparms));
+	if (!iob)
+		return -ENOMEM;
 	rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
 	return rc;
 }
@@ -3080,6 +3129,8 @@ int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
 
 	QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);
 	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
+	if (!iob)
+		return -ENOMEM;
 	rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
 	return rc;
 }
@@ -3119,6 +3170,8 @@ int qeth_query_switch_attributes(struct qeth_card *card,
 		return -ENOMEDIUM;
 	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES,
 				sizeof(struct qeth_ipacmd_setadpparms_hdr));
+	if (!iob)
+		return -ENOMEM;
 	return qeth_send_ipa_cmd(card, iob,
 				qeth_query_switch_attributes_cb, sw_info);
 }
@@ -3146,6 +3199,8 @@ static int qeth_query_setdiagass(struct qeth_card *card)
 
 	QETH_DBF_TEXT(SETUP, 2, "qdiagass");
 	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+	if (!iob)
+		return -ENOMEM;
 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
 	cmd->data.diagass.subcmd_len = 16;
 	cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY;
@@ -3197,6 +3252,8 @@ int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
 
 	QETH_DBF_TEXT(SETUP, 2, "diagtrap");
 	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+	if (!iob)
+		return -ENOMEM;
 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
 	cmd->data.diagass.subcmd_len = 80;
 	cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP;
@@ -4162,6 +4219,8 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)
 
 	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
 			sizeof(struct qeth_ipacmd_setadpparms));
+	if (!iob)
+		return;
 	cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
 	cmd->data.setadapterparms.data.mode = mode;
 	qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
@@ -4232,6 +4291,8 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
 
 	iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
 				   sizeof(struct qeth_ipacmd_setadpparms));
+	if (!iob)
+		return -ENOMEM;
 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
 	cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
 	cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
@@ -4345,6 +4406,8 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
 	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
 				   sizeof(struct qeth_ipacmd_setadpparms_hdr) +
 				   sizeof(struct qeth_set_access_ctrl));
+	if (!iob)
+		return -ENOMEM;
 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
 	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
 	access_ctrl_req->subcmd_code = isolation;
@@ -4588,6 +4651,10 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
 
 	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
 				   QETH_SNMP_SETADP_CMDLENGTH + req_len);
+	if (!iob) {
+		rc = -ENOMEM;
+		goto out;
+	}
 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
 	memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
 	rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
@@ -4599,7 +4666,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
 		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
 			rc = -EFAULT;
 	}
-
+out:
 	kfree(ureq);
 	kfree(qinfo.udata);
 	return rc;
@@ -4670,6 +4737,10 @@ int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
 	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
 				   sizeof(struct qeth_ipacmd_setadpparms_hdr) +
 				   sizeof(struct qeth_query_oat));
+	if (!iob) {
+		rc = -ENOMEM;
+		goto out_free;
+	}
 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
 	oat_req = &cmd->data.setadapterparms.data.query_oat;
 	oat_req->subcmd_code = oat_data.command;
@@ -4735,6 +4806,8 @@ static int qeth_query_card_info(struct qeth_card *card,
 		return -EOPNOTSUPP;
 	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO,
 		sizeof(struct qeth_ipacmd_setadpparms_hdr));
+	if (!iob)
+		return -ENOMEM;
 	return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
 					(void *)carrier_info);
 }
@@ -5060,11 +5133,23 @@ retriable:
 	card->options.adp.supported_funcs = 0;
 	card->options.sbp.supported_funcs = 0;
 	card->info.diagass_support = 0;
-	qeth_query_ipassists(card, QETH_PROT_IPV4);
-	if (qeth_is_supported(card, IPA_SETADAPTERPARMS))
-		qeth_query_setadapterparms(card);
-	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST))
-		qeth_query_setdiagass(card);
+	rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
+	if (rc == -ENOMEM)
+		goto out;
+	if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
+		rc = qeth_query_setadapterparms(card);
+		if (rc < 0) {
+			QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+			goto out;
+		}
+	}
+	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
+		rc = qeth_query_setdiagass(card);
+		if (rc < 0) {
+			QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
+			goto out;
+		}
+	}
 	return 0;
 out:
 	dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
@@ -27,10 +27,7 @@ static int qeth_l2_set_offline(struct ccwgroup_device *);
 static int qeth_l2_stop(struct net_device *);
 static int qeth_l2_send_delmac(struct qeth_card *, __u8 *);
 static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
-			   enum qeth_ipa_cmds,
-			   int (*reply_cb) (struct qeth_card *,
-					    struct qeth_reply*,
-					    unsigned long));
+			   enum qeth_ipa_cmds);
 static void qeth_l2_set_multicast_list(struct net_device *);
 static int qeth_l2_recover(void *);
 static void qeth_bridgeport_query_support(struct qeth_card *card);
@@ -130,56 +127,71 @@ static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no)
 	return ndev;
 }
 
-static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card,
-				struct qeth_reply *reply,
-				unsigned long data)
+static int qeth_setdel_makerc(struct qeth_card *card, int retcode)
 {
-	struct qeth_ipa_cmd *cmd;
-	__u8 *mac;
+	int rc;
 
-	QETH_CARD_TEXT(card, 2, "L2Sgmacb");
-	cmd = (struct qeth_ipa_cmd *) data;
-	mac = &cmd->data.setdelmac.mac[0];
-	/* MAC already registered, needed in couple/uncouple case */
-	if (cmd->hdr.return_code == IPA_RC_L2_DUP_MAC) {
-		QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s \n",
-			  mac, QETH_CARD_IFNAME(card));
-		cmd->hdr.return_code = 0;
+	if (retcode)
+		QETH_CARD_TEXT_(card, 2, "err%04x", retcode);
+	switch (retcode) {
+	case IPA_RC_SUCCESS:
+		rc = 0;
+		break;
+	case IPA_RC_L2_UNSUPPORTED_CMD:
+		rc = -ENOSYS;
+		break;
+	case IPA_RC_L2_ADDR_TABLE_FULL:
+		rc = -ENOSPC;
+		break;
+	case IPA_RC_L2_DUP_MAC:
+	case IPA_RC_L2_DUP_LAYER3_MAC:
+		rc = -EEXIST;
+		break;
+	case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
+	case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
+		rc = -EPERM;
+		break;
+	case IPA_RC_L2_MAC_NOT_FOUND:
+		rc = -ENOENT;
+		break;
+	case -ENOMEM:
+		rc = -ENOMEM;
+		break;
+	default:
+		rc = -EIO;
+		break;
 	}
-	if (cmd->hdr.return_code)
-		QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %x\n",
-			  mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code);
-	return 0;
+	return rc;
 }
 
 static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
 {
+	int rc;
+
 	QETH_CARD_TEXT(card, 2, "L2Sgmac");
-	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
-					  qeth_l2_send_setgroupmac_cb);
-}
-
-static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card,
-				struct qeth_reply *reply,
-				unsigned long data)
-{
-	struct qeth_ipa_cmd *cmd;
-	__u8 *mac;
-
-	QETH_CARD_TEXT(card, 2, "L2Dgmacb");
-	cmd = (struct qeth_ipa_cmd *) data;
-	mac = &cmd->data.setdelmac.mac[0];
-	if (cmd->hdr.return_code)
-		QETH_DBF_MESSAGE(2, "Could not delete group MAC %pM on %s: %x\n",
-			  mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code);
-	return 0;
+	rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+					IPA_CMD_SETGMAC));
+	if (rc == -EEXIST)
+		QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n",
+			mac, QETH_CARD_IFNAME(card));
+	else if (rc)
+		QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %d\n",
+			mac, QETH_CARD_IFNAME(card), rc);
+	return rc;
 }
 
 static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
 {
+	int rc;
+
 	QETH_CARD_TEXT(card, 2, "L2Dgmac");
-	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC,
-					  qeth_l2_send_delgroupmac_cb);
+	rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+					IPA_CMD_DELGMAC));
+	if (rc)
+		QETH_DBF_MESSAGE(2,
+			"Could not delete group MAC %pM on %s: %d\n",
+			mac, QETH_CARD_IFNAME(card), rc);
+	return rc;
 }
 
 static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
@@ -197,10 +209,11 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
 	mc->is_vmac = vmac;
 
 	if (vmac) {
-		rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
-					NULL);
+		rc = qeth_setdel_makerc(card,
+			qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC));
 	} else {
-		rc = qeth_l2_send_setgroupmac(card, mac);
+		rc = qeth_setdel_makerc(card,
+			qeth_l2_send_setgroupmac(card, mac));
 	}
 
 	if (!rc)
@@ -218,7 +231,7 @@ static void qeth_l2_del_all_mc(struct qeth_card *card, int del)
 		if (del) {
 			if (mc->is_vmac)
 				qeth_l2_send_setdelmac(card, mc->mc_addr,
-					IPA_CMD_DELVMAC, NULL);
+					IPA_CMD_DELVMAC);
 			else
 				qeth_l2_send_delgroupmac(card, mc->mc_addr);
 		}
@@ -291,6 +304,8 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
 
 	QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd);
 	iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+	if (!iob)
+		return -ENOMEM;
 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
 	cmd->data.setdelvlan.vlan_id = i;
 	return qeth_send_ipa_cmd(card, iob,
@@ -313,6 +328,7 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
 {
 	struct qeth_card *card = dev->ml_priv;
 	struct qeth_vlan_vid *id;
+	int rc;
 
 	QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
 	if (!vid)
@@ -328,7 +344,11 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
 	id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
 	if (id) {
 		id->vid = vid;
-		qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
+		rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
+		if (rc) {
+			kfree(id);
+			return rc;
+		}
 		spin_lock_bh(&card->vlanlock);
 		list_add_tail(&id->list, &card->vid_list);
 		spin_unlock_bh(&card->vlanlock);
@@ -343,6 +363,7 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
 {
 	struct qeth_vlan_vid *id, *tmpid = NULL;
 	struct qeth_card *card = dev->ml_priv;
+	int rc = 0;
 
 	QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
 	if (card->info.type == QETH_CARD_TYPE_OSM) {
@@ -363,11 +384,11 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
 	}
 	spin_unlock_bh(&card->vlanlock);
 	if (tmpid) {
-		qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
+		rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
 		kfree(tmpid);
 	}
 	qeth_l2_set_multicast_list(card->dev);
-	return 0;
+	return rc;
 }
 
 static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
@@ -539,91 +560,62 @@ out:
 }
 
 static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
-			   enum qeth_ipa_cmds ipacmd,
-			   int (*reply_cb) (struct qeth_card *,
-					    struct qeth_reply*,
-					    unsigned long))
+			   enum qeth_ipa_cmds ipacmd)
 {
 	struct qeth_ipa_cmd *cmd;
 	struct qeth_cmd_buffer *iob;
 
 	QETH_CARD_TEXT(card, 2, "L2sdmac");
 	iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+	if (!iob)
+		return -ENOMEM;
 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
 	cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
 	memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
-	return qeth_send_ipa_cmd(card, iob, reply_cb, NULL);
-}
-
-static int qeth_l2_send_setmac_cb(struct qeth_card *card,
-			   struct qeth_reply *reply,
-			   unsigned long data)
-{
-	struct qeth_ipa_cmd *cmd;
-
-	QETH_CARD_TEXT(card, 2, "L2Smaccb");
-	cmd = (struct qeth_ipa_cmd *) data;
-	if (cmd->hdr.return_code) {
-		QETH_CARD_TEXT_(card, 2, "L2er%x", cmd->hdr.return_code);
-		card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
-		switch (cmd->hdr.return_code) {
-		case IPA_RC_L2_DUP_MAC:
-		case IPA_RC_L2_DUP_LAYER3_MAC:
-			dev_warn(&card->gdev->dev,
-				"MAC address %pM already exists\n",
-				cmd->data.setdelmac.mac);
-			break;
-		case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
-		case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
-			dev_warn(&card->gdev->dev,
-				"MAC address %pM is not authorized\n",
-				cmd->data.setdelmac.mac);
-			break;
-		default:
-			break;
-		}
-	} else {
-		card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
-		memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac,
-		       OSA_ADDR_LEN);
-		dev_info(&card->gdev->dev,
-			"MAC address %pM successfully registered on device %s\n",
-			card->dev->dev_addr, card->dev->name);
-	}
-	return 0;
+	return qeth_send_ipa_cmd(card, iob, NULL, NULL);
 }
 
 static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
 {
+	int rc;
+
 	QETH_CARD_TEXT(card, 2, "L2Setmac");
-	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
-					  qeth_l2_send_setmac_cb);
-}
-
-static int qeth_l2_send_delmac_cb(struct qeth_card *card,
-			   struct qeth_reply *reply,
-			   unsigned long data)
-{
-	struct qeth_ipa_cmd *cmd;
-
-	QETH_CARD_TEXT(card, 2, "L2Dmaccb");
-	cmd = (struct qeth_ipa_cmd *) data;
-	if (cmd->hdr.return_code) {
-		QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
-		return 0;
+	rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+					IPA_CMD_SETVMAC));
+	if (rc == 0) {
+		card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+		memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN);
+		dev_info(&card->gdev->dev,
+			"MAC address %pM successfully registered on device %s\n",
+			card->dev->dev_addr, card->dev->name);
+	} else {
+		card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
+		switch (rc) {
+		case -EEXIST:
+			dev_warn(&card->gdev->dev,
+				"MAC address %pM already exists\n", mac);
+			break;
+		case -EPERM:
+			dev_warn(&card->gdev->dev,
+				"MAC address %pM is not authorized\n", mac);
+			break;
+		}
 	}
-	card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
-
-	return 0;
+	return rc;
 }
 
 static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
 {
+	int rc;
+
 	QETH_CARD_TEXT(card, 2, "L2Delmac");
 	if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
 		return 0;
-	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
-					  qeth_l2_send_delmac_cb);
+	rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+					IPA_CMD_DELVMAC));
+	if (rc == 0)
+		card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
+	return rc;
 }
 
 static int qeth_l2_request_initial_mac(struct qeth_card *card)
@ -651,7 +643,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
|
||||
if (rc) {
|
||||
QETH_DBF_MESSAGE(2, "couldn't get MAC address on "
|
||||
"device %s: x%x\n", CARD_BUS_ID(card), rc);
|
||||
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
|
||||
QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc);
|
||||
return rc;
|
||||
}
|
||||
QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN);
|
||||
@ -687,7 +679,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
|
||||
return -ERESTARTSYS;
|
||||
}
|
||||
rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
|
||||
if (!rc || (rc == IPA_RC_L2_MAC_NOT_FOUND))
|
||||
if (!rc || (rc == -ENOENT))
|
||||
rc = qeth_l2_send_setmac(card, addr->sa_data);
|
||||
return rc ? -EINVAL : 0;
|
||||
}
|
||||
@ -996,7 +988,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
|
||||
recover_flag = card->state;
|
||||
rc = qeth_core_hardsetup_card(card);
|
||||
if (rc) {
|
||||
QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
|
||||
QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
|
||||
rc = -ENODEV;
|
||||
goto out_remove;
|
||||
}
|
||||
@ -1730,6 +1722,8 @@ static void qeth_bridgeport_query_support(struct qeth_card *card)
|
||||
|
||||
QETH_CARD_TEXT(card, 2, "brqsuppo");
|
||||
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
|
||||
if (!iob)
|
||||
return;
|
||||
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
|
||||
cmd->data.sbp.hdr.cmdlength =
|
||||
sizeof(struct qeth_ipacmd_sbp_hdr) +
|
||||
@ -1805,6 +1799,8 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
|
||||
if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS))
|
||||
return -EOPNOTSUPP;
|
||||
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
|
||||
if (!iob)
|
||||
return -ENOMEM;
|
||||
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
|
||||
cmd->data.sbp.hdr.cmdlength =
|
||||
sizeof(struct qeth_ipacmd_sbp_hdr);
|
||||
@ -1817,9 +1813,7 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
|
||||
if (rc)
|
||||
return rc;
|
||||
rc = qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS);
|
||||
if (rc)
|
||||
return rc;
|
||||
return 0;
|
||||
return rc;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports);
|
||||
|
||||
@ -1873,6 +1867,8 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
|
||||
if (!(card->options.sbp.supported_funcs & setcmd))
|
||||
return -EOPNOTSUPP;
|
||||
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
|
||||
if (!iob)
|
||||
return -ENOMEM;
|
||||
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
|
||||
cmd->data.sbp.hdr.cmdlength = cmdlength;
|
||||
cmd->data.sbp.hdr.command_code = setcmd;
|
||||
|
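The layer-2 hunks above replace per-command reply callbacks with one translation step: qeth_setdel_makerc() turns the adapter's IPA return codes into standard errnos, so qeth_l2_send_setmac() can simply switch on -EEXIST and -EPERM. Below is a minimal standalone C sketch of that pattern; the enum values are placeholders, not the real IPA codes, and the helper name only mirrors the one used in the diff.

#include <errno.h>
#include <stdio.h>
#include <string.h>

enum ipa_rc {			/* placeholder values, not the real codes */
	IPA_RC_SUCCESS = 0,
	IPA_RC_L2_DUP_MAC = 1,
	IPA_RC_L2_DUP_LAYER3_MAC = 2,
	IPA_RC_L2_MAC_NOT_AUTH_BY_HYP = 3,
	IPA_RC_L2_MAC_NOT_AUTH_BY_ADP = 4,
	IPA_RC_L2_MAC_NOT_FOUND = 5,
};

/* modeled on qeth_setdel_makerc(): one place maps device codes to errnos */
static int setdel_makerc(int ipa_rc)
{
	switch (ipa_rc) {
	case IPA_RC_SUCCESS:
		return 0;
	case IPA_RC_L2_DUP_MAC:
	case IPA_RC_L2_DUP_LAYER3_MAC:
		return -EEXIST;
	case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
	case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
		return -EPERM;
	case IPA_RC_L2_MAC_NOT_FOUND:
		return -ENOENT;
	default:
		return -EIO;
	}
}

int main(void)
{
	/* callers can now test plain errnos, as the new setmac path does */
	int rc = setdel_makerc(IPA_RC_L2_DUP_MAC);

	if (rc == -EEXIST)
		printf("MAC already exists: %s\n", strerror(-rc));
	return 0;
}
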
@@ -549,6 +549,8 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "setdelmc");

iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
if (!iob)
return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN);
if (addr->proto == QETH_PROT_IPV6)
@@ -588,6 +590,8 @@ static int qeth_l3_send_setdelip(struct qeth_card *card,
QETH_CARD_TEXT_(card, 4, "flags%02X", flags);

iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
if (!iob)
return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
if (addr->proto == QETH_PROT_IPV6) {
memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
@@ -616,6 +620,8 @@ static int qeth_l3_send_setrouting(struct qeth_card *card,

QETH_CARD_TEXT(card, 4, "setroutg");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
if (!iob)
return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setrtg.type = (type);
rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
@@ -1049,12 +1055,14 @@ static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd(
QETH_CARD_TEXT(card, 4, "getasscm");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);

cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setassparms.hdr.assist_no = ipa_func;
cmd->data.setassparms.hdr.length = 8 + len;
cmd->data.setassparms.hdr.command_code = cmd_code;
cmd->data.setassparms.hdr.return_code = 0;
cmd->data.setassparms.hdr.seq_no = 0;
if (iob) {
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setassparms.hdr.assist_no = ipa_func;
cmd->data.setassparms.hdr.length = 8 + len;
cmd->data.setassparms.hdr.command_code = cmd_code;
cmd->data.setassparms.hdr.return_code = 0;
cmd->data.setassparms.hdr.seq_no = 0;
}

return iob;
}
@@ -1090,6 +1098,8 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "simassp6");
iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
0, QETH_PROT_IPV6);
if (!iob)
return -ENOMEM;
rc = qeth_l3_send_setassparms(card, iob, 0, 0,
qeth_l3_default_setassparms_cb, NULL);
return rc;
@@ -1108,6 +1118,8 @@ static int qeth_l3_send_simple_setassparms(struct qeth_card *card,
length = sizeof(__u32);
iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
length, QETH_PROT_IPV4);
if (!iob)
return -ENOMEM;
rc = qeth_l3_send_setassparms(card, iob, length, data,
qeth_l3_default_setassparms_cb, NULL);
return rc;
@@ -1494,6 +1506,8 @@ static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)

iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
QETH_PROT_IPV6);
if (!iob)
return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
card->info.unique_id;
@@ -1537,6 +1551,8 @@ static int qeth_l3_get_unique_id(struct qeth_card *card)

iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
QETH_PROT_IPV6);
if (!iob)
return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
card->info.unique_id;
@@ -1611,6 +1627,8 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
QETH_DBF_TEXT(SETUP, 2, "diagtrac");

iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
if (!iob)
return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.diagass.subcmd_len = 16;
cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE;
@@ -2442,6 +2460,8 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
IPA_CMD_ASS_ARP_QUERY_INFO,
sizeof(struct qeth_arp_query_data) - sizeof(char),
prot);
if (!iob)
return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setassparms.data.query_arp.request_bits = 0x000F;
cmd->data.setassparms.data.query_arp.reply_bits = 0;
@@ -2535,6 +2555,8 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card,
IPA_CMD_ASS_ARP_ADD_ENTRY,
sizeof(struct qeth_arp_cache_entry),
QETH_PROT_IPV4);
if (!iob)
return -ENOMEM;
rc = qeth_l3_send_setassparms(card, iob,
sizeof(struct qeth_arp_cache_entry),
(unsigned long) entry,
@@ -2574,6 +2596,8 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card,
IPA_CMD_ASS_ARP_REMOVE_ENTRY,
12,
QETH_PROT_IPV4);
if (!iob)
return -ENOMEM;
rc = qeth_l3_send_setassparms(card, iob,
12, (unsigned long)buf,
qeth_l3_default_setassparms_cb, NULL);
@@ -3262,6 +3286,8 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {

static int qeth_l3_setup_netdev(struct qeth_card *card)
{
int rc;

if (card->info.type == QETH_CARD_TYPE_OSD ||
card->info.type == QETH_CARD_TYPE_OSX) {
if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
@@ -3293,7 +3319,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
return -ENODEV;
card->dev->flags |= IFF_NOARP;
card->dev->netdev_ops = &qeth_l3_netdev_ops;
qeth_l3_iqd_read_initial_mac(card);
rc = qeth_l3_iqd_read_initial_mac(card);
if (rc)
return rc;
if (card->options.hsuid[0])
memcpy(card->dev->perm_addr, card->options.hsuid, 9);
} else
@@ -3360,7 +3388,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
recover_flag = card->state;
rc = qeth_core_hardsetup_card(card);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
rc = -ENODEV;
goto out_remove;
}
@@ -3401,7 +3429,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
contin:
rc = qeth_l3_setadapter_parms(card);
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
if (!card->options.sniffer) {
rc = qeth_l3_start_ipassists(card);
if (rc) {
@@ -3410,10 +3438,10 @@ contin:
}
rc = qeth_l3_setrouting_v4(card);
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
QETH_DBF_TEXT_(SETUP, 2, "4err%04x", rc);
rc = qeth_l3_setrouting_v6(card);
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
QETH_DBF_TEXT_(SETUP, 2, "5err%04x", rc);
}
netif_tx_disable(card->dev);

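Most layer-3 hunks above add the same guard: qeth_get_ipacmd_buffer() and qeth_l3_get_setassparms_cmd() can return NULL, and every caller now bails out with -ENOMEM instead of dereferencing the buffer. The pattern as a standalone sketch; the buffer type and helper names here are illustrative, not the driver's.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct cmd_buffer {
	char data[64];
};

static struct cmd_buffer *get_ipacmd_buffer(void)
{
	return calloc(1, sizeof(struct cmd_buffer));	/* may return NULL */
}

static int send_setrouting(void)
{
	struct cmd_buffer *iob = get_ipacmd_buffer();

	if (!iob)			/* the check each hunk inserts */
		return -ENOMEM;

	/* ... fill iob->data and issue the command ... */
	free(iob);
	return 0;
}

int main(void)
{
	printf("rc=%d\n", send_setrouting());
	return 0;
}
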
@@ -39,11 +39,12 @@ struct inet_skb_parm {
struct ip_options opt; /* Compiled IP options */
unsigned char flags;

#define IPSKB_FORWARDED 1
#define IPSKB_XFRM_TUNNEL_SIZE 2
#define IPSKB_XFRM_TRANSFORMED 4
#define IPSKB_FRAG_COMPLETE 8
#define IPSKB_REROUTED 16
#define IPSKB_FORWARDED BIT(0)
#define IPSKB_XFRM_TUNNEL_SIZE BIT(1)
#define IPSKB_XFRM_TRANSFORMED BIT(2)
#define IPSKB_FRAG_COMPLETE BIT(3)
#define IPSKB_REROUTED BIT(4)
#define IPSKB_DOREDIRECT BIT(5)

u16 frag_max_size;
};

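The rewritten flag block above switches the IPSKB_* definitions from literal powers of two to the kernel's BIT() macro, which makes it obvious that each flag occupies a distinct bit and leaves an unambiguous slot for the new IPSKB_DOREDIRECT. A self-contained sketch; BIT() is defined locally here, while in the kernel it comes from the bitops headers.

#include <stdio.h>

#define BIT(n) (1UL << (n))

#define IPSKB_FORWARDED		BIT(0)
#define IPSKB_XFRM_TUNNEL_SIZE	BIT(1)
#define IPSKB_XFRM_TRANSFORMED	BIT(2)
#define IPSKB_FRAG_COMPLETE	BIT(3)
#define IPSKB_REROUTED		BIT(4)
#define IPSKB_DOREDIRECT	BIT(5)	/* the new per-skb flag */

int main(void)
{
	unsigned long flags = 0;

	flags |= IPSKB_DOREDIRECT;	/* set by __mkroute_input() below */
	if (flags & IPSKB_DOREDIRECT)	/* tested by ip_forward() below */
		printf("would send ICMP redirect\n");
	return 0;
}
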
@@ -150,7 +150,7 @@ static int map_lookup_elem(union bpf_attr *attr)
int ufd = attr->map_fd;
struct fd f = fdget(ufd);
struct bpf_map *map;
void *key, *value;
void *key, *value, *ptr;
int err;

if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
@@ -169,20 +169,29 @@ static int map_lookup_elem(union bpf_attr *attr)
if (copy_from_user(key, ukey, map->key_size) != 0)
goto free_key;

err = -ENOENT;
rcu_read_lock();
value = map->ops->map_lookup_elem(map, key);
err = -ENOMEM;
value = kmalloc(map->value_size, GFP_USER);
if (!value)
goto err_unlock;
goto free_key;

rcu_read_lock();
ptr = map->ops->map_lookup_elem(map, key);
if (ptr)
memcpy(value, ptr, map->value_size);
rcu_read_unlock();

err = -ENOENT;
if (!ptr)
goto free_value;

err = -EFAULT;
if (copy_to_user(uvalue, value, map->value_size) != 0)
goto err_unlock;
goto free_value;

err = 0;

err_unlock:
rcu_read_unlock();
free_value:
kfree(value);
free_key:
kfree(key);
err_put:

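The reshuffled lookup path above exists because copy_to_user() can fault and sleep, which is not allowed inside an RCU read-side critical section. The fix snapshots the value into a kernel buffer while the RCU lock is held and only copies it out after unlocking. A userspace model of that shape, with a rwlock standing in for RCU; names here are illustrative.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
static char map_value[64] = "value-v1";	/* shared, may be replaced by a writer */

static int lookup_elem(char *out, size_t len)
{
	char *ptr;

	pthread_rwlock_rdlock(&map_lock);	/* rcu_read_lock() */
	ptr = map_value;			/* map_lookup_elem() */
	if (ptr)
		memcpy(out, ptr, len);		/* snapshot under the lock */
	pthread_rwlock_unlock(&map_lock);	/* rcu_read_unlock() */

	if (!ptr)
		return -1;			/* -ENOENT in the kernel */

	/* only now do the potentially blocking transfer (copy_to_user()
	 * in the kernel); no lock is held if it faults */
	printf("copied out: %s\n", out);
	return 0;
}

int main(void)
{
	char buf[64];

	return lookup_elem(buf, sizeof(buf)) ? EXIT_FAILURE : EXIT_SUCCESS;
}
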
@@ -46,6 +46,7 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds)
snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d:%.2x",
ds->index, ds->pd->sw_addr);
ds->slave_mii_bus->parent = ds->master_dev;
ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
}

@@ -129,7 +129,8 @@ int ip_forward(struct sk_buff *skb)
* We now generate an ICMP HOST REDIRECT giving the route
* we calculated.
*/
if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb_sec_path(skb))
if (IPCB(skb)->flags & IPSKB_DOREDIRECT && !opt->srr &&
!skb_sec_path(skb))
ip_rt_send_redirect(skb);

skb->priority = rt_tos2priority(iph->tos);

@@ -966,8 +966,11 @@ bool ping_rcv(struct sk_buff *skb)

sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
if (sk != NULL) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

pr_debug("rcv on socket %p\n", sk);
ping_queue_rcv_skb(sk, skb_get(skb));
if (skb2)
ping_queue_rcv_skb(sk, skb2);
sock_put(sk);
return true;
}

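skb_get() only bumps the reference count, so the socket queue and the rest of the ICMP receive path shared one skb and could race on it; skb_clone() gives the queue its own header. A rough userspace model of the difference between sharing and cloning; this is an illustration, not the skb API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
	int refcnt;
	char data[32];
};

static struct buf *buf_get(struct buf *b)	/* like skb_get(): share */
{
	b->refcnt++;
	return b;
}

static struct buf *buf_clone(const struct buf *b)	/* like skb_clone() */
{
	struct buf *c = malloc(sizeof(*c));

	if (c) {
		memcpy(c, b, sizeof(*c));
		c->refcnt = 1;		/* independent lifetime */
	}
	return c;
}

int main(void)
{
	struct buf orig = { .refcnt = 1, .data = "echo reply" };
	struct buf *shared = buf_get(&orig);	/* old: two owners, one header */
	struct buf *copy = buf_clone(&orig);	/* new: queue owns its header */

	printf("shared=%p copy=%p\n", (void *)shared, (void *)copy);
	free(copy);
	return 0;
}
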
@@ -1554,11 +1554,10 @@ static int __mkroute_input(struct sk_buff *skb,

do_cache = res->fi && !itag;
if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
skb->protocol == htons(ETH_P_IP) &&
(IN_DEV_SHARED_MEDIA(out_dev) ||
inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) {
flags |= RTCF_DOREDIRECT;
do_cache = false;
}
inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
IPCB(skb)->flags |= IPSKB_DOREDIRECT;

if (skb->protocol != htons(ETH_P_IP)) {
/* Not IP (i.e. ARP). Do not create route, if it is
@@ -2303,6 +2302,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
if (rt->rt_flags & RTCF_NOTIFY)
r->rtm_flags |= RTM_F_NOTIFY;
if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
r->rtm_flags |= RTCF_DOREDIRECT;

if (nla_put_be32(skb, RTA_DST, dst))
goto nla_put_failure;

@@ -99,11 +99,13 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlin
s_slot = cb->args[0];
num = s_num = cb->args[1];

for (slot = s_slot; slot <= table->mask; num = s_num = 0, slot++) {
for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) {
struct sock *sk;
struct hlist_nulls_node *node;
struct udp_hslot *hslot = &table->hash[slot];

num = 0;

if (hlist_nulls_empty(&hslot->head))
continue;

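The loop change above matters for resumable netlink dumps: `num` must restart at the top of every slot, while `s_num` (the saved resume position) is cleared only when moving on to the next slot; otherwise sockets within a chain are skipped on re-entry. A standalone model of the corrected bookkeeping; the bucket data and names are made up.

#include <stdio.h>

#define SLOTS 3
static const int bucket_len[SLOTS] = { 2, 3, 1 };

/* dump at most `budget` entries, resuming from args[0]/args[1] */
static int dump(long args[2], int budget)
{
	int s_slot = args[0], s_num = args[1], num = 0;

	for (int slot = s_slot; slot < SLOTS; s_num = 0, slot++) {
		num = 0;			/* reset per slot, as in the fix */
		for (; num < bucket_len[slot]; num++) {
			if (num < s_num)	/* skip already-dumped entries */
				continue;
			if (!budget--) {
				args[0] = slot;	/* save position for re-entry */
				args[1] = num;
				return 1;	/* more to do */
			}
			printf("slot %d entry %d\n", slot, num);
		}
	}
	args[0] = SLOTS;
	args[1] = 0;
	return 0;
}

int main(void)
{
	long args[2] = { 0, 0 };

	while (dump(args, 2))	/* two entries per pass, like a full skb */
		;
	return 0;
}
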
@@ -659,6 +659,29 @@ static int fib6_commit_metrics(struct dst_entry *dst,
return 0;
}

static void fib6_purge_rt(struct rt6_info *rt, struct fib6_node *fn,
struct net *net)
{
if (atomic_read(&rt->rt6i_ref) != 1) {
/* This route is used as dummy address holder in some split
* nodes. It is not leaked, but it still holds other resources,
* which must be released in time. So, scan ascendant nodes
* and replace dummy references to this route with references
* to still alive ones.
*/
while (fn) {
if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
fn->leaf = fib6_find_prefix(net, fn);
atomic_inc(&fn->leaf->rt6i_ref);
rt6_release(rt);
}
fn = fn->parent;
}
/* No more references are possible at this point. */
BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
}
}

/*
* Insert routing information in a node.
*/
@@ -807,11 +830,12 @@ add:
rt->dst.rt6_next = iter->dst.rt6_next;
atomic_inc(&rt->rt6i_ref);
inet6_rt_notify(RTM_NEWROUTE, rt, info);
rt6_release(iter);
if (!(fn->fn_flags & RTN_RTINFO)) {
info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
fn->fn_flags |= RTN_RTINFO;
}
fib6_purge_rt(iter, fn, info->nl_net);
rt6_release(iter);
}

return 0;
@@ -1322,24 +1346,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
fn = fib6_repair_tree(net, fn);
}

if (atomic_read(&rt->rt6i_ref) != 1) {
/* This route is used as dummy address holder in some split
* nodes. It is not leaked, but it still holds other resources,
* which must be released in time. So, scan ascendant nodes
* and replace dummy references to this route with references
* to still alive ones.
*/
while (fn) {
if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
fn->leaf = fib6_find_prefix(net, fn);
atomic_inc(&fn->leaf->rt6i_ref);
rt6_release(rt);
}
fn = fn->parent;
}
/* No more references are possible at this point. */
BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
}
fib6_purge_rt(rt, fn, net);

inet6_rt_notify(RTM_DELROUTE, rt, info);
rt6_release(rt);

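fib6_purge_rt() consolidates logic that previously lived only in fib6_del_route(): a route whose refcount is above one may still be referenced as a dummy leaf by ancestor split nodes, so those references are repointed at a live route before release. The replace path in fib6_add_rt2node() needed the same purge, which was the bug. A heavily simplified userspace model of the ancestor walk; the structures here are not the real fib6 ones.

#include <assert.h>
#include <stdio.h>

struct route {
	int ref;
};

struct node {
	struct node *parent;
	struct route *leaf;	/* may be a "dummy" reference */
	int has_rtinfo;		/* RTN_RTINFO in the kernel */
};

static struct route *find_replacement(void)
{
	static struct route alive = { .ref = 1 };
	return &alive;		/* fib6_find_prefix() in the kernel */
}

static void purge_rt(struct route *rt, struct node *fn)
{
	if (rt->ref != 1) {
		while (fn) {
			if (!fn->has_rtinfo && fn->leaf == rt) {
				fn->leaf = find_replacement();
				fn->leaf->ref++;
				rt->ref--;	/* rt6_release() */
			}
			fn = fn->parent;
		}
		/* no more references are possible at this point */
		assert(rt->ref == 1);
	}
}

int main(void)
{
	struct route rt = { .ref = 3 };			/* 1 real + 2 dummy */
	struct node top = { NULL, &rt, 0 };
	struct node mid = { &top, &rt, 0 };

	purge_rt(&rt, &mid);
	printf("refs after purge: %d\n", rt.ref);	/* 1 */
	return 0;
}
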
@@ -1242,12 +1242,16 @@ restart:
rt = net->ipv6.ip6_null_entry;
else if (rt->dst.error) {
rt = net->ipv6.ip6_null_entry;
} else if (rt == net->ipv6.ip6_null_entry) {
goto out;
}

if (rt == net->ipv6.ip6_null_entry) {
fn = fib6_backtrack(fn, &fl6->saddr);
if (fn)
goto restart;
}

out:
dst_hold(&rt->dst);

read_unlock_bh(&table->tb6_lock);

@@ -130,12 +130,18 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
{
struct flowi6 *fl6 = &fl->u.ip6;
int onlyproto = 0;
u16 offset = skb_network_header_len(skb);
const struct ipv6hdr *hdr = ipv6_hdr(skb);
u16 offset = sizeof(*hdr);
struct ipv6_opt_hdr *exthdr;
const unsigned char *nh = skb_network_header(skb);
u8 nexthdr = nh[IP6CB(skb)->nhoff];
u16 nhoff = IP6CB(skb)->nhoff;
int oif = 0;
u8 nexthdr;

if (!nhoff)
nhoff = offsetof(struct ipv6hdr, nexthdr);

nexthdr = nh[nhoff];

if (skb_dst(skb))
oif = skb_dst(skb)->dev->ifindex;

@@ -18,28 +18,28 @@ static struct ctl_table llc2_timeout_table[] = {
{
.procname = "ack",
.data = &sysctl_llc2_ack_timeout,
.maxlen = sizeof(long),
.maxlen = sizeof(sysctl_llc2_ack_timeout),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "busy",
.data = &sysctl_llc2_busy_timeout,
.maxlen = sizeof(long),
.maxlen = sizeof(sysctl_llc2_busy_timeout),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "p",
.data = &sysctl_llc2_p_timeout,
.maxlen = sizeof(long),
.maxlen = sizeof(sysctl_llc2_p_timeout),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "rej",
.data = &sysctl_llc2_rej_timeout,
.maxlen = sizeof(long),
.maxlen = sizeof(sysctl_llc2_rej_timeout),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},

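proc_dointvec_jiffies operates on int-sized values, so declaring `.maxlen = sizeof(long)` overstated the buffer on 64-bit systems; `sizeof(variable)` cannot drift from the variable's actual type. The mismatch in miniature; this is an illustration, not the sysctl core.

#include <stdio.h>

static int sysctl_ack_timeout;	/* the real storage is an int */

struct ctl_entry {
	void *data;
	int maxlen;
};

int main(void)
{
	struct ctl_entry wrong = { &sysctl_ack_timeout, sizeof(long) };
	struct ctl_entry right = { &sysctl_ack_timeout, sizeof(sysctl_ack_timeout) };

	/* on 64-bit, wrong.maxlen (8) exceeds the 4 bytes actually there */
	printf("claimed %d vs actual %zu; fixed: %d\n",
	       wrong.maxlen, sizeof(sysctl_ack_timeout), right.maxlen);
	return 0;
}
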
@@ -86,20 +86,6 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
}
}

/* tear down aggregation sessions and remove STAs */
mutex_lock(&local->sta_mtx);
list_for_each_entry(sta, &local->sta_list, list) {
if (sta->uploaded) {
enum ieee80211_sta_state state;

state = sta->sta_state;
for (; state > IEEE80211_STA_NOTEXIST; state--)
WARN_ON(drv_sta_state(local, sta->sdata, sta,
state, state - 1));
}
}
mutex_unlock(&local->sta_mtx);

/* remove all interfaces that were created in the driver */
list_for_each_entry(sdata, &local->interfaces, list) {
if (!ieee80211_sdata_running(sdata))
@@ -111,6 +97,21 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
case NL80211_IFTYPE_STATION:
ieee80211_mgd_quiesce(sdata);
break;
case NL80211_IFTYPE_WDS:
/* tear down aggregation sessions and remove STAs */
mutex_lock(&local->sta_mtx);
sta = sdata->u.wds.sta;
if (sta && sta->uploaded) {
enum ieee80211_sta_state state;

state = sta->sta_state;
for (; state > IEEE80211_STA_NOTEXIST; state--)
WARN_ON(drv_sta_state(local, sta->sdata,
sta, state,
state - 1));
}
mutex_unlock(&local->sta_mtx);
break;
default:
break;
}

@@ -272,7 +272,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
else if (rate)
channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
else
channel_flags |= IEEE80211_CHAN_2GHZ;
put_unaligned_le16(channel_flags, pos);

@@ -180,6 +180,11 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
}

bpf_size = bpf_len * sizeof(*bpf_ops);
if (bpf_size != nla_len(tb[TCA_BPF_OPS])) {
ret = -EINVAL;
goto errout;
}

bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
if (bpf_ops == NULL) {
ret = -ENOMEM;
@@ -215,15 +220,21 @@ static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
struct cls_bpf_head *head)
{
unsigned int i = 0x80000000;
u32 handle;

do {
if (++head->hgen == 0x7FFFFFFF)
head->hgen = 1;
} while (--i > 0 && cls_bpf_get(tp, head->hgen));
if (i == 0)
pr_err("Insufficient number of handles\n");

return i;
if (unlikely(i == 0)) {
pr_err("Insufficient number of handles\n");
handle = 0;
} else {
handle = head->hgen;
}

return handle;
}

static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,

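The second hunk's bug was that the function returned the loop counter `i` rather than the handle it had just generated. A self-contained sketch of the fixed allocation loop, with handle_in_use() standing in for cls_bpf_get() and an invented handle table.

#include <stdio.h>

#define HGEN_MAX 0x7FFFFFFFu

static unsigned int hgen;
static unsigned int used[4] = { 1, 2, 3, 0 };	/* pretend-existing handles */

static int handle_in_use(unsigned int h)
{
	for (unsigned int i = 0; i < 4; i++)
		if (used[i] == h)
			return 1;
	return 0;
}

static unsigned int grab_new_handle(void)
{
	unsigned int i = 0x80000000;
	unsigned int handle;

	do {
		if (++hgen == HGEN_MAX)
			hgen = 1;
	} while (--i > 0 && handle_in_use(hgen));

	if (i == 0) {
		fprintf(stderr, "insufficient number of handles\n");
		handle = 0;		/* caller treats 0 as failure */
	} else {
		handle = hgen;		/* the fix: return the handle... */
	}
	return handle;			/* ...not the loop counter i */
}

int main(void)
{
	printf("new handle: %u\n", grab_new_handle());
	return 0;
}
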
@@ -1182,7 +1182,6 @@ void sctp_assoc_update(struct sctp_association *asoc,
asoc->peer.peer_hmacs = new->peer.peer_hmacs;
new->peer.peer_hmacs = NULL;

sctp_auth_key_put(asoc->asoc_shared_key);
sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
}

@@ -869,9 +869,6 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb,
struct sock_iocb *siocb)
{
if (!is_sync_kiocb(iocb))
BUG();

siocb->kiocb = iocb;
iocb->private = siocb;
return siocb;

@@ -2854,6 +2854,9 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
if (!rdev->ops->get_key)
return -EOPNOTSUPP;

if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
return -ENOENT;

msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
@@ -2873,10 +2876,6 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
goto nla_put_failure;

if (pairwise && mac_addr &&
!(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
return -ENOENT;

err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie,
get_key_callback);

@@ -3047,7 +3046,7 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
wdev_lock(dev->ieee80211_ptr);
err = nl80211_key_allowed(dev->ieee80211_ptr);

if (key.type == NL80211_KEYTYPE_PAIRWISE && mac_addr &&
if (key.type == NL80211_KEYTYPE_GROUP && mac_addr &&
!(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
err = -ENOENT;

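Moving the IBSS_RSN check above nlmsg_new() also closes a leak: the old placement returned -ENOENT after the message had been allocated, without freeing it (the del_key hunk separately adjusts the same test to match on group keys instead of pairwise ones). The ordering hazard reduced to plain C; all names here are illustrative.

#include <stdio.h>
#include <stdlib.h>

static int get_key_fixed(int key_ok)
{
	char *msg;

	if (!key_ok)
		return -2;	/* -ENOENT: nothing allocated yet, no leak */

	msg = malloc(256);	/* nlmsg_new() in the original */
	if (!msg)
		return -12;	/* -ENOMEM */

	/* ... fill and send the message ... */
	free(msg);
	return 0;
}

int main(void)
{
	printf("rc=%d\n", get_key_fixed(0));
	return 0;
}
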
@@ -308,6 +308,12 @@ unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc)
goto out;
}

if (ieee80211_is_mgmt(fc)) {
if (ieee80211_has_order(fc))
hdrlen += IEEE80211_HT_CTL_LEN;
goto out;
}

if (ieee80211_is_ctl(fc)) {
/*
* ACK and CTS are 10 bytes, all others 16. To see how

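The new branch accounts for management frames that carry an HT Control field: when the frame-control ORDER bit is set, four extra bytes follow the fixed 24-byte management header. A standalone sketch of that rule; the constants are named after their ieee80211.h counterparts but defined locally here as assumptions.

#include <stdio.h>
#include <stdint.h>

#define FCTL_FTYPE	0x000c
#define FTYPE_MGMT	0x0000
#define FCTL_ORDER	0x8000
#define HT_CTL_LEN	4

static unsigned int mgmt_hdrlen(uint16_t fc)
{
	unsigned int hdrlen = 24;	/* fixed management header */

	if ((fc & FCTL_FTYPE) == FTYPE_MGMT && (fc & FCTL_ORDER))
		hdrlen += HT_CTL_LEN;	/* +HT Control, as in the fix */
	return hdrlen;
}

int main(void)
{
	printf("plain mgmt: %u bytes\n", mgmt_hdrlen(0x0000));	/* 24 */
	printf("mgmt+order: %u bytes\n", mgmt_hdrlen(0x8000));	/* 28 */
	return 0;
}
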
@@ -69,9 +69,9 @@ static void test_hashmap_sanity(int i, void *data)

/* iterate over two elements */
assert(bpf_get_next_key(map_fd, &key, &next_key) == 0 &&
next_key == 2);
(next_key == 1 || next_key == 2));
assert(bpf_get_next_key(map_fd, &next_key, &next_key) == 0 &&
next_key == 1);
(next_key == 1 || next_key == 2));
assert(bpf_get_next_key(map_fd, &next_key, &next_key) == -1 &&
errno == ENOENT);

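The relaxed assertions reflect that a hash map's iteration order is unspecified: with keys 1 and 2 inserted, bpf_get_next_key() may return either first, so the test now only checks that both keys are visited and that iteration ends with ENOENT. A userspace model of the same order-independent walk; the map and helper here are stand-ins, not the bpf syscall wrappers.

#include <errno.h>
#include <stdio.h>

static const int keys[] = { 2, 1 };	/* some hash-determined order */
#define NKEYS 2

static int get_next_key(const int *cur, int *next)
{
	int start = 0;

	if (cur) {
		while (start < NKEYS && keys[start] != *cur)
			start++;
		start++;		/* element after *cur */
	}
	if (start >= NKEYS) {
		errno = ENOENT;		/* iteration finished */
		return -1;
	}
	*next = keys[start];
	return 0;
}

int main(void)
{
	int key, have = 0;

	for (const int *cur = NULL; !get_next_key(cur, &key); cur = &key) {
		printf("visited key %d\n", key);
		have |= 1 << key;	/* order-independent accounting */
	}
	return have == 0x6 ? 0 : 1;	/* saw both 1 and 2, in any order */
}
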