Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) OpenVswitch's lookup_datapath() returns error pointers, so don't
    check against NULL.  From Jiri Pirko.

 2) pfkey_compile_policy() code path tries to do a GFP_KERNEL allocation
    under RCU locks, fix by using GFP_ATOMIC when necessary.  From
    Nikolay Aleksandrov.

 3) phy_suspend() indirectly passes uninitialized data into the ethtool
    get wake-on-LAN implementations.  Fix from Sebastian Hesselbarth.

 4) CPSW driver unregisters CPTS twice, fix from Benedikt Spranger.

 5) If SKB allocation of the reply packet fails, vxlan's arp_reduce()
    dereferences a NULL pointer.  Fix from David Stevens.

 6) IPV6 neigh handling in vxlan doesn't validate the destination
    address properly, and it builds a packet with the src and dst
    reversed.  Fix also from David Stevens.

 7) Fix spinlock recursion during subscription failures in TIPC stack,
    from Erik Hugne.

 8) Revert buggy conversion of davinci_emac to devm_request_irq, from
    Christian Riesch.

 9) Wrong flags passed into forwarding database netlink notifications,
    from Nicolas Dichtel.

10) The netpoll neighbour solicitation handler checks the wrong
    ethertype, it needs to be ETH_P_IPV6 rather than ETH_P_ARP.  Fix
    from Li RongQing.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (34 commits)
  tipc: fix spinlock recursion bug for failed subscriptions
  vxlan: fix nonfunctional neigh_reduce()
  net: davinci_emac: Fix rollback of emac_dev_open()
  net: davinci_emac: Replace devm_request_irq with request_irq
  netpoll: fix the skb check in pkt_is_ns
  net: micrel : ks8851-ml: add vdd-supply support
  ip6mr: fix mfc notification flags
  ipmr: fix mfc notification flags
  rtnetlink: fix fdb notification flags
  tcp: syncookies: do not use getnstimeofday()
  netlink: fix setsockopt in mmap examples in documentation
  openvswitch: Correctly report flow used times for first 5 minutes after boot.
  via-rhine: Disable device in error path
  ATHEROS-ATL1E: Convert iounmap to pci_iounmap
  vxlan: fix potential NULL dereference in arp_reduce()
  cnic: Update version to 2.5.20 and copyright year.
  cnic,bnx2i,bnx2fc: Fix inconsistent use of page size
  cnic: Use proper ulp_ops for per device operations.
  net: cdc_ncm: fix control message ordering
  ipv6: ip6_append_data_mtu do not handle the mtu of the second fragment properly
  ...
commit 8a1094462c
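Item 1 above hinges on a classic kernel pitfall: functions that fail with an
ERR_PTR() encoding never return NULL, so a NULL check silently passes and the
poisoned pointer gets dereferenced later. A minimal sketch of the corrected
pattern follows (hypothetical caller and signature, not the actual
openvswitch code):

    #include <linux/err.h>

    struct datapath;
    /* assumed to return a valid pointer or an ERR_PTR() encoding, never NULL */
    struct datapath *lookup_datapath(int ifindex);

    static int use_datapath(int ifindex)
    {
            struct datapath *dp = lookup_datapath(ifindex);

            /* Wrong: dp is never NULL here, so this check can't fire. */
            /* if (!dp) return -ENODEV; */

            /* Right: decode the errno embedded in the pointer itself. */
            if (IS_ERR(dp))
                    return PTR_ERR(dp);

            /* ... use dp ... */
            return 0;
    }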
@@ -7,3 +7,4 @@ Required properties:
 
 Optional properties:
 - local-mac-address : Ethernet mac address to use
+- vdd-supply: supply for Ethernet mac
@@ -226,9 +226,9 @@ Ring setup:
     void *rx_ring, *tx_ring;
 
     /* Configure ring parameters */
-    if (setsockopt(fd, NETLINK_RX_RING, &req, sizeof(req)) < 0)
+    if (setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req)) < 0)
         exit(1);
-    if (setsockopt(fd, NETLINK_TX_RING, &req, sizeof(req)) < 0)
+    if (setsockopt(fd, SOL_NETLINK, NETLINK_TX_RING, &req, sizeof(req)) < 0)
         exit(1)
 
     /* Calculate size of each individual ring */
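For reference, a hedged sketch of the corrected ring setup as a complete call
sequence (hypothetical ring geometry; assumes a kernel of this era built with
CONFIG_NETLINK_MMAP, which provided struct nl_mmap_req in <linux/netlink.h>):

    #include <stdlib.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>

    int main(void)
    {
            struct nl_mmap_req req;
            int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);

            if (fd < 0)
                    exit(1);

            memset(&req, 0, sizeof(req));
            req.nm_block_size = 4096;             /* hypothetical geometry */
            req.nm_block_nr   = 64;
            req.nm_frame_size = 2048;
            req.nm_frame_nr   = 64 * 4096 / 2048;

            /* the ring options live at the SOL_NETLINK socket level */
            if (setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req)) < 0)
                    exit(1);
            if (setsockopt(fd, SOL_NETLINK, NETLINK_TX_RING, &req, sizeof(req)) < 0)
                    exit(1);
            return 0;
    }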
@@ -4545,6 +4545,7 @@ M:	Greg Rose <gregory.v.rose@intel.com>
 M:	Alex Duyck <alexander.h.duyck@intel.com>
 M:	John Ronciak <john.ronciak@intel.com>
+M:	Mitch Williams <mitch.a.williams@intel.com>
 M:	Linux NICS <linux.nics@intel.com>
 L:	e1000-devel@lists.sourceforge.net
 W:	http://www.intel.com/support/feedback.htm
 W:	http://e1000.sourceforge.net/
@@ -16,16 +16,6 @@ config CAPI_TRACE
 	  This will increase the size of the kernelcapi module by 20 KB.
 	  If unsure, say Y.
 
-config ISDN_CAPI_MIDDLEWARE
-	bool "CAPI2.0 Middleware support"
-	depends on TTY
-	help
-	  This option will enhance the capabilities of the /dev/capi20
-	  interface. It will provide a means of moving a data connection,
-	  established via the usual /dev/capi20 interface to a special tty
-	  device. If you want to use pppd with pppdcapiplugin to dial up to
-	  your ISP, say Y here.
-
 config ISDN_CAPI_CAPI20
 	tristate "CAPI2.0 /dev/capi support"
 	help
@@ -34,6 +24,16 @@ config ISDN_CAPI_CAPI20
 	  standardized libcapi20 to access this functionality. You should say
 	  Y/M here.
 
+config ISDN_CAPI_MIDDLEWARE
+	bool "CAPI2.0 Middleware support"
+	depends on ISDN_CAPI_CAPI20 && TTY
+	help
+	  This option will enhance the capabilities of the /dev/capi20
+	  interface. It will provide a means of moving a data connection,
+	  established via the usual /dev/capi20 interface to a special tty
+	  device. If you want to use pppd with pppdcapiplugin to dial up to
+	  your ISP, say Y here.
+
 config ISDN_CAPI_CAPIDRV
 	tristate "CAPI2.0 capidrv interface support"
 	depends on ISDN_I4L
@@ -1248,19 +1248,13 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * shared register for the high 32 bits, so only a single, aligned,
 	 * 4 GB physical address range can be used for descriptors.
 	 */
-	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
-	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
 		dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
 	} else {
-		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			err = dma_set_coherent_mask(&pdev->dev,
-						    DMA_BIT_MASK(32));
-			if (err) {
-				dev_err(&pdev->dev,
-					"No usable DMA config, aborting\n");
-				goto out_pci_disable;
-			}
+			dev_err(&pdev->dev, "No usable DMA config, aborting\n");
+			goto out_pci_disable;
 		}
 	}
 
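The alx change above relies on dma_set_mask_and_coherent(), which sets the
streaming and coherent DMA masks in one call. A minimal sketch of the
resulting 64-bit-then-32-bit fallback pattern (hypothetical helper, not the
driver's exact code):

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static int example_set_dma_masks(struct pci_dev *pdev)
    {
            /* Try 64-bit DMA first; one call now covers both masks. */
            if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
                    return 0;

            /* Fall back to 32-bit DMA; a non-zero return means no usable
             * configuration exists and the probe should bail out. */
            return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
    }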
@@ -2436,7 +2436,7 @@ err_reset:
 err_register:
 err_sw_init:
 err_eeprom:
-	iounmap(adapter->hw.hw_addr);
+	pci_iounmap(pdev, adapter->hw.hw_addr);
 err_init_netdev:
 err_ioremap:
 	free_netdev(netdev);
@@ -2474,7 +2474,7 @@ static void atl1e_remove(struct pci_dev *pdev)
 	unregister_netdev(netdev);
 	atl1e_free_ring_resources(adapter);
 	atl1e_force_ps(&adapter->hw);
-	iounmap(adapter->hw.hw_addr);
+	pci_iounmap(pdev, adapter->hw.hw_addr);
 	pci_release_regions(pdev);
 	free_netdev(netdev);
 	pci_disable_device(pdev);
@@ -1,6 +1,6 @@
 /* cnic.c: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2013 Broadcom Corporation
+ * Copyright (c) 2006-2014 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -342,7 +342,7 @@ static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
 	while (retry < 3) {
 		rc = 0;
 		rcu_read_lock();
-		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
+		ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
 		if (ulp_ops)
 			rc = ulp_ops->iscsi_nl_send_msg(
 				cp->ulp_handle[CNIC_ULP_ISCSI],
@@ -726,7 +726,7 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
 
 	for (i = 0; i < dma->num_pages; i++) {
 		if (dma->pg_arr[i]) {
-			dma_free_coherent(&dev->pcidev->dev, BNX2_PAGE_SIZE,
+			dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,
 					  dma->pg_arr[i], dma->pg_map_arr[i]);
 			dma->pg_arr[i] = NULL;
 		}
@@ -785,7 +785,7 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
 
 	for (i = 0; i < pages; i++) {
 		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
-						    BNX2_PAGE_SIZE,
+						    CNIC_PAGE_SIZE,
 						    &dma->pg_map_arr[i],
 						    GFP_ATOMIC);
 		if (dma->pg_arr[i] == NULL)
@@ -794,8 +794,8 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
 	if (!use_pg_tbl)
 		return 0;
 
-	dma->pgtbl_size = ((pages * 8) + BNX2_PAGE_SIZE - 1) &
-			  ~(BNX2_PAGE_SIZE - 1);
+	dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) &
+			  ~(CNIC_PAGE_SIZE - 1);
 	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
 					&dma->pgtbl_map, GFP_ATOMIC);
 	if (dma->pgtbl == NULL)
@@ -900,8 +900,8 @@ static int cnic_alloc_context(struct cnic_dev *dev)
 	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
 		int i, k, arr_size;
 
-		cp->ctx_blk_size = BNX2_PAGE_SIZE;
-		cp->cids_per_blk = BNX2_PAGE_SIZE / 128;
+		cp->ctx_blk_size = CNIC_PAGE_SIZE;
+		cp->cids_per_blk = CNIC_PAGE_SIZE / 128;
 		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
 			   sizeof(struct cnic_ctx);
 		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
@@ -933,7 +933,7 @@ static int cnic_alloc_context(struct cnic_dev *dev)
 		for (i = 0; i < cp->ctx_blks; i++) {
 			cp->ctx_arr[i].ctx =
 				dma_alloc_coherent(&dev->pcidev->dev,
-						   BNX2_PAGE_SIZE,
+						   CNIC_PAGE_SIZE,
 						   &cp->ctx_arr[i].mapping,
 						   GFP_KERNEL);
 			if (cp->ctx_arr[i].ctx == NULL)
@@ -1013,7 +1013,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
 	if (udev->l2_ring)
 		return 0;
 
-	udev->l2_ring_size = pages * BNX2_PAGE_SIZE;
+	udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
 	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
 					   &udev->l2_ring_map,
 					   GFP_KERNEL | __GFP_COMP);
@@ -1021,7 +1021,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
 		return -ENOMEM;
 
 	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
-	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
+	udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
 	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
 					  &udev->l2_buf_map,
 					  GFP_KERNEL | __GFP_COMP);
@@ -1102,7 +1102,7 @@ static int cnic_init_uio(struct cnic_dev *dev)
 		uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
 						     TX_MAX_TSS_RINGS + 1);
 		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
-					PAGE_MASK;
+					CNIC_PAGE_MASK;
 		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
 			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
 		else
@@ -1113,7 +1113,7 @@ static int cnic_init_uio(struct cnic_dev *dev)
 		uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
 
 		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
-			PAGE_MASK;
+			CNIC_PAGE_MASK;
 		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
 
 		uinfo->name = "bnx2x_cnic";
@@ -1267,14 +1267,14 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
 	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
 		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
 
-	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
-		PAGE_SIZE;
+	pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
+		CNIC_PAGE_SIZE;
 
 	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
 	if (ret)
 		return -ENOMEM;
 
-	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
+	n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
 	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
 		long off = CNIC_KWQ16_DATA_SIZE * (i % n);
 
||||
@ -1296,7 +1296,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
|
||||
goto error;
|
||||
}
|
||||
|
||||
pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
|
||||
pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE;
|
||||
ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
|
||||
if (ret)
|
||||
goto error;
|
||||
@@ -1466,8 +1466,8 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
 	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
 			BNX2X_ISCSI_R2TQE_SIZE;
 	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
-	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
-	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
+	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
+	hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
 	cp->num_cqs = req1->num_cqs;
 
 	if (!dev->max_iscsi_conn)
@@ -1477,9 +1477,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
 	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
 		  req1->rq_num_wqes);
 	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
-		  PAGE_SIZE);
+		  CNIC_PAGE_SIZE);
 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
-		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
 	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
 		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
 		  req1->num_tasks_per_conn);
@@ -1489,9 +1489,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
 		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
 		  req1->rq_buffer_size);
 	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
-		  PAGE_SIZE);
+		  CNIC_PAGE_SIZE);
 	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
-		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
 	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
 		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
 		  req1->num_tasks_per_conn);
@@ -1504,9 +1504,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
 
 	/* init Xstorm RAM */
 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
-		  PAGE_SIZE);
+		  CNIC_PAGE_SIZE);
 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
-		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
 		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
 		  req1->num_tasks_per_conn);
@@ -1519,9 +1519,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
 
 	/* init Cstorm RAM */
 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
-		  PAGE_SIZE);
+		  CNIC_PAGE_SIZE);
 	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
-		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
 		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
 		  req1->num_tasks_per_conn);
@@ -1623,18 +1623,18 @@ static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
 	}
 
 	ctx->cid = cid;
-	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;
+	pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE;
 
 	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
 	if (ret)
 		goto error;
 
-	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
+	pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE;
 	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
 	if (ret)
 		goto error;
 
-	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
+	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
 	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
 	if (ret)
 		goto error;
@@ -1760,7 +1760,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
 	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
 	/* TSTORM requires the base address of RQ DB & not PTE */
 	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
-		req2->rq_page_table_addr_lo & PAGE_MASK;
+		req2->rq_page_table_addr_lo & CNIC_PAGE_MASK;
 	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
 		req2->rq_page_table_addr_hi;
 	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
@@ -1842,7 +1842,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
 	/* CSTORM and USTORM initialization is different, CSTORM requires
 	 * CQ DB base & not PTE addr */
 	ictx->cstorm_st_context.cq_db_base.lo =
-		req1->cq_page_table_addr_lo & PAGE_MASK;
+		req1->cq_page_table_addr_lo & CNIC_PAGE_MASK;
 	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
 	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
 	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
@@ -2911,7 +2911,7 @@ static int cnic_l2_completion(struct cnic_local *cp)
 	u16 hw_cons, sw_cons;
 	struct cnic_uio_dev *udev = cp->udev;
 	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
-					(udev->l2_ring + (2 * BNX2_PAGE_SIZE));
+					(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
 	u32 cmd;
 	int comp = 0;
 
@@ -3244,7 +3244,8 @@ static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
 	int rc;
 
 	mutex_lock(&cnic_lock);
-	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
+	ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type],
+					    lockdep_is_held(&cnic_lock));
 	if (ulp_ops && ulp_ops->cnic_get_stats)
 		rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
 	else
|
||||
u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
|
||||
u32 val;
|
||||
|
||||
memset(cp->ctx_arr[i].ctx, 0, BNX2_PAGE_SIZE);
|
||||
memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE);
|
||||
|
||||
CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
|
||||
(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
|
||||
@@ -4628,7 +4629,7 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
 	val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
 
-	rxbd = udev->l2_ring + BNX2_PAGE_SIZE;
+	rxbd = udev->l2_ring + CNIC_PAGE_SIZE;
 	for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
 		dma_addr_t buf_map;
 		int n = (i % cp->l2_rx_ring_size) + 1;
@@ -4639,11 +4640,11 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
 		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
 		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
 	}
-	val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
+	val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
 	rxbd->rx_bd_haddr_hi = val;
 
-	val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
+	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
 	rxbd->rx_bd_haddr_lo = val;
 
@@ -4709,10 +4710,10 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
 
 	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
-	if (BNX2_PAGE_BITS > 12)
+	if (CNIC_PAGE_BITS > 12)
 		val |= (12 - 8)  << 4;
 	else
-		val |= (BNX2_PAGE_BITS - 8) << 4;
+		val |= (CNIC_PAGE_BITS - 8) << 4;
 
 	CNIC_WR(dev, BNX2_MQ_CONFIG, val);
 
@@ -4742,13 +4743,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
 
 	/* Initialize the kernel work queue context. */
 	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
-	      (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
 
-	val = (BNX2_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
+	val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
 
-	val = ((BNX2_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
+	val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
 
 	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
@@ -4768,13 +4769,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
 
 	/* Initialize the kernel complete queue context. */
 	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
-	      (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
 
-	val = (BNX2_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
+	val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
 
-	val = ((BNX2_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
+	val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
 
 	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
@@ -4918,7 +4919,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
 	u32 cli = cp->ethdev->iscsi_l2_client_id;
 	u32 val;
 
-	memset(txbd, 0, BNX2_PAGE_SIZE);
+	memset(txbd, 0, CNIC_PAGE_SIZE);
 
 	buf_map = udev->l2_buf_map;
 	for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
@@ -4978,9 +4979,9 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
 	struct bnx2x *bp = netdev_priv(dev->netdev);
 	struct cnic_uio_dev *udev = cp->udev;
 	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
-				BNX2_PAGE_SIZE);
+				CNIC_PAGE_SIZE);
 	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
-				(udev->l2_ring + (2 * BNX2_PAGE_SIZE));
+				(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
 	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
 	int i;
 	u32 cli = cp->ethdev->iscsi_l2_client_id;
@@ -5004,20 +5005,20 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
 		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
 	}
 
-	val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
+	val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
 	rxbd->addr_hi = cpu_to_le32(val);
 	data->rx.bd_page_base.hi = cpu_to_le32(val);
 
-	val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
+	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
 	rxbd->addr_lo = cpu_to_le32(val);
 	data->rx.bd_page_base.lo = cpu_to_le32(val);
 
 	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
-	val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) >> 32;
+	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32;
 	rxcqe->addr_hi = cpu_to_le32(val);
 	data->rx.cqe_page_base.hi = cpu_to_le32(val);
 
-	val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) & 0xffffffff;
+	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff;
 	rxcqe->addr_lo = cpu_to_le32(val);
 	data->rx.cqe_page_base.lo = cpu_to_le32(val);
 
@@ -5265,8 +5266,8 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
 		msleep(10);
 	}
 	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
-	rx_ring = udev->l2_ring + BNX2_PAGE_SIZE;
-	memset(rx_ring, 0, BNX2_PAGE_SIZE);
+	rx_ring = udev->l2_ring + CNIC_PAGE_SIZE;
+	memset(rx_ring, 0, CNIC_PAGE_SIZE);
 }
 
 static int cnic_register_netdev(struct cnic_dev *dev)
@@ -1,6 +1,6 @@
 /* cnic.h: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2013 Broadcom Corporation
+ * Copyright (c) 2006-2014 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -1,7 +1,7 @@
 
 /* cnic.c: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2013 Broadcom Corporation
+ * Copyright (c) 2006-2014 Broadcom Corporation
  *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -1,6 +1,6 @@
 /* cnic_if.h: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2013 Broadcom Corporation
+ * Copyright (c) 2006-2014 Broadcom Corporation
  *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -14,8 +14,8 @@
 
 #include "bnx2x/bnx2x_mfw_req.h"
 
-#define CNIC_MODULE_VERSION	"2.5.19"
-#define CNIC_MODULE_RELDATE	"December 19, 2013"
+#define CNIC_MODULE_VERSION	"2.5.20"
+#define CNIC_MODULE_RELDATE	"March 14, 2014"
 
 #define CNIC_ULP_RDMA		0
 #define CNIC_ULP_ISCSI		1
@@ -24,6 +24,16 @@
 #define MAX_CNIC_ULP_TYPE_EXT	3
 #define MAX_CNIC_ULP_TYPE	4
 
+/* Use CPU native page size up to 16K for cnic ring sizes. */
+#if (PAGE_SHIFT > 14)
+#define CNIC_PAGE_BITS	14
+#else
+#define CNIC_PAGE_BITS	PAGE_SHIFT
+#endif
+#define CNIC_PAGE_SIZE	(1 << (CNIC_PAGE_BITS))
+#define CNIC_PAGE_ALIGN(addr) ALIGN(addr, CNIC_PAGE_SIZE)
+#define CNIC_PAGE_MASK	(~((CNIC_PAGE_SIZE) - 1))
+
 struct kwqe {
 	u32 kwqe_op_flag;
 
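To see what the new CNIC_PAGE_* macros compute, here is a hedged, standalone
user-space sketch mirroring the header's arithmetic (ALIGN is open-coded and
PAGE_SHIFT is forced to 16 to mimic a 64K-page CPU; these stand-ins are not
the kernel's definitions):

    #include <stdio.h>

    #define PAGE_SHIFT 16                   /* pretend 64K CPU pages */

    #if (PAGE_SHIFT > 14)
    #define CNIC_PAGE_BITS 14
    #else
    #define CNIC_PAGE_BITS PAGE_SHIFT
    #endif
    #define CNIC_PAGE_SIZE (1 << (CNIC_PAGE_BITS))
    #define CNIC_PAGE_ALIGN(addr) \
            (((addr) + CNIC_PAGE_SIZE - 1) & ~((unsigned long)CNIC_PAGE_SIZE - 1))
    #define CNIC_PAGE_MASK (~((CNIC_PAGE_SIZE) - 1))

    int main(void)
    {
            /* The ring page size is capped at 16K even on 64K-page CPUs,
             * keeping the host's layout in step with the chip firmware. */
            printf("CNIC_PAGE_SIZE = %d\n", CNIC_PAGE_SIZE);            /* 16384 */
            printf("align(20000)   = %lu\n", CNIC_PAGE_ALIGN(20000UL)); /* 32768 */
            return 0;
    }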
@@ -23,6 +23,7 @@
 #include <linux/crc32.h>
 #include <linux/mii.h>
 #include <linux/eeprom_93cx6.h>
+#include <linux/regulator/consumer.h>
 
 #include <linux/spi/spi.h>
 
@@ -83,6 +84,7 @@ union ks8851_tx_hdr {
  * @rc_rxqcr: Cached copy of KS_RXQCR.
  * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
  * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
+ * @vdd_reg: Optional regulator supplying the chip
  *
 * The @lock ensures that the chip is protected when certain operations are
 * in progress. When the read or write packet transfer is in progress, most
@@ -130,6 +132,7 @@ struct ks8851_net {
 	struct spi_transfer	spi_xfer2[2];
 
 	struct eeprom_93cx6	eeprom;
+	struct regulator	*vdd_reg;
 };
 
 static int msg_enable;
@@ -1414,6 +1417,21 @@ static int ks8851_probe(struct spi_device *spi)
 	ks->spidev = spi;
 	ks->tx_space = 6144;
 
+	ks->vdd_reg = regulator_get_optional(&spi->dev, "vdd");
+	if (IS_ERR(ks->vdd_reg)) {
+		ret = PTR_ERR(ks->vdd_reg);
+		if (ret == -EPROBE_DEFER)
+			goto err_reg;
+	} else {
+		ret = regulator_enable(ks->vdd_reg);
+		if (ret) {
+			dev_err(&spi->dev, "regulator enable fail: %d\n",
+				ret);
+			goto err_reg_en;
+		}
+	}
+
+
 	mutex_init(&ks->lock);
 	spin_lock_init(&ks->statelock);
 
@@ -1508,8 +1526,14 @@ static int ks8851_probe(struct spi_device *spi)
 err_netdev:
 	free_irq(ndev->irq, ks);
 
-err_id:
 err_irq:
+err_id:
+	if (!IS_ERR(ks->vdd_reg))
+		regulator_disable(ks->vdd_reg);
+err_reg_en:
+	if (!IS_ERR(ks->vdd_reg))
+		regulator_put(ks->vdd_reg);
+err_reg:
 	free_netdev(ndev);
 	return ret;
 }
@@ -1523,6 +1547,10 @@ static int ks8851_remove(struct spi_device *spi)
 
 	unregister_netdev(priv->netdev);
 	free_irq(spi->irq, priv);
+	if (!IS_ERR(priv->vdd_reg)) {
+		regulator_disable(priv->vdd_reg);
+		regulator_put(priv->vdd_reg);
+	}
 	free_netdev(priv->netdev);
 
 	return 0;
@@ -2229,10 +2229,6 @@ static int cpsw_probe(struct platform_device *pdev)
 		goto clean_ale_ret;
 	}
 
-	if (cpts_register(&pdev->dev, priv->cpts,
-			  data->cpts_clock_mult, data->cpts_clock_shift))
-		dev_err(priv->dev, "error registering cpts device\n");
-
 	cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n",
 		    &ss_res->start, ndev->irq);
 
@@ -355,7 +355,7 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
 	int i;
 
 	spin_lock_irqsave(&ctlr->lock, flags);
-	if (ctlr->state != CPDMA_STATE_ACTIVE) {
+	if (ctlr->state == CPDMA_STATE_TEARDOWN) {
 		spin_unlock_irqrestore(&ctlr->lock, flags);
 		return -EINVAL;
 	}
@@ -891,7 +891,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
 	unsigned	timeout;
 
 	spin_lock_irqsave(&chan->lock, flags);
-	if (chan->state != CPDMA_STATE_ACTIVE) {
+	if (chan->state == CPDMA_STATE_TEARDOWN) {
 		spin_unlock_irqrestore(&chan->lock, flags);
 		return -EINVAL;
 	}
@@ -1532,9 +1532,9 @@ static int emac_dev_open(struct net_device *ndev)
 	struct device *emac_dev = &ndev->dev;
 	u32 cnt;
 	struct resource *res;
-	int ret;
+	int q, m, ret;
+	int res_num = 0, irq_num = 0;
 	int i = 0;
-	int k = 0;
 	struct emac_priv *priv = netdev_priv(ndev);
 
 	pm_runtime_get(&priv->pdev->dev);
@@ -1564,15 +1564,24 @@ static int emac_dev_open(struct net_device *ndev)
 	}
 
 	/* Request IRQ */
+	while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ,
+					    res_num))) {
+		for (irq_num = res->start; irq_num <= res->end; irq_num++) {
+			dev_err(emac_dev, "Request IRQ %d\n", irq_num);
+			if (request_irq(irq_num, emac_irq, 0, ndev->name,
+					ndev)) {
+				dev_err(emac_dev,
+					"DaVinci EMAC: request_irq() failed\n");
+				ret = -EBUSY;
 
-	while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
-		for (i = res->start; i <= res->end; i++) {
-			if (devm_request_irq(&priv->pdev->dev, i, emac_irq,
-					     0, ndev->name, ndev))
 				goto rollback;
+			}
 		}
-		k++;
+		res_num++;
 	}
+	/* prepare counters for rollback in case of an error */
+	res_num--;
+	irq_num--;
 
 	/* Start/Enable EMAC hardware */
 	emac_hw_enable(priv);
@@ -1639,11 +1648,23 @@ static int emac_dev_open(struct net_device *ndev)
 
 	return 0;
 
-rollback:
-
-	dev_err(emac_dev, "DaVinci EMAC: devm_request_irq() failed");
-	ret = -EBUSY;
 err:
 	emac_int_disable(priv);
 	napi_disable(&priv->napi);
 
+rollback:
+	for (q = res_num; q >= 0; q--) {
+		res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, q);
+		/* at the first iteration, irq_num is already set to the
+		 * right value
+		 */
+		if (q != res_num)
+			irq_num = res->end;
+
+		for (m = irq_num; m >= res->start; m--)
+			free_irq(m, ndev);
+	}
+	cpdma_ctlr_stop(priv->dma);
 	pm_runtime_put(&priv->pdev->dev);
 	return ret;
 }
@@ -1659,6 +1680,9 @@ err:
 */
static int emac_dev_stop(struct net_device *ndev)
{
+	struct resource *res;
+	int i = 0;
+	int irq_num;
 	struct emac_priv *priv = netdev_priv(ndev);
 	struct device *emac_dev = &ndev->dev;
 
@@ -1674,6 +1698,13 @@ static int emac_dev_stop(struct net_device *ndev)
 	if (priv->phydev)
 		phy_disconnect(priv->phydev);
 
+	/* Free IRQ */
+	while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
+		for (irq_num = res->start; irq_num <= res->end; irq_num++)
+			free_irq(irq_num, priv->ndev);
+		i++;
+	}
+
 	if (netif_msg_drv(priv))
 		dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name);
 
@@ -923,7 +923,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc) {
 		dev_err(&pdev->dev,
 			"32-bit PCI DMA addresses not supported by the card!?\n");
-		goto err_out;
+		goto err_out_pci_disable;
 	}
 
 	/* sanity check */
@@ -931,7 +931,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	    (pci_resource_len(pdev, 1) < io_size)) {
 		rc = -EIO;
 		dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
-		goto err_out;
+		goto err_out_pci_disable;
 	}
 
 	pioaddr = pci_resource_start(pdev, 0);
@@ -942,7 +942,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	dev = alloc_etherdev(sizeof(struct rhine_private));
 	if (!dev) {
 		rc = -ENOMEM;
-		goto err_out;
+		goto err_out_pci_disable;
 	}
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
@@ -1084,6 +1084,8 @@ err_out_free_res:
 	pci_release_regions(pdev);
 err_out_free_netdev:
 	free_netdev(dev);
+err_out_pci_disable:
+	pci_disable_device(pdev);
 err_out:
 	return rc;
 }
@@ -683,10 +683,9 @@ EXPORT_SYMBOL(phy_detach);
 int phy_suspend(struct phy_device *phydev)
 {
 	struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver);
-	struct ethtool_wolinfo wol;
+	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
 
 	/* If the device has WOL enabled, we cannot suspend the PHY */
-	wol.cmd = ETHTOOL_GWOL;
 	phy_ethtool_get_wol(phydev, &wol);
 	if (wol.wolopts)
 		return -EBUSY;
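The phy_suspend() fix works because a designated initializer zeroes every
member it does not name, so stack garbage can no longer masquerade as an
enabled WOL option when a driver's get_wol() only fills in some fields. A
standalone illustration of the pattern (stand-in struct and values, not the
real ethtool definitions):

    struct wolinfo {                /* stand-in for struct ethtool_wolinfo */
            unsigned int cmd;
            unsigned int supported;
            unsigned int wolopts;
    };

    /* Like some drivers' get_wol(): fills only what it supports and
     * leaves the rest untouched. */
    static void driver_get_wol(struct wolinfo *w)
    {
            w->supported = 0;       /* note: w->wolopts never written */
    }

    int can_suspend(void)
    {
            /* { .cmd = ... } zero-initializes supported and wolopts. */
            struct wolinfo wol = { .cmd = 5 /* stand-in for ETHTOOL_GWOL */ };

            driver_get_wol(&wol);
            return wol.wolopts == 0;        /* now reliably true */
    }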
@@ -68,7 +68,6 @@ static struct usb_driver cdc_ncm_driver;
 static int cdc_ncm_setup(struct usbnet *dev)
 {
 	struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
-	struct usb_cdc_ncm_ntb_parameters ncm_parm;
 	u32 val;
 	u8 flags;
 	u8 iface_no;
@@ -82,22 +81,22 @@ static int cdc_ncm_setup(struct usbnet *dev)
 	err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS,
 			      USB_TYPE_CLASS | USB_DIR_IN
 			      |USB_RECIP_INTERFACE,
-			      0, iface_no, &ncm_parm,
-			      sizeof(ncm_parm));
+			      0, iface_no, &ctx->ncm_parm,
+			      sizeof(ctx->ncm_parm));
 	if (err < 0) {
 		dev_err(&dev->intf->dev, "failed GET_NTB_PARAMETERS\n");
 		return err; /* GET_NTB_PARAMETERS is required */
 	}
 
 	/* read correct set of parameters according to device mode */
-	ctx->rx_max = le32_to_cpu(ncm_parm.dwNtbInMaxSize);
-	ctx->tx_max = le32_to_cpu(ncm_parm.dwNtbOutMaxSize);
-	ctx->tx_remainder = le16_to_cpu(ncm_parm.wNdpOutPayloadRemainder);
-	ctx->tx_modulus = le16_to_cpu(ncm_parm.wNdpOutDivisor);
-	ctx->tx_ndp_modulus = le16_to_cpu(ncm_parm.wNdpOutAlignment);
+	ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize);
+	ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
+	ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
+	ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
+	ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
 	/* devices prior to NCM Errata shall set this field to zero */
-	ctx->tx_max_datagrams = le16_to_cpu(ncm_parm.wNtbOutMaxDatagrams);
-	ntb_fmt_supported = le16_to_cpu(ncm_parm.bmNtbFormatsSupported);
+	ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
+	ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
 
 	/* there are some minor differences in NCM and MBIM defaults */
 	if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) {
@@ -146,7 +145,7 @@ static int cdc_ncm_setup(struct usbnet *dev)
 	}
 
 	/* inform device about NTB input size changes */
-	if (ctx->rx_max != le32_to_cpu(ncm_parm.dwNtbInMaxSize)) {
+	if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
 		__le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
 
 		err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
@@ -162,14 +161,6 @@ static int cdc_ncm_setup(struct usbnet *dev)
 		dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n",
 			CDC_NCM_NTB_MAX_SIZE_TX);
 		ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX;
-
-		/* Adding a pad byte here simplifies the handling in
-		 * cdc_ncm_fill_tx_frame, by making tx_max always
-		 * represent the real skb max size.
-		 */
-		if (ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
-			ctx->tx_max++;
-
 	}
 
 	/*
@@ -439,6 +430,10 @@ advance:
 		goto error2;
 	}
 
+	/* initialize data interface */
+	if (cdc_ncm_setup(dev))
+		goto error2;
+
 	/* configure data interface */
 	temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
 	if (temp) {
@@ -453,12 +448,6 @@ advance:
 		goto error2;
 	}
 
-	/* initialize data interface */
-	if (cdc_ncm_setup(dev)) {
-		dev_dbg(&intf->dev, "cdc_ncm_setup() failed\n");
-		goto error2;
-	}
-
 	usb_set_intfdata(ctx->data, dev);
 	usb_set_intfdata(ctx->control, dev);
 
@@ -475,6 +464,15 @@ advance:
 	dev->hard_mtu = ctx->tx_max;
 	dev->rx_urb_size = ctx->rx_max;
 
+	/* cdc_ncm_setup will override dwNtbOutMaxSize if it is
+	 * outside the sane range. Adding a pad byte here if necessary
+	 * simplifies the handling in cdc_ncm_fill_tx_frame, making
+	 * tx_max always represent the real skb max size.
+	 */
+	if (ctx->tx_max != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) &&
+	    ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
+		ctx->tx_max++;
+
 	return 0;
 
 error2:
@@ -1318,6 +1318,9 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
 
 		neigh_release(n);
 
+		if (reply == NULL)
+			goto out;
+
 		skb_reset_mac_header(reply);
 		__skb_pull(reply, skb_network_offset(reply));
 		reply->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1339,15 +1342,103 @@ out:
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
+
+static struct sk_buff *vxlan_na_create(struct sk_buff *request,
+	struct neighbour *n, bool isrouter)
+{
+	struct net_device *dev = request->dev;
+	struct sk_buff *reply;
+	struct nd_msg *ns, *na;
+	struct ipv6hdr *pip6;
+	u8 *daddr;
+	int na_olen = 8; /* opt hdr + ETH_ALEN for target */
+	int ns_olen;
+	int i, len;
+
+	if (dev == NULL)
+		return NULL;
+
+	len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
+		sizeof(*na) + na_olen + dev->needed_tailroom;
+	reply = alloc_skb(len, GFP_ATOMIC);
+	if (reply == NULL)
+		return NULL;
+
+	reply->protocol = htons(ETH_P_IPV6);
+	reply->dev = dev;
+	skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
+	skb_push(reply, sizeof(struct ethhdr));
+	skb_set_mac_header(reply, 0);
+
+	ns = (struct nd_msg *)skb_transport_header(request);
+
+	daddr = eth_hdr(request)->h_source;
+	ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
+	for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
+		if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
+			daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
+			break;
+		}
+	}
+
+	/* Ethernet header */
+	ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
+	ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
+	eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
+	reply->protocol = htons(ETH_P_IPV6);
+
+	skb_pull(reply, sizeof(struct ethhdr));
+	skb_set_network_header(reply, 0);
+	skb_put(reply, sizeof(struct ipv6hdr));
+
+	/* IPv6 header */
+
+	pip6 = ipv6_hdr(reply);
+	memset(pip6, 0, sizeof(struct ipv6hdr));
+	pip6->version = 6;
+	pip6->priority = ipv6_hdr(request)->priority;
+	pip6->nexthdr = IPPROTO_ICMPV6;
+	pip6->hop_limit = 255;
+	pip6->daddr = ipv6_hdr(request)->saddr;
+	pip6->saddr = *(struct in6_addr *)n->primary_key;
+
+	skb_pull(reply, sizeof(struct ipv6hdr));
+	skb_set_transport_header(reply, 0);
+
+	na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);
+
+	/* Neighbor Advertisement */
+	memset(na, 0, sizeof(*na)+na_olen);
+	na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
+	na->icmph.icmp6_router = isrouter;
+	na->icmph.icmp6_override = 1;
+	na->icmph.icmp6_solicited = 1;
+	na->target = ns->target;
+	ether_addr_copy(&na->opt[2], n->ha);
+	na->opt[0] = ND_OPT_TARGET_LL_ADDR;
+	na->opt[1] = na_olen >> 3;
+
+	na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
+		&pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
+		csum_partial(na, sizeof(*na)+na_olen, 0));
+
+	pip6->payload_len = htons(sizeof(*na)+na_olen);
+
+	skb_push(reply, sizeof(struct ipv6hdr));
+
+	reply->ip_summed = CHECKSUM_UNNECESSARY;
+
+	return reply;
+}
+
 static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
 {
 	struct vxlan_dev *vxlan = netdev_priv(dev);
-	struct neighbour *n;
-	union vxlan_addr ipa;
+	struct nd_msg *msg;
 	const struct ipv6hdr *iphdr;
 	const struct in6_addr *saddr, *daddr;
-	struct nd_msg *msg;
-	struct inet6_dev *in6_dev = NULL;
+	struct neighbour *n;
+	struct inet6_dev *in6_dev;
 
 	in6_dev = __in6_dev_get(dev);
 	if (!in6_dev)
@@ -1360,19 +1451,20 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
 	saddr = &iphdr->saddr;
 	daddr = &iphdr->daddr;
 
-	if (ipv6_addr_loopback(daddr) ||
-	    ipv6_addr_is_multicast(daddr))
-		goto out;
-
 	msg = (struct nd_msg *)skb_transport_header(skb);
 	if (msg->icmph.icmp6_code != 0 ||
 	    msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
 		goto out;
 
-	n = neigh_lookup(ipv6_stub->nd_tbl, daddr, dev);
+	if (ipv6_addr_loopback(daddr) ||
+	    ipv6_addr_is_multicast(&msg->target))
+		goto out;
+
+	n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);
 
 	if (n) {
 		struct vxlan_fdb *f;
+		struct sk_buff *reply;
 
 		if (!(n->nud_state & NUD_CONNECTED)) {
 			neigh_release(n);
@@ -1386,13 +1478,23 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
 			goto out;
 		}
 
-		ipv6_stub->ndisc_send_na(dev, n, saddr, &msg->target,
-					 !!in6_dev->cnf.forwarding,
-					 true, false, false);
+		reply = vxlan_na_create(skb, n,
+					!!(f ? f->flags & NTF_ROUTER : 0));
+
 		neigh_release(n);
+
+		if (reply == NULL)
+			goto out;
+
+		if (netif_rx_ni(reply) == NET_RX_DROP)
+			dev->stats.rx_dropped++;
+
 	} else if (vxlan->flags & VXLAN_F_L3MISS) {
-		ipa.sin6.sin6_addr = *daddr;
-		ipa.sa.sa_family = AF_INET6;
+		union vxlan_addr ipa = {
+			.sin6.sin6_addr = msg->target,
+			.sa.sa_family = AF_INET6,
+		};
+
 		vxlan_ip_miss(dev, &ipa);
 	}
 
@@ -1548,6 +1548,7 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
 		if (reg != last_val)
 			return true;
 
+		udelay(1);
 		last_val = reg;
 		if ((reg & 0x7E7FFFEF) == 0x00702400)
 			continue;
@@ -1560,8 +1561,6 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
 		default:
 			return true;
 		}
 
-		udelay(1);
-
 	} while (count-- > 0);
 
 	return false;
@@ -2063,7 +2063,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
 
 	ATH_TXBUF_RESET(bf);
 
-	if (tid) {
+	if (tid && ieee80211_is_data_present(hdr->frame_control)) {
 		fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
 		seqno = tid->seq_next;
 		hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
@@ -2186,7 +2186,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
 		txq->stopped = true;
 	}
 
-	if (txctl->an)
+	if (txctl->an && ieee80211_is_data_present(hdr->frame_control))
 		tid = ath_get_skb_tid(sc, txctl->an, skb);
 
 	if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {
@@ -1948,8 +1948,10 @@ static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus,
 		if (pkt_pad == NULL)
 			return -ENOMEM;
 		ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad);
-		if (unlikely(ret < 0))
+		if (unlikely(ret < 0)) {
+			kfree_skb(pkt_pad);
 			return ret;
+		}
 		memcpy(pkt_pad->data,
 		       pkt->data + pkt->len - tail_chop,
 		       tail_chop);
@@ -5460,14 +5460,15 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
 
 	rt2800_bbp_write(rt2x00dev, 68, 0x0b);
 
-	rt2800_bbp_write(rt2x00dev, 69, 0x0d);
-	rt2800_bbp_write(rt2x00dev, 70, 0x06);
+	rt2800_bbp_write(rt2x00dev, 69, 0x12);
 	rt2800_bbp_write(rt2x00dev, 73, 0x13);
 	rt2800_bbp_write(rt2x00dev, 75, 0x46);
 	rt2800_bbp_write(rt2x00dev, 76, 0x28);
 
 	rt2800_bbp_write(rt2x00dev, 77, 0x59);
 
+	rt2800_bbp_write(rt2x00dev, 70, 0x0a);
+
 	rt2800_bbp_write(rt2x00dev, 79, 0x13);
 	rt2800_bbp_write(rt2x00dev, 80, 0x05);
 	rt2800_bbp_write(rt2x00dev, 81, 0x33);
@@ -5510,7 +5511,6 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
 	if (rt2x00_rt(rt2x00dev, RT5392)) {
 		rt2800_bbp_write(rt2x00dev, 134, 0xd0);
 		rt2800_bbp_write(rt2x00dev, 135, 0xf6);
-		rt2800_bbp_write(rt2x00dev, 148, 0x84);
 	}
 
 	rt2800_disable_unused_dac_adc(rt2x00dev);
@@ -594,13 +594,13 @@ static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
 		mp_req->mp_resp_bd = NULL;
 	}
 	if (mp_req->req_buf) {
-		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 				  mp_req->req_buf,
 				  mp_req->req_buf_dma);
 		mp_req->req_buf = NULL;
 	}
 	if (mp_req->resp_buf) {
-		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 				  mp_req->resp_buf,
 				  mp_req->resp_buf_dma);
 		mp_req->resp_buf = NULL;
@@ -622,7 +622,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
 
 	mp_req->req_len = sizeof(struct fcp_cmnd);
 	io_req->data_xfer_len = mp_req->req_len;
-	mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+	mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 					     &mp_req->req_buf_dma,
 					     GFP_ATOMIC);
 	if (!mp_req->req_buf) {
@@ -631,7 +631,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
 		return FAILED;
 	}
 
-	mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+	mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 					      &mp_req->resp_buf_dma,
 					      GFP_ATOMIC);
 	if (!mp_req->resp_buf) {
@@ -639,8 +639,8 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
 		bnx2fc_free_mp_resc(io_req);
 		return FAILED;
 	}
-	memset(mp_req->req_buf, 0, PAGE_SIZE);
-	memset(mp_req->resp_buf, 0, PAGE_SIZE);
+	memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE);
+	memset(mp_req->resp_buf, 0, CNIC_PAGE_SIZE);
 
 	/* Allocate and map mp_req_bd and mp_resp_bd */
 	sz = sizeof(struct fcoe_bd_ctx);
@@ -665,7 +665,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
 	mp_req_bd = mp_req->mp_req_bd;
 	mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
 	mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
-	mp_req_bd->buf_len = PAGE_SIZE;
+	mp_req_bd->buf_len = CNIC_PAGE_SIZE;
 	mp_req_bd->flags = 0;
 
 	/*
@@ -677,7 +677,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
 	addr = mp_req->resp_buf_dma;
 	mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
 	mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
-	mp_resp_bd->buf_len = PAGE_SIZE;
+	mp_resp_bd->buf_len = CNIC_PAGE_SIZE;
 	mp_resp_bd->flags = 0;
 
 	return SUCCESS;
@@ -673,7 +673,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 
 	/* Allocate and map SQ */
 	tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE;
-	tgt->sq_mem_size = (tgt->sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+	tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+			   CNIC_PAGE_MASK;
 
 	tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
 				     &tgt->sq_dma, GFP_KERNEL);
@@ -686,7 +687,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 
 	/* Allocate and map CQ */
 	tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
-	tgt->cq_mem_size = (tgt->cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+	tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+			   CNIC_PAGE_MASK;
 
 	tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
 				     &tgt->cq_dma, GFP_KERNEL);
@@ -699,7 +701,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 
 	/* Allocate and map RQ and RQ PBL */
 	tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
-	tgt->rq_mem_size = (tgt->rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+	tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+			   CNIC_PAGE_MASK;
 
 	tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
 				     &tgt->rq_dma, GFP_KERNEL);
@@ -710,8 +713,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	}
 	memset(tgt->rq, 0, tgt->rq_mem_size);
 
-	tgt->rq_pbl_size = (tgt->rq_mem_size / PAGE_SIZE) * sizeof(void *);
-	tgt->rq_pbl_size = (tgt->rq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+	tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
+	tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
+			   CNIC_PAGE_MASK;
 
 	tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
 					 &tgt->rq_pbl_dma, GFP_KERNEL);
@@ -722,7 +726,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	}
 
 	memset(tgt->rq_pbl, 0, tgt->rq_pbl_size);
-	num_pages = tgt->rq_mem_size / PAGE_SIZE;
+	num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE;
 	page = tgt->rq_dma;
 	pbl = (u32 *)tgt->rq_pbl;
 
@@ -731,13 +735,13 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 		pbl++;
 		*pbl = (u32)((u64)page >> 32);
 		pbl++;
-		page += PAGE_SIZE;
+		page += CNIC_PAGE_SIZE;
 	}
 
 	/* Allocate and map XFERQ */
 	tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE;
-	tgt->xferq_mem_size = (tgt->xferq_mem_size + (PAGE_SIZE - 1)) &
-			       PAGE_MASK;
+	tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+			       CNIC_PAGE_MASK;
 
 	tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
 					&tgt->xferq_dma, GFP_KERNEL);
@@ -750,8 +754,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 
 	/* Allocate and map CONFQ & CONFQ PBL */
 	tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
-	tgt->confq_mem_size = (tgt->confq_mem_size + (PAGE_SIZE - 1)) &
-			       PAGE_MASK;
+	tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+			       CNIC_PAGE_MASK;
 
 	tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
 					&tgt->confq_dma, GFP_KERNEL);
@@ -763,9 +767,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	memset(tgt->confq, 0, tgt->confq_mem_size);
 
 	tgt->confq_pbl_size =
-		(tgt->confq_mem_size / PAGE_SIZE) * sizeof(void *);
+		(tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
 	tgt->confq_pbl_size =
-		(tgt->confq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+		(tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
 
 	tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
 					    tgt->confq_pbl_size,
@@ -777,7 +781,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	}
 
 	memset(tgt->confq_pbl, 0, tgt->confq_pbl_size);
-	num_pages = tgt->confq_mem_size / PAGE_SIZE;
+	num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE;
 	page = tgt->confq_dma;
 	pbl = (u32 *)tgt->confq_pbl;
 
@@ -786,7 +790,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 		pbl++;
 		*pbl = (u32)((u64)page >> 32);
 		pbl++;
-		page += PAGE_SIZE;
+		page += CNIC_PAGE_SIZE;
 	}
 
 	/* Allocate and map ConnDB */
@@ -805,8 +809,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 
 	/* Allocate and map LCQ */
 	tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE;
-	tgt->lcq_mem_size = (tgt->lcq_mem_size + (PAGE_SIZE - 1)) &
-			     PAGE_MASK;
+	tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+			     CNIC_PAGE_MASK;
 
 	tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
 				      &tgt->lcq_dma, GFP_KERNEL);
@ -61,7 +61,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
|
||||
* yield integral num of page buffers
|
||||
*/
|
||||
/* adjust SQ */
|
||||
num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
|
||||
num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
|
||||
if (hba->max_sqes < num_elements_per_pg)
|
||||
hba->max_sqes = num_elements_per_pg;
|
||||
else if (hba->max_sqes % num_elements_per_pg)
|
||||
@ -69,7 +69,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
|
||||
~(num_elements_per_pg - 1);
|
||||
|
||||
/* adjust CQ */
|
||||
num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE;
|
||||
num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_CQE_SIZE;
|
||||
if (hba->max_cqes < num_elements_per_pg)
|
||||
hba->max_cqes = num_elements_per_pg;
|
||||
else if (hba->max_cqes % num_elements_per_pg)
|
||||
@ -77,7 +77,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
|
||||
~(num_elements_per_pg - 1);
|
||||
|
||||
/* adjust RQ */
|
||||
num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
|
||||
num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
|
||||
if (hba->max_rqes < num_elements_per_pg)
|
||||
hba->max_rqes = num_elements_per_pg;
|
||||
else if (hba->max_rqes % num_elements_per_pg)
|
||||
@@ -959,7 +959,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
 
 	/* SQ page table */
 	memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size);
-	num_pages = ep->qp.sq_mem_size / PAGE_SIZE;
+	num_pages = ep->qp.sq_mem_size / CNIC_PAGE_SIZE;
 	page = ep->qp.sq_phys;
 
 	if (cnic_dev_10g)
@@ -973,7 +973,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
 			ptbl++;
 			*ptbl = (u32) ((u64) page >> 32);
 			ptbl++;
-			page += PAGE_SIZE;
+			page += CNIC_PAGE_SIZE;
 		} else {
 			/* PTE is written in big endian format for
 			 * 5706/5708/5709 devices */
@@ -981,13 +981,13 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
 			ptbl++;
 			*ptbl = (u32) page;
 			ptbl++;
-			page += PAGE_SIZE;
+			page += CNIC_PAGE_SIZE;
 		}
 	}
 
 	/* RQ page table */
 	memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size);
-	num_pages = ep->qp.rq_mem_size / PAGE_SIZE;
+	num_pages = ep->qp.rq_mem_size / CNIC_PAGE_SIZE;
 	page = ep->qp.rq_phys;
 
 	if (cnic_dev_10g)
@@ -1001,7 +1001,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
 			ptbl++;
 			*ptbl = (u32) ((u64) page >> 32);
 			ptbl++;
-			page += PAGE_SIZE;
+			page += CNIC_PAGE_SIZE;
 		} else {
 			/* PTE is written in big endian format for
 			 * 5706/5708/5709 devices */
@@ -1009,13 +1009,13 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
 			ptbl++;
 			*ptbl = (u32) page;
 			ptbl++;
-			page += PAGE_SIZE;
+			page += CNIC_PAGE_SIZE;
 		}
 	}
 
 	/* CQ page table */
 	memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size);
-	num_pages = ep->qp.cq_mem_size / PAGE_SIZE;
+	num_pages = ep->qp.cq_mem_size / CNIC_PAGE_SIZE;
 	page = ep->qp.cq_phys;
 
 	if (cnic_dev_10g)
@@ -1029,7 +1029,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
 			ptbl++;
 			*ptbl = (u32) ((u64) page >> 32);
 			ptbl++;
-			page += PAGE_SIZE;
+			page += CNIC_PAGE_SIZE;
 		} else {
 			/* PTE is written in big endian format for
 			 * 5706/5708/5709 devices */
@@ -1037,7 +1037,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
 			ptbl++;
 			*ptbl = (u32) page;
 			ptbl++;
-			page += PAGE_SIZE;
+			page += CNIC_PAGE_SIZE;
 		}
 	}
 }
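The page-table hunks above split each page's 64-bit DMA address into two 32-bit words; the 10G devices take them in native order while the older 5706/5708/5709 parts expect big-endian PTEs. A small sketch of the split, with a hypothetical bus address:

/* Sketch of the 64-bit address split written into the page table;
 * the address value is made up for illustration. */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        uint64_t page = 0x00000001f0003000ULL;  /* hypothetical DMA address */
        uint32_t lo = (uint32_t) page;          /* low 32 bits */
        uint32_t hi = (uint32_t) (page >> 32);  /* high 32 bits */

        printf("lo=0x%08" PRIx32 " hi=0x%08" PRIx32 "\n", lo, hi);
        return 0;
}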
@@ -1064,11 +1064,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
 	/* Allocate page table memory for SQ which is page aligned */
 	ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE;
 	ep->qp.sq_mem_size =
-		(ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+		(ep->qp.sq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
 	ep->qp.sq_pgtbl_size =
-		(ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *);
+		(ep->qp.sq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
 	ep->qp.sq_pgtbl_size =
-		(ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+		(ep->qp.sq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
 
 	ep->qp.sq_pgtbl_virt =
 		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
@@ -1101,11 +1101,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
 	/* Allocate page table memory for CQ which is page aligned */
 	ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE;
 	ep->qp.cq_mem_size =
-		(ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+		(ep->qp.cq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
 	ep->qp.cq_pgtbl_size =
-		(ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *);
+		(ep->qp.cq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
 	ep->qp.cq_pgtbl_size =
-		(ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+		(ep->qp.cq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
 
 	ep->qp.cq_pgtbl_virt =
 		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
@@ -1144,11 +1144,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
 	/* Allocate page table memory for RQ which is page aligned */
 	ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE;
 	ep->qp.rq_mem_size =
-		(ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+		(ep->qp.rq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
 	ep->qp.rq_pgtbl_size =
-		(ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *);
+		(ep->qp.rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
 	ep->qp.rq_pgtbl_size =
-		(ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+		(ep->qp.rq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
 
 	ep->qp.rq_pgtbl_virt =
 		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
@@ -1270,7 +1270,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
 	bnx2i_adjust_qp_size(hba);
 
 	iscsi_init.flags =
-		ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
+		(CNIC_PAGE_BITS - 8) << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
 	if (en_tcp_dack)
 		iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE;
 	iscsi_init.reserved0 = 0;
@@ -1288,15 +1288,15 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
 		((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
 	iscsi_init.num_ccells_per_conn = hba->num_ccell;
 	iscsi_init.num_tasks_per_conn = hba->max_sqes;
-	iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
+	iscsi_init.sq_wqes_per_page = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
 	iscsi_init.sq_num_wqes = hba->max_sqes;
 	iscsi_init.cq_log_wqes_per_page =
-		(u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE);
+		(u8) bnx2i_power_of2(CNIC_PAGE_SIZE / BNX2I_CQE_SIZE);
 	iscsi_init.cq_num_wqes = hba->max_cqes;
 	iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE +
-				   (PAGE_SIZE - 1)) / PAGE_SIZE;
+				   (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE;
 	iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE +
-				   (PAGE_SIZE - 1)) / PAGE_SIZE;
+				   (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE;
 	iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE;
 	iscsi_init.rq_num_wqes = hba->max_rqes;
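The init message above packs per-page WQE counts that now derive from CNIC_PAGE_SIZE. A worked example of the arithmetic with an illustrative 64-byte WQE and a 2048-entry queue (the real BNX2I_*_WQE_SIZE constants live in the driver headers, so treat these numbers as assumptions):

/* Worked example of the WQEs-per-page and page-count arithmetic. */
#include <stdio.h>

#define CNIC_PAGE_SIZE 4096UL

int main(void)
{
        unsigned long wqe_size = 64;    /* hypothetical WQE size in bytes */
        unsigned long max_sqes = 2048;  /* hypothetical queue depth */

        unsigned long wqes_per_page = CNIC_PAGE_SIZE / wqe_size;
        unsigned long sq_num_pages =
                (max_sqes * wqe_size + (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE;

        printf("%lu WQEs/page, %lu pages\n", wqes_per_page, sq_num_pages);
        return 0;       /* prints: 64 WQEs/page, 32 pages */
}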
@@ -525,7 +525,7 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
 	struct iscsi_bd *mp_bdt;
 	u64 addr;
 
-	hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+	hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 					    &hba->mp_bd_dma, GFP_KERNEL);
 	if (!hba->mp_bd_tbl) {
 		printk(KERN_ERR "unable to allocate Middle Path BDT\n");
@@ -533,11 +533,12 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
 		goto out;
 	}
 
-	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
+					       CNIC_PAGE_SIZE,
 					       &hba->dummy_buf_dma, GFP_KERNEL);
 	if (!hba->dummy_buffer) {
 		printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
-		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 				  hba->mp_bd_tbl, hba->mp_bd_dma);
 		hba->mp_bd_tbl = NULL;
 		rc = -1;
@@ -548,7 +549,7 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
 	addr = (unsigned long) hba->dummy_buf_dma;
 	mp_bdt->buffer_addr_lo = addr & 0xffffffff;
 	mp_bdt->buffer_addr_hi = addr >> 32;
-	mp_bdt->buffer_length = PAGE_SIZE;
+	mp_bdt->buffer_length = CNIC_PAGE_SIZE;
 	mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
 			ISCSI_BD_FIRST_IN_BD_CHAIN;
 out:
@@ -565,12 +566,12 @@ out:
 static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
 {
 	if (hba->mp_bd_tbl) {
-		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 				  hba->mp_bd_tbl, hba->mp_bd_dma);
 		hba->mp_bd_tbl = NULL;
 	}
 	if (hba->dummy_buffer) {
-		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 				  hba->dummy_buffer, hba->dummy_buf_dma);
 		hba->dummy_buffer = NULL;
 	}
@@ -934,14 +935,14 @@ static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
 					    struct bnx2i_conn *bnx2i_conn)
 {
 	if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
-		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 				  bnx2i_conn->gen_pdu.resp_bd_tbl,
 				  bnx2i_conn->gen_pdu.resp_bd_dma);
 		bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
 	}
 
 	if (bnx2i_conn->gen_pdu.req_bd_tbl) {
-		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 				  bnx2i_conn->gen_pdu.req_bd_tbl,
 				  bnx2i_conn->gen_pdu.req_bd_dma);
 		bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
@@ -998,13 +999,13 @@ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
 	bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;
 
 	bnx2i_conn->gen_pdu.req_bd_tbl =
-		dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+		dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 				   &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
 	if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
 		goto login_req_bd_tbl_failure;
 
 	bnx2i_conn->gen_pdu.resp_bd_tbl =
-		dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+		dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 				   &bnx2i_conn->gen_pdu.resp_bd_dma,
 				   GFP_KERNEL);
 	if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
@@ -1013,7 +1014,7 @@ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
 	return 0;
 
 login_resp_bd_tbl_failure:
-	dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+	dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 			  bnx2i_conn->gen_pdu.req_bd_tbl,
 			  bnx2i_conn->gen_pdu.req_bd_dma);
 	bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
@@ -1040,6 +1040,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  *	Allocate a security structure to the xp->security field; the security
  *	field is initialized to NULL when the xfrm_policy is allocated.
  *	Return 0 if operation was successful (memory to allocate, legal context)
+ *	@gfp is to specify the context for the allocation
  * @xfrm_policy_clone_security:
  *	@old_ctx contains an existing xfrm_sec_ctx.
  *	@new_ctxp contains a new xfrm_sec_ctx being cloned from old.
@@ -1683,7 +1684,7 @@ struct security_operations {
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
 	int (*xfrm_policy_alloc_security) (struct xfrm_sec_ctx **ctxp,
-			struct xfrm_user_sec_ctx *sec_ctx);
+			struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp);
 	int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx);
 	void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx);
 	int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx);
@@ -2859,7 +2860,8 @@ static inline void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
 
-int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx);
+int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
+			       struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp);
 int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctxp);
 void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx);
 int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx);
@@ -2877,7 +2879,9 @@ void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl);
 
 #else	/* CONFIG_SECURITY_NETWORK_XFRM */
 
-static inline int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx)
+static inline int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
+					     struct xfrm_user_sec_ctx *sec_ctx,
+					     gfp_t gfp)
 {
 	return 0;
 }
@@ -88,6 +88,7 @@
 #define cdc_ncm_data_intf_is_mbim(x)  ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB)
 
 struct cdc_ncm_ctx {
+	struct usb_cdc_ncm_ntb_parameters ncm_parm;
 	struct hrtimer tx_timer;
 	struct tasklet_struct bh;
@@ -480,20 +480,21 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 #ifdef CONFIG_SYN_COOKIES
 #include <linux/ktime.h>
 
-/* Syncookies use a monotonic timer which increments every 64 seconds.
+/* Syncookies use a monotonic timer which increments every 60 seconds.
  * This counter is used both as a hash input and partially encoded into
  * the cookie value.  A cookie is only validated further if the delta
  * between the current counter value and the encoded one is less than this,
- * i.e. a sent cookie is valid only at most for 128 seconds (or less if
+ * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
  * the counter advances immediately after a cookie is generated).
  */
 #define MAX_SYNCOOKIE_AGE 2
 
 static inline u32 tcp_cookie_time(void)
 {
-	struct timespec now;
-	getnstimeofday(&now);
-	return now.tv_sec >> 6; /* 64 seconds granularity */
+	u64 val = get_jiffies_64();
+
+	do_div(val, 60 * HZ);
+	return val;
 }
 
 u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
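The new tcp_cookie_time() derives its 60-second counter from jiffies, which is monotonic, instead of wall-clock time, which can jump under a clock change; do_div is used because 64-bit division needs a helper on 32-bit architectures. A user-space analogue of the counter arithmetic, using wall time purely for illustration:

/* Analogue of the cookie counter: one tick per minute, cookies accepted
 * while fewer than MAX_SYNCOOKIE_AGE ticks old (at most ~120 seconds). */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define MAX_SYNCOOKIE_AGE 2

static uint32_t cookie_time(void)
{
        return (uint32_t)(time(NULL) / 60);     /* 60-second granularity */
}

int main(void)
{
        uint32_t sent = cookie_time();  /* counter value encoded in a cookie */
        uint32_t now  = sent + 1;       /* pretend one tick has elapsed */

        printf("valid=%d\n", (now - sent) < MAX_SYNCOOKIE_AGE);
        return 0;
}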
@@ -742,7 +742,7 @@ static bool pkt_is_ns(struct sk_buff *skb)
 	struct nd_msg *msg;
 	struct ipv6hdr *hdr;
 
-	if (skb->protocol != htons(ETH_P_ARP))
+	if (skb->protocol != htons(ETH_P_IPV6))
 		return false;
 	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
 		return false;
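An IPv6 neighbour solicitation is an ICMPv6 message carried in an IPv6 frame, so the ethertype to test is ETH_P_IPV6 (0x86DD); the old ETH_P_ARP (0x0806) test only ever matches IPv4 ARP and could never see an NS packet. A self-contained check mirroring the comparison:

/* Demo of the corrected ethertype test; constants mirror if_ether.h. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_P_ARP  0x0806
#define ETH_P_IPV6 0x86DD

int main(void)
{
        uint16_t proto = htons(ETH_P_IPV6);     /* as stored in skb->protocol */

        printf("is-ipv6=%d is-arp=%d\n",
               proto == htons(ETH_P_IPV6), proto == htons(ETH_P_ARP));
        return 0;
}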
@@ -2121,12 +2121,13 @@ EXPORT_SYMBOL(rtmsg_ifinfo);
 static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
 				   struct net_device *dev,
 				   u8 *addr, u32 pid, u32 seq,
-				   int type, unsigned int flags)
+				   int type, unsigned int flags,
+				   int nlflags)
 {
 	struct nlmsghdr *nlh;
 	struct ndmsg *ndm;
 
-	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), NLM_F_MULTI);
+	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
 	if (!nlh)
 		return -EMSGSIZE;
 
@@ -2164,7 +2165,7 @@ static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, int type)
 	if (!skb)
 		goto errout;
 
-	err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF);
+	err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF, 0);
 	if (err < 0) {
 		kfree_skb(skb);
 		goto errout;
@@ -2389,7 +2390,8 @@ static int nlmsg_populate_fdb(struct sk_buff *skb,
 
 		err = nlmsg_populate_fdb_fill(skb, dev, ha->addr,
 					      portid, seq,
-					      RTM_NEWNEIGH, NTF_SELF);
+					      RTM_NEWNEIGH, NTF_SELF,
+					      NLM_F_MULTI);
 		if (err < 0)
 			return err;
 skip:
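NLM_F_MULTI marks one message in a multipart dump terminated by NLMSG_DONE; a standalone notification must not carry it. The new nlflags argument lets the notify path pass 0 while the dump path passes NLM_F_MULTI, as the two callers above now do. A sketch of the flag selection, with the constant mirroring include/uapi/linux/netlink.h and the helper name hypothetical:

/* NLM_F_MULTI is only correct for dump replies, never notifications. */
#include <stdio.h>

#define NLM_F_MULTI 0x2

static unsigned int fdb_nlflags(int is_dump)
{
        return is_dump ? NLM_F_MULTI : 0;       /* what the callers now pass */
}

int main(void)
{
        printf("notify=0x%x dump=0x%x\n", fdb_nlflags(0), fdb_nlflags(1));
        return 0;
}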
@@ -2255,13 +2255,14 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
 }
 
 static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
-			    u32 portid, u32 seq, struct mfc_cache *c, int cmd)
+			    u32 portid, u32 seq, struct mfc_cache *c, int cmd,
+			    int flags)
 {
 	struct nlmsghdr *nlh;
 	struct rtmsg *rtm;
 	int err;
 
-	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI);
+	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
 	if (nlh == NULL)
 		return -EMSGSIZE;
 
@@ -2329,7 +2330,7 @@ static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
 	if (skb == NULL)
 		goto errout;
 
-	err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd);
+	err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
 	if (err < 0)
 		goto errout;
 
@@ -2368,7 +2369,8 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
 				if (ipmr_fill_mroute(mrt, skb,
 						     NETLINK_CB(cb->skb).portid,
 						     cb->nlh->nlmsg_seq,
-						     mfc, RTM_NEWROUTE) < 0)
+						     mfc, RTM_NEWROUTE,
+						     NLM_F_MULTI) < 0)
 					goto done;
 next_entry:
 				e++;
@@ -2382,7 +2384,8 @@ next_entry:
 			if (ipmr_fill_mroute(mrt, skb,
 					     NETLINK_CB(cb->skb).portid,
 					     cb->nlh->nlmsg_seq,
-					     mfc, RTM_NEWROUTE) < 0) {
+					     mfc, RTM_NEWROUTE,
+					     NLM_F_MULTI) < 0) {
 				spin_unlock_bh(&mfc_unres_lock);
 				goto done;
 			}
@@ -1101,21 +1101,19 @@ static void ip6_append_data_mtu(unsigned int *mtu,
 				unsigned int fragheaderlen,
 				struct sk_buff *skb,
 				struct rt6_info *rt,
-				bool pmtuprobe)
+				unsigned int orig_mtu)
 {
 	if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
 		if (skb == NULL) {
 			/* first fragment, reserve header_len */
-			*mtu = *mtu - rt->dst.header_len;
+			*mtu = orig_mtu - rt->dst.header_len;
 
 		} else {
 			/*
 			 * this fragment is not first, the headers
 			 * space is regarded as data space.
 			 */
-			*mtu = min(*mtu, pmtuprobe ?
-				   rt->dst.dev->mtu :
-				   dst_mtu(rt->dst.path));
+			*mtu = orig_mtu;
 		}
 		*maxfraglen = ((*mtu - fragheaderlen) & ~7)
 			      + fragheaderlen - sizeof(struct frag_hdr);
@@ -1132,7 +1130,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct inet_cork *cork;
 	struct sk_buff *skb, *skb_prev = NULL;
-	unsigned int maxfraglen, fragheaderlen, mtu;
+	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
 	int exthdrlen;
 	int dst_exthdrlen;
 	int hh_len;
@@ -1214,6 +1212,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
 		dst_exthdrlen = 0;
 		mtu = cork->fragsize;
 	}
+	orig_mtu = mtu;
 
 	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
 
@@ -1311,8 +1310,7 @@ alloc_new_skb:
 			if (skb == NULL || skb_prev == NULL)
 				ip6_append_data_mtu(&mtu, &maxfraglen,
 						    fragheaderlen, skb, rt,
-						    np->pmtudisc >=
-						    IPV6_PMTUDISC_PROBE);
+						    orig_mtu);
 
 			skb_prev = skb;
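With the MTU recomputed from orig_mtu for every fragment after the first, the maxfraglen formula above rounds the per-fragment payload down to a multiple of 8, as IPv6 fragmentation requires. A worked example with a 1500-byte MTU and a bare 40-byte IPv6 header (no extension headers assumed):

/* Worked example of the maxfraglen computation in ip6_append_data_mtu(). */
#include <stdio.h>

int main(void)
{
        unsigned int mtu = 1500;         /* link MTU */
        unsigned int fragheaderlen = 40; /* IPv6 header, no extension headers */
        unsigned int frag_hdr_sz = 8;    /* sizeof(struct frag_hdr) */

        unsigned int maxfraglen = ((mtu - fragheaderlen) & ~7U)
                                  + fragheaderlen - frag_hdr_sz;

        printf("maxfraglen=%u\n", maxfraglen);  /* prints 1488 */
        return 0;
}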
@@ -2349,13 +2349,14 @@ int ip6mr_get_route(struct net *net,
 }
 
 static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
-			     u32 portid, u32 seq, struct mfc6_cache *c, int cmd)
+			     u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
+			     int flags)
 {
 	struct nlmsghdr *nlh;
 	struct rtmsg *rtm;
 	int err;
 
-	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI);
+	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
 	if (nlh == NULL)
 		return -EMSGSIZE;
 
@@ -2423,7 +2424,7 @@ static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
 	if (skb == NULL)
 		goto errout;
 
-	err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd);
+	err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
 	if (err < 0)
 		goto errout;
 
@@ -2462,7 +2463,8 @@ static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
 				if (ip6mr_fill_mroute(mrt, skb,
 						      NETLINK_CB(cb->skb).portid,
 						      cb->nlh->nlmsg_seq,
-						      mfc, RTM_NEWROUTE) < 0)
+						      mfc, RTM_NEWROUTE,
+						      NLM_F_MULTI) < 0)
 					goto done;
 next_entry:
 				e++;
@@ -2476,7 +2478,8 @@ next_entry:
 			if (ip6mr_fill_mroute(mrt, skb,
 					      NETLINK_CB(cb->skb).portid,
 					      cb->nlh->nlmsg_seq,
-					      mfc, RTM_NEWROUTE) < 0) {
+					      mfc, RTM_NEWROUTE,
+					      NLM_F_MULTI) < 0) {
 				spin_unlock_bh(&mfc_unres_lock);
 				goto done;
 			}
@@ -433,12 +433,13 @@ static inline int verify_sec_ctx_len(const void *p)
 	return 0;
 }
 
-static inline struct xfrm_user_sec_ctx *pfkey_sadb2xfrm_user_sec_ctx(const struct sadb_x_sec_ctx *sec_ctx)
+static inline struct xfrm_user_sec_ctx *pfkey_sadb2xfrm_user_sec_ctx(const struct sadb_x_sec_ctx *sec_ctx,
+						     gfp_t gfp)
 {
 	struct xfrm_user_sec_ctx *uctx = NULL;
 	int ctx_size = sec_ctx->sadb_x_ctx_len;
 
-	uctx = kmalloc((sizeof(*uctx)+ctx_size), GFP_KERNEL);
+	uctx = kmalloc((sizeof(*uctx)+ctx_size), gfp);
 
 	if (!uctx)
 		return NULL;
@@ -1124,7 +1125,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 
 	sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1];
 	if (sec_ctx != NULL) {
-		struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx);
+		struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_KERNEL);
 
 		if (!uctx)
 			goto out;
@@ -2231,14 +2232,14 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_
 
 	sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1];
 	if (sec_ctx != NULL) {
-		struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx);
+		struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_KERNEL);
 
 		if (!uctx) {
 			err = -ENOBUFS;
 			goto out;
 		}
 
-		err = security_xfrm_policy_alloc(&xp->security, uctx);
+		err = security_xfrm_policy_alloc(&xp->security, uctx, GFP_KERNEL);
 		kfree(uctx);
 
 		if (err)
@@ -2335,12 +2336,12 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
 
 	sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1];
 	if (sec_ctx != NULL) {
-		struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx);
+		struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_KERNEL);
 
 		if (!uctx)
 			return -ENOMEM;
 
-		err = security_xfrm_policy_alloc(&pol_ctx, uctx);
+		err = security_xfrm_policy_alloc(&pol_ctx, uctx, GFP_KERNEL);
 		kfree(uctx);
 		if (err)
 			return err;
@@ -3239,8 +3240,8 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
 	}
 	if ((*dir = verify_sec_ctx_len(p)))
 		goto out;
-	uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx);
-	*dir = security_xfrm_policy_alloc(&xp->security, uctx);
+	uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_ATOMIC);
+	*dir = security_xfrm_policy_alloc(&xp->security, uctx, GFP_ATOMIC);
 	kfree(uctx);
 
 	if (*dir)
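pfkey_compile_policy() can run inside an RCU read-side section where sleeping is forbidden, so its allocations now use GFP_ATOMIC while the ordinary process-context callers keep GFP_KERNEL; the helpers simply take the caller's context as a parameter. A user-space mock of that plumbing pattern only; kmalloc, the GFP values, and build_sec_ctx here are stand-ins, not the kernel implementation:

/* Mock of threading an allocation context through a helper. */
#include <stdlib.h>

#define GFP_KERNEL 0    /* process context: the allocator may sleep */
#define GFP_ATOMIC 1    /* atomic context, e.g. under rcu_read_lock() */

static void *kmalloc(size_t size, int gfp)
{
        (void)gfp;      /* a real allocator would branch on this */
        return malloc(size);
}

static void *build_sec_ctx(size_t len, int gfp)
{
        return kmalloc(len, gfp);       /* caller decides the context */
}

int main(void)
{
        void *a = build_sec_ctx(64, GFP_KERNEL);        /* sleeping allowed */
        void *b = build_sec_ctx(64, GFP_ATOMIC);        /* no sleeping */

        free(a);
        free(b);
        return 0;
}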
@@ -1174,7 +1174,7 @@ static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *in
 	struct datapath *dp;
 
 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
-	if (!dp)
+	if (IS_ERR(dp))
 		return;
 
 	WARN(dp->user_features, "Dropping previously announced user features\n");
@@ -1762,11 +1762,12 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	int bucket = cb->args[0], skip = cb->args[1];
 	int i, j = 0;
 
-	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
-	if (!dp)
-		return -ENODEV;
-
 	rcu_read_lock();
+	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
+	if (!dp) {
+		rcu_read_unlock();
+		return -ENODEV;
+	}
 	for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
 		struct vport *vport;
 
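lookup_datapath() reports failure as ERR_PTR(-errno), never as NULL, so the old `if (!dp)` test could not catch an error. A self-contained re-implementation of the kernel's error-pointer idiom (MAX_ERRNO is 4095 in the kernel headers; the lookup function here is a made-up stand-in) showing why:

/* Errors come back as (void *)-errno, which is non-NULL. */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static int dummy;

static void *lookup(int ok)             /* stand-in for lookup_datapath() */
{
        return ok ? (void *)&dummy : ERR_PTR(-ENODEV);
}

int main(void)
{
        void *dp = lookup(0);

        printf("null=%d is_err=%d err=%ld\n",
               dp == NULL, IS_ERR(dp), IS_ERR(dp) ? PTR_ERR(dp) : 0L);
        return 0;
}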
@@ -73,6 +73,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
 
 	if ((flow->key.eth.type == htons(ETH_P_IP) ||
 	     flow->key.eth.type == htons(ETH_P_IPV6)) &&
+	    flow->key.ip.frag != OVS_FRAG_TYPE_LATER &&
 	    flow->key.ip.proto == IPPROTO_TCP &&
 	    likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
 		tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb));
@@ -91,7 +92,7 @@ static void stats_read(struct flow_stats *stats,
 		       unsigned long *used, __be16 *tcp_flags)
 {
 	spin_lock(&stats->lock);
-	if (time_after(stats->used, *used))
+	if (!*used || time_after(stats->used, *used))
 		*used = stats->used;
 	*tcp_flags |= stats->tcp_flags;
 	ovs_stats->n_packets += stats->packet_count;
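jiffies is initialised so that it wraps shortly after boot, which makes early timestamps compare as "before" a zero baseline under time_after(); the added !*used test accepts the first real timestamp unconditionally. A 32-bit demo of the wrap (the kernel macro operates on long; this sketch uses int32_t, and HZ = 1000 is an assumption):

/* Why time_after(stamp, 0) fails for timestamps taken just after boot. */
#include <stdint.h>
#include <stdio.h>

#define time_after(a, b) ((int32_t)((b) - (a)) < 0)

int main(void)
{
        uint32_t hz = 1000;
        uint32_t initial_jiffies = (uint32_t)(-300 * (int32_t)hz);
        uint32_t used = 0;                      /* caller's baseline */
        uint32_t stamp = initial_jiffies + 5;   /* real early timestamp */

        printf("broken=%d fixed=%d\n",
               time_after(stamp, used),             /* 0: stamp looks old */
               !used || time_after(stamp, used));   /* 1: fix accepts it  */
        return 0;
}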
@@ -263,9 +263,9 @@ static void subscr_cancel(struct tipc_subscr *s,
  *
  * Called with subscriber lock held.
  */
-static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
-					     struct tipc_subscriber *subscriber)
-{
+static int subscr_subscribe(struct tipc_subscr *s,
+			    struct tipc_subscriber *subscriber,
+			    struct tipc_subscription **sub_p) {
 	struct tipc_subscription *sub;
 	int swap;
 
@@ -276,23 +276,21 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
 	if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
 		s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
 		subscr_cancel(s, subscriber);
-		return NULL;
+		return 0;
 	}
 
 	/* Refuse subscription if global limit exceeded */
 	if (atomic_read(&subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) {
 		pr_warn("Subscription rejected, limit reached (%u)\n",
 			TIPC_MAX_SUBSCRIPTIONS);
-		subscr_terminate(subscriber);
-		return NULL;
+		return -EINVAL;
 	}
 
 	/* Allocate subscription object */
 	sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
 	if (!sub) {
 		pr_warn("Subscription rejected, no memory\n");
-		subscr_terminate(subscriber);
-		return NULL;
+		return -ENOMEM;
 	}
 
 	/* Initialize subscription object */
@@ -306,8 +304,7 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
 	    (sub->seq.lower > sub->seq.upper)) {
 		pr_warn("Subscription rejected, illegal request\n");
 		kfree(sub);
-		subscr_terminate(subscriber);
-		return NULL;
+		return -EINVAL;
 	}
 	INIT_LIST_HEAD(&sub->nameseq_list);
 	list_add(&sub->subscription_list, &subscriber->subscription_list);
@@ -320,8 +317,8 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
 			     (Handler)subscr_timeout, (unsigned long)sub);
 		k_start_timer(&sub->timer, sub->timeout);
 	}
-
-	return sub;
+	*sub_p = sub;
+	return 0;
 }
 
 /* Handle one termination request for the subscriber */
@@ -335,10 +332,14 @@ static void subscr_conn_msg_event(int conid, struct sockaddr_tipc *addr,
 				  void *usr_data, void *buf, size_t len)
 {
 	struct tipc_subscriber *subscriber = usr_data;
-	struct tipc_subscription *sub;
+	struct tipc_subscription *sub = NULL;
 
 	spin_lock_bh(&subscriber->lock);
-	sub = subscr_subscribe((struct tipc_subscr *)buf, subscriber);
+	if (subscr_subscribe((struct tipc_subscr *)buf, subscriber, &sub) < 0) {
+		spin_unlock_bh(&subscriber->lock);
+		subscr_terminate(subscriber);
+		return;
+	}
 	if (sub)
 		tipc_nametbl_subscribe(sub);
 	spin_unlock_bh(&subscriber->lock);
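subscr_subscribe() runs with the subscriber lock held, and on failure the old code called subscr_terminate(), whose teardown path ends up taking that same lock: a spinlock recursion. Returning an error code plus an out-parameter lets the caller drop the lock before terminating. A pthread sketch of the pattern, with mutexes standing in for the spinlock and all names hypothetical:

/* Report failure under the lock; terminate only after unlocking. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int subscribe(int ok)    /* called with 'lock' held */
{
        return ok ? 0 : -1;     /* no longer terminates under the lock */
}

static void terminate(void)     /* takes 'lock' itself */
{
        pthread_mutex_lock(&lock);
        puts("terminated");
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        int err;

        pthread_mutex_lock(&lock);
        err = subscribe(0);
        pthread_mutex_unlock(&lock);    /* drop the lock first */
        if (err < 0)
                terminate();            /* safe: lock is free again */
        return 0;
}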
@@ -1221,7 +1221,7 @@ static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs
 		return 0;
 
 	uctx = nla_data(rt);
-	return security_xfrm_policy_alloc(&pol->security, uctx);
+	return security_xfrm_policy_alloc(&pol->security, uctx, GFP_KERNEL);
 }
 
 static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
@@ -1626,7 +1626,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
 		if (rt) {
 			struct xfrm_user_sec_ctx *uctx = nla_data(rt);
 
-			err = security_xfrm_policy_alloc(&ctx, uctx);
+			err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
 			if (err)
 				return err;
 		}
@@ -1928,7 +1928,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
 		if (rt) {
 			struct xfrm_user_sec_ctx *uctx = nla_data(rt);
 
-			err = security_xfrm_policy_alloc(&ctx, uctx);
+			err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
 			if (err)
 				return err;
 		}
@@ -757,7 +757,8 @@ static void cap_skb_owned_by(struct sk_buff *skb, struct sock *sk)
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
 static int cap_xfrm_policy_alloc_security(struct xfrm_sec_ctx **ctxp,
-					  struct xfrm_user_sec_ctx *sec_ctx)
+					  struct xfrm_user_sec_ctx *sec_ctx,
+					  gfp_t gfp)
 {
 	return 0;
 }
@@ -1317,9 +1317,11 @@ void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
 
-int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx)
+int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
+			       struct xfrm_user_sec_ctx *sec_ctx,
+			       gfp_t gfp)
 {
-	return security_ops->xfrm_policy_alloc_security(ctxp, sec_ctx);
+	return security_ops->xfrm_policy_alloc_security(ctxp, sec_ctx, gfp);
 }
 EXPORT_SYMBOL(security_xfrm_policy_alloc);
 
@@ -668,7 +668,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
 		if (flags[i] == SBLABEL_MNT)
 			continue;
 		rc = security_context_to_sid(mount_options[i],
-					     strlen(mount_options[i]), &sid);
+					     strlen(mount_options[i]), &sid, GFP_KERNEL);
 		if (rc) {
 			printk(KERN_WARNING "SELinux: security_context_to_sid"
 			       "(%s) failed for (dev %s, type %s) errno=%d\n",
@@ -2489,7 +2489,8 @@ static int selinux_sb_remount(struct super_block *sb, void *data)
 		if (flags[i] == SBLABEL_MNT)
 			continue;
 		len = strlen(mount_options[i]);
-		rc = security_context_to_sid(mount_options[i], len, &sid);
+		rc = security_context_to_sid(mount_options[i], len, &sid,
+					     GFP_KERNEL);
 		if (rc) {
 			printk(KERN_WARNING "SELinux: security_context_to_sid"
 			       "(%s) failed for (dev %s, type %s) errno=%d\n",
@@ -2893,7 +2894,7 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
 	if (rc)
 		return rc;
 
-	rc = security_context_to_sid(value, size, &newsid);
+	rc = security_context_to_sid(value, size, &newsid, GFP_KERNEL);
 	if (rc == -EINVAL) {
 		if (!capable(CAP_MAC_ADMIN)) {
 			struct audit_buffer *ab;
@@ -3050,7 +3051,7 @@ static int selinux_inode_setsecurity(struct inode *inode, const char *name,
 	if (!value || !size)
 		return -EACCES;
 
-	rc = security_context_to_sid((void *)value, size, &newsid);
+	rc = security_context_to_sid((void *)value, size, &newsid, GFP_KERNEL);
 	if (rc)
 		return rc;
 
@@ -5529,7 +5530,7 @@ static int selinux_setprocattr(struct task_struct *p,
 			str[size-1] = 0;
 			size--;
 		}
-		error = security_context_to_sid(value, size, &sid);
+		error = security_context_to_sid(value, size, &sid, GFP_KERNEL);
 		if (error == -EINVAL && !strcmp(name, "fscreate")) {
 			if (!capable(CAP_MAC_ADMIN)) {
 				struct audit_buffer *ab;
@@ -5638,7 +5639,7 @@ static int selinux_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
 
 static int selinux_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
 {
-	return security_context_to_sid(secdata, seclen, secid);
+	return security_context_to_sid(secdata, seclen, secid, GFP_KERNEL);
 }
 
 static void selinux_release_secctx(char *secdata, u32 seclen)
@@ -134,7 +134,7 @@ int security_sid_to_context(u32 sid, char **scontext,
 int security_sid_to_context_force(u32 sid, char **scontext, u32 *scontext_len);
 
 int security_context_to_sid(const char *scontext, u32 scontext_len,
-			    u32 *out_sid);
+			    u32 *out_sid, gfp_t gfp);
 
 int security_context_to_sid_default(const char *scontext, u32 scontext_len,
 				    u32 *out_sid, u32 def_sid, gfp_t gfp_flags);
@@ -10,7 +10,8 @@
 #include <net/flow.h>
 
 int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
-			      struct xfrm_user_sec_ctx *uctx);
+			      struct xfrm_user_sec_ctx *uctx,
+			      gfp_t gfp);
 int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
 			      struct xfrm_sec_ctx **new_ctxp);
 void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx);
@@ -576,7 +576,7 @@ static ssize_t sel_write_context(struct file *file, char *buf, size_t size)
 	if (length)
 		goto out;
 
-	length = security_context_to_sid(buf, size, &sid);
+	length = security_context_to_sid(buf, size, &sid, GFP_KERNEL);
 	if (length)
 		goto out;
 
@@ -731,11 +731,13 @@ static ssize_t sel_write_access(struct file *file, char *buf, size_t size)
 	if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
 		goto out;
 
-	length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
+	length = security_context_to_sid(scon, strlen(scon) + 1, &ssid,
+					 GFP_KERNEL);
 	if (length)
 		goto out;
 
-	length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid);
+	length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid,
+					 GFP_KERNEL);
 	if (length)
 		goto out;
 
@@ -817,11 +819,13 @@ static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
 		objname = namebuf;
 	}
 
-	length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
+	length = security_context_to_sid(scon, strlen(scon) + 1, &ssid,
+					 GFP_KERNEL);
 	if (length)
 		goto out;
 
-	length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid);
+	length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid,
+					 GFP_KERNEL);
 	if (length)
 		goto out;
 
@@ -878,11 +882,13 @@ static ssize_t sel_write_relabel(struct file *file, char *buf, size_t size)
 	if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
 		goto out;
 
-	length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
+	length = security_context_to_sid(scon, strlen(scon) + 1, &ssid,
+					 GFP_KERNEL);
 	if (length)
 		goto out;
 
-	length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid);
+	length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid,
+					 GFP_KERNEL);
 	if (length)
 		goto out;
 
@@ -934,7 +940,7 @@ static ssize_t sel_write_user(struct file *file, char *buf, size_t size)
 	if (sscanf(buf, "%s %s", con, user) != 2)
 		goto out;
 
-	length = security_context_to_sid(con, strlen(con) + 1, &sid);
+	length = security_context_to_sid(con, strlen(con) + 1, &sid, GFP_KERNEL);
 	if (length)
 		goto out;
 
@@ -994,11 +1000,13 @@ static ssize_t sel_write_member(struct file *file, char *buf, size_t size)
 	if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
 		goto out;
 
-	length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
+	length = security_context_to_sid(scon, strlen(scon) + 1, &ssid,
+					 GFP_KERNEL);
 	if (length)
 		goto out;
 
-	length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid);
+	length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid,
+					 GFP_KERNEL);
 	if (length)
 		goto out;
 
@@ -1289,16 +1289,18 @@ out:
  * @scontext: security context
  * @scontext_len: length in bytes
  * @sid: security identifier, SID
+ * @gfp: context for the allocation
  *
 * Obtains a SID associated with the security context that
 * has the string representation specified by @scontext.
 * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient
 * memory is available, or 0 on success.
 */
-int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid)
+int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid,
+			    gfp_t gfp)
 {
 	return security_context_to_sid_core(scontext, scontext_len,
-					    sid, SECSID_NULL, GFP_KERNEL, 0);
+					    sid, SECSID_NULL, gfp, 0);
 }
 
 /**
@@ -78,7 +78,8 @@ static inline int selinux_authorizable_xfrm(struct xfrm_state *x)
  * xfrm_user_sec_ctx context.
  */
 static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp,
-				   struct xfrm_user_sec_ctx *uctx)
+				   struct xfrm_user_sec_ctx *uctx,
+				   gfp_t gfp)
 {
 	int rc;
 	const struct task_security_struct *tsec = current_security();
@@ -94,7 +95,7 @@ static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp,
 	if (str_len >= PAGE_SIZE)
 		return -ENOMEM;
 
-	ctx = kmalloc(sizeof(*ctx) + str_len + 1, GFP_KERNEL);
+	ctx = kmalloc(sizeof(*ctx) + str_len + 1, gfp);
 	if (!ctx)
 		return -ENOMEM;
 
@@ -103,7 +104,7 @@ static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp,
 	ctx->ctx_len = str_len;
 	memcpy(ctx->ctx_str, &uctx[1], str_len);
 	ctx->ctx_str[str_len] = '\0';
-	rc = security_context_to_sid(ctx->ctx_str, str_len, &ctx->ctx_sid);
+	rc = security_context_to_sid(ctx->ctx_str, str_len, &ctx->ctx_sid, gfp);
 	if (rc)
 		goto err;
 
@@ -282,9 +283,10 @@ int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
 * LSM hook implementation that allocs and transfers uctx spec to xfrm_policy.
 */
 int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
-			      struct xfrm_user_sec_ctx *uctx)
+			      struct xfrm_user_sec_ctx *uctx,
+			      gfp_t gfp)
 {
-	return selinux_xfrm_alloc_user(ctxp, uctx);
+	return selinux_xfrm_alloc_user(ctxp, uctx, gfp);
 }
 
 /*
@@ -332,7 +334,7 @@ int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
 int selinux_xfrm_state_alloc(struct xfrm_state *x,
 			     struct xfrm_user_sec_ctx *uctx)
 {
-	return selinux_xfrm_alloc_user(&x->security, uctx);
+	return selinux_xfrm_alloc_user(&x->security, uctx, GFP_KERNEL);
 }
 
 /*