Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Use after free in vlan, from Cong Wang.

 2) Handle NAPI poll with a zero budget properly in mlx5 driver, from
    Saeed Mahameed.

 3) If DMA mapping fails in mlx5 driver, NULL out page, from Inbar
    Karmy.

 4) Handle overrun in RX FIFO of sun4i CAN driver, from Gerhard
    Bertelsmann.

 5) Missing return in mdb and vlan prepare phase of DSA layer, from
    Vivien Didelot.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  vlan: fix a use-after-free in vlan_device_event()
  net: dsa: return after vlan prepare phase
  net: dsa: return after mdb prepare phase
  can: ifi: Fix transmitter delay calculation
  tcp: fix tcp_fastretrans_alert warning
  tcp: gso: avoid refcount_t warning from tcp_gso_segment()
  can: peak: Add support for new PCIe/M2 CAN FD interfaces
  can: sun4i: handle overrun in RX FIFO
  can: c_can: don't indicate triple sampling support for D_CAN
  net/mlx5e: Increase Striding RQ minimum size limit to 4 multi-packet WQEs
  net/mlx5e: Set page to null in case dma mapping fails
  net/mlx5e: Fix napi poll with zero budget
  net/mlx5: Cancel health poll before sending panic teardown command
  net/mlx5: Loop over temp list to release delay events
  rds: ib: Fix NULL pointer dereference in debug code
commit b39545684a
Author: Linus Torvalds
Date:   2017-11-11 09:10:39 -08:00

15 changed files with 68 additions and 34 deletions

diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c

@@ -178,7 +178,6 @@ static int c_can_pci_probe(struct pci_dev *pdev,
 		break;
 	case BOSCH_D_CAN:
 		priv->regs = reg_map_d_can;
-		priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
 		break;
 	default:
 		ret = -EINVAL;

diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c

@@ -320,7 +320,6 @@ static int c_can_plat_probe(struct platform_device *pdev)
 		break;
 	case BOSCH_D_CAN:
 		priv->regs = reg_map_d_can;
-		priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
 		priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
 		priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
 		priv->read_reg32 = d_can_plat_read_reg32;
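Why dropping the flag is sufficient, as I read it: the CAN core refuses any control-mode request a driver has not advertised. A minimal sketch of that gating check (modeled on can_changelink() in drivers/net/can/dev.c):

	/* A request such as "ip link set can0 type can triple-sampling on"
	 * arrives as control-mode flags; any flag the driver did not set in
	 * priv->can.ctrlmode_supported is rejected up front.
	 */
	if (cm->flags & ~priv->ctrlmode_supported)
		return -EOPNOTSUPP;

On D_CAN cores, which lack the triple-sampling feature, the request now fails with -EOPNOTSUPP instead of being silently accepted.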

diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c

@@ -670,9 +670,9 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
 	       priv->base + IFI_CANFD_FTIME);
 
 	/* Configure transmitter delay */
-	tdc = (dbt->brp * (dbt->phase_seg1 + 1)) & IFI_CANFD_TDELAY_MASK;
-	writel(IFI_CANFD_TDELAY_EN | IFI_CANFD_TDELAY_ABS | tdc,
-	       priv->base + IFI_CANFD_TDELAY);
+	tdc = dbt->brp * (dbt->prop_seg + dbt->phase_seg1);
+	tdc &= IFI_CANFD_TDELAY_MASK;
+	writel(IFI_CANFD_TDELAY_EN | tdc, priv->base + IFI_CANFD_TDELAY);
 }
 
 static void ifi_canfd_set_filter(struct net_device *ndev, const u32 id,
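A worked example of the corrected formula, with purely illustrative bit-timing values: for dbt->brp = 2, dbt->prop_seg = 5 and dbt->phase_seg1 = 6,

	tdc = 2 * (5 + 6);		/* = 22 clock periods */
	tdc &= IFI_CANFD_TDELAY_MASK;	/* clamp to the register field */

so the compensated delay now covers the propagation segment plus phase segment 1, whereas the old code used phase_seg1 + 1 alone and additionally set IFI_CANFD_TDELAY_ABS (going by the name, an absolute rather than relative delay mode).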

diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c

@@ -29,14 +29,19 @@
 #include "peak_canfd_user.h"
 
 MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
-MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe FD family cards");
-MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe FD CAN cards");
+MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe/M.2 FD family cards");
+MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe/M.2 FD CAN cards");
 MODULE_LICENSE("GPL v2");
 
 #define PCIEFD_DRV_NAME		"peak_pciefd"
 
 #define PEAK_PCI_VENDOR_ID	0x001c	/* The PCI device and vendor IDs */
 #define PEAK_PCIEFD_ID		0x0013	/* for PCIe slot cards */
+#define PCAN_CPCIEFD_ID		0x0014	/* for Compact-PCI Serial slot cards */
+#define PCAN_PCIE104FD_ID	0x0017	/* for PCIe-104 Express slot cards */
+#define PCAN_MINIPCIEFD_ID	0x0018	/* for mini-PCIe slot cards */
+#define PCAN_PCIEFD_OEM_ID	0x0019	/* for PCIe slot OEM cards */
+#define PCAN_M2_ID		0x001a	/* for M2 slot cards */
 
 /* PEAK PCIe board access description */
 #define PCIEFD_BAR0_SIZE	(64 * 1024)
@@ -203,6 +208,11 @@ struct pciefd_board {
 /* supported device ids. */
 static const struct pci_device_id peak_pciefd_tbl[] = {
 	{PEAK_PCI_VENDOR_ID, PEAK_PCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PCAN_CPCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PCAN_PCIE104FD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PCAN_MINIPCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PCAN_PCIEFD_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PCAN_M2_ID, PCI_ANY_ID, PCI_ANY_ID,},
 	{0,}
 };

diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c

@@ -539,6 +539,13 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
 		}
 		stats->rx_over_errors++;
 		stats->rx_errors++;
+
+		/* reset the CAN IP by entering reset mode
+		 * ignoring timeout error
+		 */
+		set_reset_mode(dev);
+		set_normal_mode(dev);
+
 		/* clear bit */
 		sun4i_can_write_cmdreg(priv, SUN4I_CMD_CLEAR_OR_FLAG);
 	}
@@ -653,8 +660,9 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id)
 			netif_wake_queue(dev);
 			can_led_event(dev, CAN_LED_EVENT_TX);
 		}
-		if (isrc & SUN4I_INT_RBUF_VLD) {
-			/* receive interrupt */
+		if ((isrc & SUN4I_INT_RBUF_VLD) &&
+		    !(isrc & SUN4I_INT_DATA_OR)) {
+			/* receive interrupt - don't read if overrun occurred */
 			while (status & SUN4I_STA_RBUF_RDY) {
 				/* RX buffer is not empty */
 				sun4i_can_rx(dev);
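Taken together, the two hunks make the overrun path self-contained. A simplified sketch of the resulting interrupt logic (the real handler multiplexes several error sources through sun4i_can_err()):

	/* overrun: the error path bounces the core through reset mode and
	 * clears SUN4I_CMD_CLEAR_OR_FLAG before RX may resume
	 */
	if (isrc & SUN4I_INT_DATA_OR)
		sun4i_can_err(dev, isrc, status);

	/* drain the FIFO only when no overrun is pending, so corrupted
	 * FIFO contents are never delivered as frames
	 */
	if ((isrc & SUN4I_INT_RBUF_VLD) && !(isrc & SUN4I_INT_DATA_OR))
		sun4i_can_rx(dev);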

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c

@@ -93,7 +93,7 @@ static void delayed_event_release(struct mlx5_device_context *dev_ctx,
 	list_splice_init(&priv->waiting_events_list, &temp);
 	if (!dev_ctx->context)
 		goto out;
-	list_for_each_entry_safe(de, n, &priv->waiting_events_list, list)
+	list_for_each_entry_safe(de, n, &temp, list)
 		dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);
 
 out:
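The bug is a subtlety of list_splice_init(): it moves every entry onto the destination list and reinitializes the source to empty. The old loop therefore walked a list that had just been emptied, and the delayed events were silently dropped. Restating the semantics:

	LIST_HEAD(temp);

	list_splice_init(&priv->waiting_events_list, &temp);
	/* waiting_events_list is empty again from this point on; only
	 * temp holds the spliced entries, so temp is what the delivery
	 * loop must iterate.
	 */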

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h

@@ -67,7 +67,7 @@
 #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE	0xa
 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE	0xd
 
-#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW	0x1
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW	0x2
 #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW	0x3
 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW	0x6
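These constants are log2 ring sizes, so raising the minimum from 0x1 to 0x2 moves the smallest allowed striding RQ from 2^1 = 2 to 2^2 = 4 multi-packet WQEs, matching the commit title in the shortlog above.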

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

@@ -215,22 +215,20 @@ static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
 static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
 					  struct mlx5e_dma_info *dma_info)
 {
-	struct page *page;
-
 	if (mlx5e_rx_cache_get(rq, dma_info))
 		return 0;
 
-	page = dev_alloc_pages(rq->buff.page_order);
-	if (unlikely(!page))
+	dma_info->page = dev_alloc_pages(rq->buff.page_order);
+	if (unlikely(!dma_info->page))
 		return -ENOMEM;
 
-	dma_info->addr = dma_map_page(rq->pdev, page, 0,
+	dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
 				      RQ_PAGE_SIZE(rq), rq->buff.map_dir);
 	if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
-		put_page(page);
+		put_page(dma_info->page);
+		dma_info->page = NULL;
 		return -ENOMEM;
 	}
 
-	dma_info->page = page;
-
 	return 0;
 }
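The point of the restructuring, as I understand it: dma_info->page is now always in a well-defined state, either a live page or NULL, even when the DMA mapping fails. A hypothetical caller-side unwind this protects (illustrative only, not quoted from the driver):

	if (dma_info->page) {
		dma_unmap_page(rq->pdev, dma_info->addr,
			       RQ_PAGE_SIZE(rq), rq->buff.map_dir);
		put_page(dma_info->page);
		dma_info->page = NULL;
	}

Without the NULL assignment, such a path could put_page() a page the error path above had already released.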

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c

@@ -49,7 +49,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 	struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
 					       napi);
 	bool busy = false;
-	int work_done;
+	int work_done = 0;
 	int i;
 
 	for (i = 0; i < c->num_tc; i++)
@@ -58,15 +58,17 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 	if (c->xdp)
 		busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq);
 
-	work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
-	busy |= work_done == budget;
+	if (likely(budget)) { /* budget=0 means: don't poll rx rings */
+		work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
+		busy |= work_done == budget;
+	}
 
 	busy |= c->rq.post_wqes(&c->rq);
 
 	if (busy) {
 		if (likely(mlx5e_channel_no_affinity_change(c)))
 			return budget;
-		if (work_done == budget)
+		if (budget && work_done == budget)
 			work_done--;
 	}
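Background on the budget == 0 case: netpoll invokes NAPI poll handlers with a zero budget and expects TX-completion work only; a handler must not touch its RX rings then, and must report zero work done. The shape the fix restores, with hypothetical helper names:

	clean_tx_rings();			/* TX cleanup is always allowed */
	if (budget)
		work_done = poll_rx_rings(budget);	/* RX only when budget > 0 */
	return work_done;			/* must be 0 when budget was 0 */

Initializing work_done to 0 also keeps the later work_done-- from operating on an uninitialized value.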

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c

@@ -1482,9 +1482,16 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
 		return -EAGAIN;
 	}
 
+	/* Panic tear down fw command will stop the PCI bus communication
+	 * with the HCA, so the health poll is no longer needed.
+	 */
+	mlx5_drain_health_wq(dev);
+	mlx5_stop_health_poll(dev);
+
 	ret = mlx5_cmd_force_teardown_hca(dev);
 	if (ret) {
 		mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
+		mlx5_start_health_poll(dev);
 		return ret;
 	}
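The ordering matters in both directions: the forced-teardown command stops PCI communication with the HCA, so a health poll still running afterwards would probe a dead device, and if the teardown fails, mlx5_start_health_poll() must bring the poll back because the regular unload path still relies on it.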

diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c

@@ -376,6 +376,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 			dev->name);
 		vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
 	}
+	if (event == NETDEV_DOWN &&
+	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+		vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
 
 	vlan_info = rtnl_dereference(dev->vlan_info);
 	if (!vlan_info)
@@ -423,9 +426,6 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 		struct net_device *tmp;
 		LIST_HEAD(close_list);
 
-		if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
-			vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
-
 		/* Put all VLANs for this dev in the down state too. */
 		vlan_group_for_each_dev(grp, i, vlandev) {
 			flgs = vlandev->flags;
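The use-after-free these two hunks close, reconstructed from the two halves of the diff: vlan_info is fetched once near the top of vlan_device_event() and grp points into it, yet the old NETDEV_DOWN path called vlan_vid_del() afterwards, which can drop the last VID reference and free vlan_info while grp is still in use:

	vlan_info = rtnl_dereference(dev->vlan_info);
	...
	grp = &vlan_info->grp;
	...
	vlan_vid_del(dev, htons(ETH_P_8021Q), 0);	/* may free vlan_info */
	vlan_group_for_each_dev(grp, i, vlandev) {	/* use-after-free */

Doing the vlan_vid_del() before vlan_info is dereferenced removes the window.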

diff --git a/net/dsa/switch.c b/net/dsa/switch.c

@@ -133,6 +133,8 @@ static int dsa_switch_mdb_add(struct dsa_switch *ds,
 			if (err)
 				return err;
 		}
+
+		return 0;
 	}
 
 	for_each_set_bit(port, group, ds->num_ports)
@@ -180,6 +182,8 @@ static int dsa_switch_vlan_add(struct dsa_switch *ds,
 			if (err)
 				return err;
 		}
+
+		return 0;
 	}
 
 	for_each_set_bit(port, members, ds->num_ports)
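Context for the two identical fixes: switchdev objects are applied in two phases through the same function, a prepare phase in which drivers validate and may fail, and a commit phase that must succeed. Without the early return, a successful prepare fell straight through into the commit calls below. The intended shape, reconstructed from the surrounding context:

	if (switchdev_trans_ph_prepare(trans)) {
		for_each_set_bit(port, group, ds->num_ports) {
			err = ds->ops->port_mdb_prepare(ds, port, mdb, trans);
			if (err)
				return err;
		}

		return 0;	/* the return both hunks add */
	}

	for_each_set_bit(port, group, ds->num_ports)
		ds->ops->port_mdb_add(ds, port, mdb, trans);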

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c

@@ -2615,7 +2615,6 @@ void tcp_simple_retransmit(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 	unsigned int mss = tcp_current_mss(sk);
-	u32 prior_lost = tp->lost_out;
 
 	tcp_for_write_queue(skb, sk) {
 		if (skb == tcp_send_head(sk))
@@ -2632,7 +2631,7 @@ void tcp_simple_retransmit(struct sock *sk)
 
 	tcp_clear_retrans_hints_partial(tp);
 
-	if (prior_lost == tp->lost_out)
+	if (!tp->lost_out)
 		return;
 
 	if (tcp_is_reno(tp))
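The behavioral change in words: the old code returned early whenever this call marked no new losses (prior_lost == tp->lost_out), even if losses from an earlier pass were still outstanding, which could leave the congestion state inconsistent and later trip the warning in tcp_fastretrans_alert(). Testing !tp->lost_out instead lets the state adjustment below run whenever any losses are outstanding, whether or not this particular call created them.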

diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c

@@ -149,11 +149,19 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 	 * is freed by GSO engine
 	 */
 	if (copy_destructor) {
+		int delta;
+
 		swap(gso_skb->sk, skb->sk);
 		swap(gso_skb->destructor, skb->destructor);
 		sum_truesize += skb->truesize;
-		refcount_add(sum_truesize - gso_skb->truesize,
-			     &skb->sk->sk_wmem_alloc);
+		delta = sum_truesize - gso_skb->truesize;
+		/* In some pathological cases, delta can be negative.
+		 * We need to either use refcount_add() or refcount_sub_and_test()
+		 */
+		if (likely(delta >= 0))
+			refcount_add(delta, &skb->sk->sk_wmem_alloc);
+		else
+			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
 	}
 
 	delta = htonl(oldlen + (skb_tail_pointer(skb) -
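Why the split matters: refcount_t deliberately refuses operations that could underflow, so refcount_add() with a negative value fires a warning even when the arithmetic would come out right. The negative case therefore has to go through the decrement API, and refcount_sub_and_test() returning true would mean sk_wmem_alloc reached zero while the socket is demonstrably still in use, which is why that outcome is wrapped in WARN_ON_ONCE() rather than acted upon.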

diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c

@@ -410,14 +410,14 @@ void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp)
 			break;
 		}
 
-		/* XXX when can this fail? */
-		ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
-		rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv,
+		rdsdebug("recv %p ibinc %p page %p addr %lu\n", recv,
 			 recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
 			 (long) ib_sg_dma_address(
 				ic->i_cm_id->device,
-				&recv->r_frag->f_sg),
-			 ret);
+				&recv->r_frag->f_sg));
+
+		/* XXX when can this fail? */
+		ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
 		if (ret) {
 			rds_ib_conn_error(conn, "recv post on "
 				"%pI4 returned %d, disconnecting and "