Merge branches 'cma', 'cxgb4', 'iser', 'misc', 'mlx4', 'mlx5', 'nes', 'ocrdma', 'qib' and 'usnic' into for-next
commit c9459388d8
Merge: b4a26a2728 0f0132001f fd8b48b22a ab576627c8 ad4885d279 0861565f50 d07875bd0d 09de3f1313 2f75e12c44 f809309a25
@@ -1082,6 +1082,7 @@ static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)

 	/* Initialize network device */
 	if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
+		ret = -ENOMEM;
 		iounmap(mmio_regs);
 		goto bail4;
 	}
@@ -1151,7 +1152,8 @@ static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
 		goto bail10;
 	}

-	if (c2_register_device(c2dev))
+	ret = c2_register_device(c2dev);
+	if (ret)
 		goto bail10;

 	return 0;
@@ -576,7 +576,8 @@ int c2_rnic_init(struct c2_dev *c2dev)
 		goto bail4;

 	/* Initialize cached the adapter limits */
-	if (c2_rnic_query(c2dev, &c2dev->props))
+	err = c2_rnic_query(c2dev, &c2dev->props);
+	if (err)
 		goto bail5;

 	/* Initialize the PD pool */
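Both amso1100 hunks above (and the nes_probe hunk further down) fix the same bug class: an error path that unwinds correctly but forgets to load a negative errno into the return variable, so probe() reports success for a dead device. A minimal sketch of the convention being enforced, with hypothetical helper names (not from any of these drivers):

#include <errno.h>

struct example_dev;
extern void *acquire_resource(struct example_dev *dev);	/* hypothetical */
extern int register_device(struct example_dev *dev);	/* hypothetical */
extern void release_resource(void *res);		/* hypothetical */

static int example_probe(struct example_dev *dev)
{
	void *res;
	int ret;

	res = acquire_resource(dev);
	if (!res)
		return -ENOMEM;		/* allocation failure: always set -ENOMEM */

	ret = register_device(dev);
	if (ret)			/* keep the errno; don't test and drop it */
		goto bail;

	return 0;

bail:
	release_resource(res);
	return ret;			/* a negative errno reaches the caller */
}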
@@ -3352,6 +3352,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 		goto free_dst;
 	}

+	neigh_release(neigh);
 	step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
 	rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
 	window = (__force u16) htons((__force u16)tcph->window);
@@ -1357,6 +1357,21 @@ static struct device_attribute *mlx4_class_attributes[] = {
 	&dev_attr_board_id
 };

+static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
+				     struct net_device *dev)
+{
+	memcpy(eui, dev->dev_addr, 3);
+	memcpy(eui + 5, dev->dev_addr + 3, 3);
+	if (vlan_id < 0x1000) {
+		eui[3] = vlan_id >> 8;
+		eui[4] = vlan_id & 0xff;
+	} else {
+		eui[3] = 0xff;
+		eui[4] = 0xfe;
+	}
+	eui[0] ^= 2;
+}
+
 static void update_gids_task(struct work_struct *work)
 {
 	struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
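The new mlx4_addrconf_ifid_eui48() helper builds the 8-byte interface ID of an IBoE GID from the netdev MAC in modified EUI-64 form: insert ff:fe (or the VLAN ID, when one is set) between the OUI and NIC halves, then flip the universal/local bit. A standalone userspace sketch with a made-up MAC; prefixing the result with fe80::/64, as the mlx4_make_default_gid() hunk below does, yields the port's default GID:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Mirrors mlx4_addrconf_ifid_eui48() for a plain MAC instead of a netdev. */
static void ifid_from_mac(uint8_t eui[8], uint16_t vlan_id, const uint8_t mac[6])
{
	memcpy(eui, mac, 3);		/* OUI half */
	memcpy(eui + 5, mac + 3, 3);	/* NIC half */
	if (vlan_id < 0x1000) {		/* valid VLAN: embed it in the middle */
		eui[3] = vlan_id >> 8;
		eui[4] = vlan_id & 0xff;
	} else {			/* no VLAN: classic ff:fe filler */
		eui[3] = 0xff;
		eui[4] = 0xfe;
	}
	eui[0] ^= 2;			/* flip the universal/local bit */
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x02, 0xc9, 0x12, 0x34, 0x56 }; /* made up */
	uint8_t eui[8];
	int i;

	ifid_from_mac(eui, 0xffff, mac);	/* 0xffff = untagged */
	for (i = 0; i < 8; i++)
		printf("%02x%c", eui[i], i == 7 ? '\n' : ':');
	/* prints 02:02:c9:ff:fe:12:34:56 -> default GID fe80::202:c9ff:fe12:3456 */
	return 0;
}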
@@ -1393,7 +1408,6 @@ static void reset_gids_task(struct work_struct *work)
 	struct mlx4_cmd_mailbox *mailbox;
 	union ib_gid *gids;
 	int err;
-	int i;
 	struct mlx4_dev	*dev = gw->dev->dev;

 	mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -1405,18 +1419,16 @@ static void reset_gids_task(struct work_struct *work)
 	gids = mailbox->buf;
 	memcpy(gids, gw->gids, sizeof(gw->gids));

-	for (i = 1; i < gw->dev->num_ports + 1; i++) {
-		if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, i) ==
-					    IB_LINK_LAYER_ETHERNET) {
-			err = mlx4_cmd(dev, mailbox->dma,
-				       MLX4_SET_PORT_GID_TABLE << 8 | i,
-				       1, MLX4_CMD_SET_PORT,
-				       MLX4_CMD_TIME_CLASS_B,
-				       MLX4_CMD_WRAPPED);
-			if (err)
-				pr_warn(KERN_WARNING
-					"set port %d command failed\n", i);
-		}
+	if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, gw->port) ==
+				    IB_LINK_LAYER_ETHERNET) {
+		err = mlx4_cmd(dev, mailbox->dma,
+			       MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
+			       1, MLX4_CMD_SET_PORT,
+			       MLX4_CMD_TIME_CLASS_B,
+			       MLX4_CMD_WRAPPED);
+		if (err)
+			pr_warn(KERN_WARNING
+				"set port %d command failed\n", gw->port);
 	}

 	mlx4_free_cmd_mailbox(dev, mailbox);
@@ -1425,7 +1437,8 @@ free:
 }

 static int update_gid_table(struct mlx4_ib_dev *dev, int port,
-			    union ib_gid *gid, int clear)
+			    union ib_gid *gid, int clear,
+			    int default_gid)
 {
 	struct update_gid_work *work;
 	int i;
@@ -1434,26 +1447,31 @@ static int update_gid_table(struct mlx4_ib_dev *dev, int port,
 	int found = -1;
 	int max_gids;

-	max_gids = dev->dev->caps.gid_table_len[port];
-	for (i = 0; i < max_gids; ++i) {
-		if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
-			    sizeof(*gid)))
-			found = i;
-
-		if (clear) {
-			if (found >= 0) {
-				need_update = 1;
-				dev->iboe.gid_table[port - 1][found] = zgid;
-				break;
-			}
-		} else {
-			if (found >= 0)
-				break;
-
-			if (free < 0 &&
-			    !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid,
-				    sizeof(*gid)))
-				free = i;
-		}
+	if (default_gid) {
+		free = 0;
+	} else {
+		max_gids = dev->dev->caps.gid_table_len[port];
+		for (i = 1; i < max_gids; ++i) {
+			if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
+				    sizeof(*gid)))
+				found = i;
+
+			if (clear) {
+				if (found >= 0) {
+					need_update = 1;
+					dev->iboe.gid_table[port - 1][found] =
+						zgid;
+					break;
+				}
+			} else {
+				if (found >= 0)
+					break;
+
+				if (free < 0 &&
+				    !memcmp(&dev->iboe.gid_table[port - 1][i],
+					    &zgid, sizeof(*gid)))
+					free = i;
+			}
+		}
 	}

@@ -1478,18 +1496,26 @@ static int update_gid_table(struct mlx4_ib_dev *dev, int port,
 	return 0;
 }

-static int reset_gid_table(struct mlx4_ib_dev *dev)
+static void mlx4_make_default_gid(struct  net_device *dev, union ib_gid *gid)
+{
+	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+	mlx4_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
+}
+
+
+static int reset_gid_table(struct mlx4_ib_dev *dev, u8 port)
 {
 	struct update_gid_work *work;

+
 	work = kzalloc(sizeof(*work), GFP_ATOMIC);
 	if (!work)
 		return -ENOMEM;
-	memset(dev->iboe.gid_table, 0, sizeof(dev->iboe.gid_table));
+
+	memset(dev->iboe.gid_table[port - 1], 0, sizeof(work->gids));
 	memset(work->gids, 0, sizeof(work->gids));
 	INIT_WORK(&work->work, reset_gids_task);
 	work->dev = dev;
+	work->port = port;
 	queue_work(wq, &work->work);
 	return 0;
 }
@@ -1502,6 +1528,12 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
 	struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
 				rdma_vlan_dev_real_dev(event_netdev) :
 				event_netdev;
+	union ib_gid default_gid;
+
+	mlx4_make_default_gid(real_dev, &default_gid);
+
+	if (!memcmp(gid, &default_gid, sizeof(*gid)))
+		return 0;

 	if (event != NETDEV_DOWN && event != NETDEV_UP)
 		return 0;
@@ -1520,7 +1552,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
 		     (!netif_is_bond_master(real_dev) &&
 		     (real_dev == iboe->netdevs[port - 1])))
 			update_gid_table(ibdev, port, gid,
-					 event == NETDEV_DOWN);
+					 event == NETDEV_DOWN, 0);

 	spin_unlock(&iboe->lock);
 	return 0;
@@ -1536,7 +1568,6 @@ static u8 mlx4_ib_get_dev_port(struct net_device *dev,
 				rdma_vlan_dev_real_dev(dev) : dev;

 	iboe = &ibdev->iboe;
-	spin_lock(&iboe->lock);

 	for (port = 1; port <= MLX4_MAX_PORTS; ++port)
 		if ((netif_is_bond_master(real_dev) &&
@@ -1545,8 +1576,6 @@ static u8 mlx4_ib_get_dev_port(struct net_device *dev,
 		     (real_dev == iboe->netdevs[port - 1])))
 			break;

-	spin_unlock(&iboe->lock);
-
 	if ((port == 0) || (port > MLX4_MAX_PORTS))
 		return 0;
 	else
@@ -1607,7 +1636,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
 			/*ifa->ifa_address;*/
 			ipv6_addr_set_v4mapped(ifa->ifa_address,
 					       (struct in6_addr *)&gid);
-			update_gid_table(ibdev, port, &gid, 0);
+			update_gid_table(ibdev, port, &gid, 0, 0);
 		}
 		endfor_ifa(in_dev);
 		in_dev_put(in_dev);
@@ -1619,7 +1648,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
 		read_lock_bh(&in6_dev->lock);
 		list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
 			pgid = (union ib_gid *)&ifp->addr;
-			update_gid_table(ibdev, port, pgid, 0);
+			update_gid_table(ibdev, port, pgid, 0, 0);
 		}
 		read_unlock_bh(&in6_dev->lock);
 		in6_dev_put(in6_dev);
@@ -1627,14 +1656,26 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
 #endif
 }

+static void mlx4_ib_set_default_gid(struct mlx4_ib_dev *ibdev,
+				 struct  net_device *dev, u8 port)
+{
+	union ib_gid gid;
+	mlx4_make_default_gid(dev, &gid);
+	update_gid_table(ibdev, port, &gid, 0, 1);
+}
+
 static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
 {
 	struct	net_device *dev;
+	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
+	int i;

-	if (reset_gid_table(ibdev))
-		return -1;
+	for (i = 1; i <= ibdev->num_ports; ++i)
+		if (reset_gid_table(ibdev, i))
+			return -1;

 	read_lock(&dev_base_lock);
+	spin_lock(&iboe->lock);

 	for_each_netdev(&init_net, dev) {
 		u8 port = mlx4_ib_get_dev_port(dev, ibdev);
@@ -1642,6 +1683,7 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
 			mlx4_ib_get_dev_addr(dev, ibdev, port);
 	}

+	spin_unlock(&iboe->lock);
 	read_unlock(&dev_base_lock);

 	return 0;
@@ -1656,25 +1698,57 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)

 	spin_lock(&iboe->lock);
 	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
+		enum ib_port_state	port_state = IB_PORT_NOP;
 		struct net_device *old_master = iboe->masters[port - 1];
+		struct net_device *curr_netdev;
 		struct net_device *curr_master;
+
 		iboe->netdevs[port - 1] =
 			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
-		if (iboe->netdevs[port - 1])
-			mlx4_ib_set_default_gid(ibdev,
-						iboe->netdevs[port - 1], port);
+		curr_netdev = iboe->netdevs[port - 1];

 		if (iboe->netdevs[port - 1] &&
 		    netif_is_bond_slave(iboe->netdevs[port - 1])) {
 			rtnl_lock();
 			iboe->masters[port - 1] = netdev_master_upper_dev_get(
 				iboe->netdevs[port - 1]);
 			rtnl_unlock();
+		} else {
+			iboe->masters[port - 1] = NULL;
 		}
 		curr_master = iboe->masters[port - 1];

+		if (curr_netdev) {
+			port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
+						IB_PORT_ACTIVE : IB_PORT_DOWN;
+			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+		} else {
+			reset_gid_table(ibdev, port);
+		}
+		/* if using bonding/team and a slave port is down, we don't the bond IP
+		 * based gids in the table since flows that select port by gid may get
+		 * the down port.
+		 */
+		if (curr_master && (port_state == IB_PORT_DOWN)) {
+			reset_gid_table(ibdev, port);
+			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+		}
 		/* if bonding is used it is possible that we add it to masters
-		    only after IP address is assigned to the net bonding
-		    interface */
-		if (curr_master && (old_master != curr_master))
+		 * only after IP address is assigned to the net bonding
+		 * interface.
+		*/
+		if (curr_master && (old_master != curr_master)) {
+			reset_gid_table(ibdev, port);
+			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
 			mlx4_ib_get_dev_addr(curr_master, ibdev, port);
+		}
+
+		if (!curr_master && (old_master != curr_master)) {
+			reset_gid_table(ibdev, port);
+			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+			mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
+		}
 	}

 	spin_unlock(&iboe->lock);
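The rescan logic above can be read as a per-port decision table. A condensed, hypothetical restatement as a pure function (a reading aid under simplifying assumptions, not driver code):

enum rescan_action { KEEP, RESET, RESET_TO_DEFAULT, RESET_AND_RELOAD_IPS };

static enum rescan_action rescan(int has_netdev, int port_down,
				 int has_master, int master_changed)
{
	if (!has_netdev)
		return RESET;			/* no Ethernet netdev: clear the table */
	if (has_master && port_down)
		return RESET_TO_DEFAULT;	/* hide bond IP GIDs on a down slave */
	if (master_changed)
		return RESET_AND_RELOAD_IPS;	/* enslaved or released: re-read IPs */
	return KEEP;				/* just refresh the default GID */
}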
@@ -1810,6 +1884,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	int i, j;
 	int err;
 	struct mlx4_ib_iboe *iboe;
+	int ib_num_ports = 0;

 	pr_info_once("%s", mlx4_ib_version);

@@ -1985,10 +2060,14 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 				ibdev->counters[i] = -1;
 	}

+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+		ib_num_ports++;
+
 	spin_lock_init(&ibdev->sm_lock);
 	mutex_init(&ibdev->cap_mask_mutex);

-	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
+	    ib_num_ports) {
 		ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
 		err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
 					    MLX4_IB_UC_STEER_QPN_ALIGN,
@@ -2051,7 +2130,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 			}
 		}
 #endif
+		for (i = 1 ; i <= ibdev->num_ports ; ++i)
+			reset_gid_table(ibdev, i);
+		rtnl_lock();
 		mlx4_ib_scan_netdevs(ibdev);
+		rtnl_unlock();
 		mlx4_ib_init_gid_table(ibdev);
 	}

@@ -1,6 +1,6 @@
 config MLX5_INFINIBAND
 	tristate "Mellanox Connect-IB HCA support"
-	depends on NETDEVICES && ETHERNET && PCI && X86
+	depends on NETDEVICES && ETHERNET && PCI
 	select NET_VENDOR_MELLANOX
 	select MLX5_CORE
 	---help---
@@ -261,8 +261,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
 		IB_DEVICE_PORT_ACTIVE_EVENT		|
 		IB_DEVICE_SYS_IMAGE_GUID		|
-		IB_DEVICE_RC_RNR_NAK_GEN		|
-		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
+		IB_DEVICE_RC_RNR_NAK_GEN;
 	flags = dev->mdev.caps.flags;
 	if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
 		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
@@ -536,24 +535,38 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 						  struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
-	struct mlx5_ib_alloc_ucontext_req req;
+	struct mlx5_ib_alloc_ucontext_req_v2 req;
 	struct mlx5_ib_alloc_ucontext_resp resp;
 	struct mlx5_ib_ucontext *context;
 	struct mlx5_uuar_info *uuari;
 	struct mlx5_uar *uars;
 	int gross_uuars;
 	int num_uars;
+	int ver;
 	int uuarn;
 	int err;
 	int i;
+	int reqlen;

 	if (!dev->ib_active)
 		return ERR_PTR(-EAGAIN);

-	err = ib_copy_from_udata(&req, udata, sizeof(req));
+	memset(&req, 0, sizeof(req));
+	reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
+	if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
+		ver = 0;
+	else if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
+		ver = 2;
+	else
+		return ERR_PTR(-EINVAL);
+
+	err = ib_copy_from_udata(&req, udata, reqlen);
 	if (err)
 		return ERR_PTR(err);

+	if (req.flags || req.reserved)
+		return ERR_PTR(-EINVAL);
+
 	if (req.total_num_uuars > MLX5_MAX_UUARS)
 		return ERR_PTR(-ENOMEM);

@@ -626,6 +639,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	if (err)
 		goto out_uars;

+	uuari->ver = ver;
 	uuari->num_low_latency_uuars = req.num_low_latency_uuars;
 	uuari->uars = uars;
 	uuari->num_uars = num_uars;
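The alloc_ucontext hunks above version the uverbs ABI by payload size rather than by an explicit version field: a short request means v0, the extended struct means v2, and any other length is rejected, with the new flags/reserved fields required to be zero so they can gain meaning later without breaking old binaries. A userspace illustration under those assumptions (struct layouts mirror the uAPI additions in user.h below; the dispatch is a paraphrase, not the kernel code):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct req_v0 {			/* original mlx5_ib_alloc_ucontext_req */
	uint32_t total_num_uuars;
	uint32_t num_low_latency_uuars;
};

struct req_v2 {			/* _v2 grows by two fields, zero until defined */
	uint32_t total_num_uuars;
	uint32_t num_low_latency_uuars;
	uint32_t flags;
	uint32_t reserved;
};

static int negotiate(size_t reqlen)
{
	if (reqlen == sizeof(struct req_v0))
		return 0;	/* legacy userspace */
	if (reqlen == sizeof(struct req_v2))
		return 2;	/* current userspace */
	return -1;		/* unknown layout: the kernel answers -EINVAL */
}

int main(void)
{
	printf("v0-sized payload  -> ver %d\n", negotiate(sizeof(struct req_v0)));
	printf("v2-sized payload  -> ver %d\n", negotiate(sizeof(struct req_v2)));
	printf("odd-sized payload -> ver %d\n", negotiate(3));
	return 0;
}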
@@ -216,7 +216,9 @@ static int sq_overhead(enum ib_qp_type qp_type)

 	case IB_QPT_UC:
 		size += sizeof(struct mlx5_wqe_ctrl_seg) +
-			sizeof(struct mlx5_wqe_raddr_seg);
+			sizeof(struct mlx5_wqe_raddr_seg) +
+			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
+			sizeof(struct mlx5_mkey_seg);
 		break;

 	case IB_QPT_UD:
@@ -428,11 +430,17 @@ static int alloc_uuar(struct mlx5_uuar_info *uuari,
 		break;

 	case MLX5_IB_LATENCY_CLASS_MEDIUM:
-		uuarn = alloc_med_class_uuar(uuari);
+		if (uuari->ver < 2)
+			uuarn = -ENOMEM;
+		else
+			uuarn = alloc_med_class_uuar(uuari);
 		break;

 	case MLX5_IB_LATENCY_CLASS_HIGH:
-		uuarn = alloc_high_class_uuar(uuari);
+		if (uuari->ver < 2)
+			uuarn = -ENOMEM;
+		else
+			uuarn = alloc_high_class_uuar(uuari);
 		break;

 	case MLX5_IB_LATENCY_CLASS_FAST_PATH:
@@ -657,8 +665,8 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
 	int err;

 	uuari = &dev->mdev.priv.uuari;
-	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
-		qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
+	if (init_attr->create_flags)
+		return -EINVAL;

 	if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
 		lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;
@@ -62,6 +62,13 @@ struct mlx5_ib_alloc_ucontext_req {
 	__u32	num_low_latency_uuars;
 };

+struct mlx5_ib_alloc_ucontext_req_v2 {
+	__u32	total_num_uuars;
+	__u32	num_low_latency_uuars;
+	__u32	flags;
+	__u32	reserved;
+};
+
 struct mlx5_ib_alloc_ucontext_resp {
 	__u32	qp_tab_size;
 	__u32	bf_reg_size;
@@ -675,8 +675,11 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
 	INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status);

 	/* Initialize network devices */
-	if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL)
+	netdev = nes_netdev_init(nesdev, mmio_regs);
+	if (netdev == NULL) {
+		ret = -ENOMEM;
 		goto bail7;
+	}

 	/* Register network device */
 	ret = register_netdev(netdev);
@@ -127,7 +127,7 @@ static int ocrdma_addr_event(unsigned long event, struct net_device *netdev,

 	is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN;
 	if (is_vlan)
-		netdev = vlan_dev_real_dev(netdev);
+		netdev = rdma_vlan_dev_real_dev(netdev);

 	rcu_read_lock();
 	list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) {
@@ -1416,7 +1416,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
 					  OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
 						OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
 	qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
-					      OCRDMA_QP_PARAMS_SQ_PSN_MASK) >>
+					      OCRDMA_QP_PARAMS_TCLASS_MASK) >>
 						OCRDMA_QP_PARAMS_TCLASS_SHIFT;

 	qp_attr->ah_attr.ah_flags = IB_AH_GRH;
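The ocrdma_query_qp hunk above is a classic mask/shift mismatch: the traffic class was extracted with the SQ PSN field's mask but the TCLASS shift, silently yielding garbage. A small demo with an invented register layout (the real OCRDMA masks differ):

#include <stdio.h>
#include <stdint.h>

/* Invented layout: tclass in bits 31:24, SQ PSN in bits 23:0. */
#define TCLASS_MASK	0xff000000u
#define TCLASS_SHIFT	24
#define SQ_PSN_MASK	0x00ffffffu

int main(void)
{
	uint32_t tclass_sq_psn = 0x2a123456;	/* tclass 0x2a, psn 0x123456 */

	printf("right mask: 0x%02x\n",
	       (tclass_sq_psn & TCLASS_MASK) >> TCLASS_SHIFT);	/* 0x2a */
	printf("wrong mask: 0x%02x\n",
	       (tclass_sq_psn & SQ_PSN_MASK) >> TCLASS_SHIFT);	/* 0x00 */
	return 0;
}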
@@ -2395,6 +2395,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
 	qib_write_kreg(dd, kr_scratch, 0ULL);

+	/* ensure previous Tx parameters are not still forced */
+	qib_write_kreg_port(ppd, krp_tx_deemph_override,
+		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+		reset_tx_deemphasis_override));
+
 	if (qib_compat_ddr_negotiate) {
 		ppd->cpspec->ibdeltainprog = 1;
 		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
@@ -629,6 +629,7 @@ static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
 {
 	enum usnic_transport_type trans_type = qp_flow->trans_type;
 	int err;
+	uint16_t port_num = 0;

 	switch (trans_type) {
 	case USNIC_TRANSPORT_ROCE_CUSTOM:
@@ -637,9 +638,15 @@ static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
 	case USNIC_TRANSPORT_IPV4_UDP:
 		err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
 							NULL, NULL,
-							(uint16_t *) id);
+							&port_num);
 		if (err)
 			return err;
+		/*
+		 * Copy port_num to stack first and then to *id,
+		 * so that the short to int cast works for little
+		 * and big endian systems.
+		 */
+		*id = port_num;
 		break;
 	default:
 		usnic_err("Unsupported transport %u\n", trans_type);
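The usnic hunk above fixes a byte-order bug: the old code passed an int's address through a (uint16_t *) cast, which fills the low-order bytes on little endian but the high-order bytes on big endian, so the id would read back shifted. Copying through a properly typed stack variable lets the compiler do the widening. A standalone demo of the hazard:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int id = 0;
	uint16_t port_num = 0x1234;

	/* Old pattern: byte-order dependent (and a strict-aliasing foul). */
	*(uint16_t *)&id = port_num;
	printf("via cast:   0x%08x\n", id);	/* 0x00001234 LE, 0x12340000 BE */

	/* Fixed pattern: plain assignment widens correctly everywhere. */
	id = port_num;
	printf("via assign: 0x%08x\n", id);	/* 0x00001234 on both */
	return 0;
}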
@@ -610,11 +610,12 @@ void iser_snd_completion(struct iser_tx_desc *tx_desc,
 		ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
 					ISER_HEADERS_LEN, DMA_TO_DEVICE);
 		kmem_cache_free(ig.desc_cache, tx_desc);
+		tx_desc = NULL;
 	}

 	atomic_dec(&ib_conn->post_send_buf_count);

-	if (tx_desc->type == ISCSI_TX_CONTROL) {
+	if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) {
 		/* this arithmetic is legal by libiscsi dd_data allocation */
 		task = (void *) ((long)(void *)tx_desc -
 				  sizeof(struct iscsi_task));
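The iser_snd_completion hunk above adopts a free-then-NULL discipline: once the descriptor is freed on one path, the local pointer is cleared so the shared code below it can test the pointer before dereferencing. A generic sketch of the pattern (not iser code):

#include <stdlib.h>

struct desc { int type; };

void completion(struct desc *d, int freed_early)
{
	if (freed_early) {
		free(d);
		d = NULL;	/* poison the local so later code can test it */
	}

	/* ... accounting shared by both paths runs here ... */

	if (d && d->type == 1) {
		/* only reached while d is still live */
	}
}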
@@ -652,9 +652,13 @@ static int iser_disconnected_handler(struct rdma_cm_id *cma_id)
 	/* getting here when the state is UP means that the conn is being *
 	 * terminated asynchronously from the iSCSI layer's perspective.  */
 	if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
-				      ISER_CONN_TERMINATING))
-		iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
-				   ISCSI_ERR_CONN_FAILED);
+					ISER_CONN_TERMINATING)){
+		if (ib_conn->iser_conn)
+			iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
+					   ISCSI_ERR_CONN_FAILED);
+		else
+			iser_err("iscsi_iser connection isn't bound\n");
+	}

 	/* Complete the termination process if no posts are pending */
 	if (ib_conn->post_recv_buf_count == 0 &&
@@ -4,5 +4,5 @@

 config MLX5_CORE
 	tristate
-	depends on PCI && X86
+	depends on PCI
 	default n
@@ -38,8 +38,10 @@
 #include <linux/pci.h>
 #include <linux/spinlock_types.h>
 #include <linux/semaphore.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/radix-tree.h>

 #include <linux/mlx5/device.h>
 #include <linux/mlx5/doorbell.h>
@@ -227,6 +229,7 @@ struct mlx5_uuar_info {
 	 * protect uuar allocation data structs
 	 */
 	struct mutex		lock;
+	u32			ver;
 };

 struct mlx5_bf {