
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull rdma fixes from Doug Ledford:
 "One ocrdma fix:

   - The new CQ API support was added to ocrdma, but they got the arming
     logic wrong; without this fix, transfers eventually stall under load
     because the driver fails to arm the completion interrupt properly

  Two related fixes for mlx4:

   - When the 64bit extended counters support was added to the core IB
     code, the RoCE side of the mlx4 driver was never updated (the IB
     side was updated properly).

     I debated whether or not to include these patches as they could be
     considered feature enablement patches, but the existing code will
     blindly copy the 32bit counters whether or not any counters were
     requested at all (a bug).

     These two patches make it (a) check to see that counters were
     requested and (b) copy the right counters (the 64bit support is
     new, the 32bit is not).  For that reason I went ahead and took
     them"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma:
  IB/mlx4: Add support for the port info class for RoCE ports
  IB/mlx4: Add support for extended counters over RoCE ports
  RDMA/ocrdma: Fix arm logic to align with new cq API
commit a16152c897
Author: Linus Torvalds
Date:   2016-02-22 12:04:11 -08:00

3 changed files with 54 additions and 30 deletions

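Background on the mlx4 counter fix below: the classic IB_PMA_PORT_COUNTERS
attribute carries 32bit fields that mlx4 fills from its 64bit hardware
counters, while IB_PMA_PORT_COUNTERS_EXT carries the full 64bit values.
For the 32bit case the driver uses a saturating assignment; a sketch along
the lines of the ASSIGN_32BIT_COUNTER macro used in the diff below
(paraphrased, not the verbatim kernel macro):

	/* Sketch: saturate a 64bit hardware counter into a 32bit
	 * big-endian PMA field instead of letting it wrap. */
	#define ASSIGN_32BIT_COUNTER(counter, value) do {	\
		if ((value) > U32_MAX)				\
			(counter) = cpu_to_be32(U32_MAX);	\
		else						\
			(counter) = cpu_to_be32(value);		\
	} while (0)

That saturation is why the extended attribute matters on busy RoCE ports,
and why blindly writing the 32bit layout into a PortCountersExt reply
returned the wrong counters.
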
drivers/infiniband/hw/mlx4/mad.c

@@ -817,17 +817,48 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
 }
 
-static void edit_counter(struct mlx4_counter *cnt,
-					struct ib_pma_portcounters *pma_cnt)
+static void edit_counter(struct mlx4_counter *cnt, void *counters,
+			 __be16 attr_id)
 {
-	ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
-			     (be64_to_cpu(cnt->tx_bytes) >> 2));
-	ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
-			     (be64_to_cpu(cnt->rx_bytes) >> 2));
-	ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
-			     be64_to_cpu(cnt->tx_frames));
-	ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
-			     be64_to_cpu(cnt->rx_frames));
+	switch (attr_id) {
+	case IB_PMA_PORT_COUNTERS:
+	{
+		struct ib_pma_portcounters *pma_cnt =
+			(struct ib_pma_portcounters *)counters;
+
+		ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
+				     (be64_to_cpu(cnt->tx_bytes) >> 2));
+		ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
+				     (be64_to_cpu(cnt->rx_bytes) >> 2));
+		ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
+				     be64_to_cpu(cnt->tx_frames));
+		ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
+				     be64_to_cpu(cnt->rx_frames));
+		break;
+	}
+	case IB_PMA_PORT_COUNTERS_EXT:
+	{
+		struct ib_pma_portcounters_ext *pma_cnt_ext =
+			(struct ib_pma_portcounters_ext *)counters;
+
+		pma_cnt_ext->port_xmit_data =
+			cpu_to_be64(be64_to_cpu(cnt->tx_bytes) >> 2);
+		pma_cnt_ext->port_rcv_data =
+			cpu_to_be64(be64_to_cpu(cnt->rx_bytes) >> 2);
+		pma_cnt_ext->port_xmit_packets = cnt->tx_frames;
+		pma_cnt_ext->port_rcv_packets = cnt->rx_frames;
+		break;
+	}
+	}
+}
+
+static int iboe_process_mad_port_info(void *out_mad)
+{
+	struct ib_class_port_info cpi = {};
+
+	cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
+	memcpy(out_mad, &cpi, sizeof(cpi));
+	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
 }
 
 static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
@@ -842,6 +873,9 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
 		return -EINVAL;
 
+	if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)
+		return iboe_process_mad_port_info((void *)(out_mad->data + 40));
+
 	memset(&counter_stats, 0, sizeof(counter_stats));
 	mutex_lock(&dev->counters_table[port_num - 1].mutex);
 	list_for_each_entry(tmp_counter,
@@ -863,7 +897,8 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 	switch (counter_stats.counter_mode & 0xf) {
 	case 0:
 		edit_counter(&counter_stats,
-			     (void *)(out_mad->data + 40));
+			     (void *)(out_mad->data + 40),
+			     in_mad->mad_hdr.attr_id);
 		err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
 		break;
 	default:
@@ -894,8 +929,10 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 	 */
 	if (link == IB_LINK_LAYER_INFINIBAND) {
 		if (mlx4_is_slave(dev->dev) &&
-		    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
-		    in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS)
+		    (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
+		     (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
+		      in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
+		      in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
 			return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
 						in_grh, in_mad, out_mad);

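The ClassPortInfo handling added above is how a management client discovers
that extended counters are available: the PMA sets IB_PMA_CLASS_CAP_EXT_WIDTH
in capability_mask. A client-side check might look roughly like this
(hypothetical helper; it assumes the struct ib_class_port_info and
IB_PMA_CLASS_CAP_EXT_WIDTH definitions from the rdma headers):

	#include <rdma/ib_mad.h>
	#include <rdma/ib_pma.h>

	/* Hypothetical helper: decide whether PortCountersExt may be
	 * queried. cpi points at a ClassPortInfo reply from the PMA. */
	static bool pma_has_ext_counters(const struct ib_class_port_info *cpi)
	{
		/* both sides are big-endian, so no byte swap is needed */
		return !!(cpi->capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH);
	}
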
drivers/infiniband/hw/ocrdma/ocrdma.h

@@ -323,9 +323,6 @@ struct ocrdma_cq {
 			   */
 	u32 max_hw_cqe;
 	bool phase_change;
-	bool deferred_arm, deferred_sol;
-	bool first_arm;
-
 	spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization
 					      * to cq polling
 					      */

drivers/infiniband/hw/ocrdma/ocrdma_verbs.c

@@ -1094,7 +1094,6 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
 	spin_lock_init(&cq->comp_handler_lock);
 	INIT_LIST_HEAD(&cq->sq_head);
 	INIT_LIST_HEAD(&cq->rq_head);
-	cq->first_arm = true;
 
 	if (ib_ctx) {
 		uctx = get_ocrdma_ucontext(ib_ctx);
@@ -2910,12 +2909,9 @@ expand_cqe:
 	}
 stop_cqe:
 	cq->getp = cur_getp;
-	if (cq->deferred_arm || polled_hw_cqes) {
-		ocrdma_ring_cq_db(dev, cq->id, cq->deferred_arm,
-				  cq->deferred_sol, polled_hw_cqes);
-		cq->deferred_arm = false;
-		cq->deferred_sol = false;
-	}
+
+	if (polled_hw_cqes)
+		ocrdma_ring_cq_db(dev, cq->id, false, false, polled_hw_cqes);
 
 	return i;
 }
@@ -2999,13 +2995,7 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
 	if (cq_flags & IB_CQ_SOLICITED)
 		sol_needed = true;
 
-	if (cq->first_arm) {
-		ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
-		cq->first_arm = false;
-	}
-
-	cq->deferred_arm = true;
-	cq->deferred_sol = sol_needed;
+	ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
 	spin_unlock_irqrestore(&cq->cq_lock, flags);
 
 	return 0;
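
For context on why the deferred arming had to go: under the new core CQ API,
the core polls and re-arms the CQ itself and assumes ib_req_notify_cq() takes
effect immediately. A simplified sketch of the workqueue polling path (names
follow drivers/infiniband/core/cq.c, but this is an approximation, not the
verbatim code):

	/* Simplified sketch of the new CQ API's workqueue polling. */
	static void ib_cq_poll_work(struct work_struct *work)
	{
		struct ib_cq *cq = container_of(work, struct ib_cq, work);
		int completed;

		completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE);
		if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
		    ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				     IB_CQ_REPORT_MISSED_EVENTS) > 0)
			/* budget exhausted or events were missed: repoll */
			queue_work(ib_comp_wq, &cq->work);
	}

If the driver merely latches deferred_arm here instead of ringing the CQ
doorbell, a drained CQ ends the polling loop with the hardware interrupt
still unarmed and no further completions are ever reported; that is the
stall under load described in the pull message.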