77109cc282
The current INIT_HCA firmware command timeout is sufficient for the default number of resources (QPs, CQs, etc.) being allocated, but if the HCA profile is modified to increase the amount of resources, then a spurious timeout is detected and HCA initialization fails. Increase the timeout for the INIT_HCA command to 10 seconds, which also brings it into line with all the other command timeouts.

Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
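In this file the change amounts to the timeout argument (in milliseconds) passed to mlx4_cmd() in mlx4_INIT_HCA(); the relevant call, as it appears in the source below:

	/* 10000 ms: long enough for HCA profiles with many QPs/CQs/etc. */
	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000);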
836 lines, 29 KiB, C
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/cmd.h>

#include "fw.h"
#include "icm.h"

enum {
	MLX4_COMMAND_INTERFACE_MIN_REV		= 2,
	MLX4_COMMAND_INTERFACE_MAX_REV		= 3,
	MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS	= 3,
};

extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);

#define MLX4_GET(dest, source, offset) \
	do { \
		void *__p = (char *) (source) + (offset); \
		switch (sizeof (dest)) { \
		case 1: (dest) = *(u8 *) __p; break; \
		case 2: (dest) = be16_to_cpup(__p); break; \
		case 4: (dest) = be32_to_cpup(__p); break; \
		case 8: (dest) = be64_to_cpup(__p); break; \
		default: __buggy_use_of_MLX4_GET(); \
		} \
	} while (0)

#define MLX4_PUT(dest, source, offset) \
	do { \
		void *__d = ((char *) (dest) + (offset)); \
		switch (sizeof(source)) { \
		case 1: *(u8 *) __d = (source); break; \
		case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
		case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
		case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
		default: __buggy_use_of_MLX4_PUT(); \
		} \
	} while (0)

static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
{
	static const char *fname[] = {
		[ 0] = "RC transport",
		[ 1] = "UC transport",
		[ 2] = "UD transport",
		[ 3] = "XRC transport",
		[ 4] = "reliable multicast",
		[ 5] = "FCoIB support",
		[ 6] = "SRQ support",
		[ 7] = "IPoIB checksum offload",
		[ 8] = "P_Key violation counter",
		[ 9] = "Q_Key violation counter",
		[10] = "VMM",
		[16] = "MW support",
		[17] = "APM support",
		[18] = "Atomic ops support",
		[19] = "Raw multicast support",
		[20] = "Address vector port checking support",
		[21] = "UD multicast support",
		[24] = "Demand paging support",
		[25] = "Router support"
	};
	int i;

	mlx4_dbg(dev, "DEV_CAP flags:\n");
	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1 << i)))
			mlx4_dbg(dev, " %s\n", fname[i]);
}

int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	u16 size;
	u16 stat_rate;
	int err;
	int i;

#define QUERY_DEV_CAP_OUT_SIZE			0x100
#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET		0x10
#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET		0x11
#define QUERY_DEV_CAP_RSVD_QP_OFFSET		0x12
#define QUERY_DEV_CAP_MAX_QP_OFFSET		0x13
#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET		0x14
#define QUERY_DEV_CAP_MAX_SRQ_OFFSET		0x15
#define QUERY_DEV_CAP_RSVD_EEC_OFFSET		0x16
#define QUERY_DEV_CAP_MAX_EEC_OFFSET		0x17
#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET		0x19
#define QUERY_DEV_CAP_RSVD_CQ_OFFSET		0x1a
#define QUERY_DEV_CAP_MAX_CQ_OFFSET		0x1b
#define QUERY_DEV_CAP_MAX_MPT_OFFSET		0x1d
#define QUERY_DEV_CAP_RSVD_EQ_OFFSET		0x1e
#define QUERY_DEV_CAP_MAX_EQ_OFFSET		0x1f
#define QUERY_DEV_CAP_RSVD_MTT_OFFSET		0x20
#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET		0x21
#define QUERY_DEV_CAP_RSVD_MRW_OFFSET		0x22
#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET	0x23
#define QUERY_DEV_CAP_MAX_AV_OFFSET		0x27
#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET		0x29
#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET		0x2b
#define QUERY_DEV_CAP_MAX_RDMA_OFFSET		0x2f
#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET		0x33
#define QUERY_DEV_CAP_ACK_DELAY_OFFSET		0x35
#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET		0x36
#define QUERY_DEV_CAP_VL_PORT_OFFSET		0x37
#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET		0x38
#define QUERY_DEV_CAP_MAX_GID_OFFSET		0x3b
#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET	0x3c
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET		0x3f
#define QUERY_DEV_CAP_FLAGS_OFFSET		0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET		0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET		0x49
#define QUERY_DEV_CAP_PAGE_SZ_OFFSET		0x4b
#define QUERY_DEV_CAP_BF_OFFSET			0x4c
#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET	0x4d
#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET	0x4e
#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET	0x4f
#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET		0x51
#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET	0x52
#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET		0x55
#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET	0x56
#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET		0x61
#define QUERY_DEV_CAP_RSVD_MCG_OFFSET		0x62
#define QUERY_DEV_CAP_MAX_MCG_OFFSET		0x63
#define QUERY_DEV_CAP_RSVD_PD_OFFSET		0x64
#define QUERY_DEV_CAP_MAX_PD_OFFSET		0x65
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET	0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET	0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET	0x84
#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET	0x86
#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET	0x88
#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET	0x8a
#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET	0x8c
#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET	0x8e
#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET	0x90
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET	0x92
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x97
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A);
	if (err)
		goto out;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
	dev_cap->reserved_qps = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
	dev_cap->max_qps = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
	dev_cap->reserved_srqs = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
	dev_cap->max_srqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
	dev_cap->max_cq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
	dev_cap->reserved_cqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
	dev_cap->max_cqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
	dev_cap->max_mpts = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
	dev_cap->reserved_eqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
	dev_cap->max_eqs = 1 << (field & 0x7);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
	dev_cap->reserved_mtts = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
	dev_cap->max_mrw_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
	dev_cap->reserved_mrws = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
	dev_cap->max_mtt_seg = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
	dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
	dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
	dev_cap->max_rdma_global = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
	dev_cap->local_ca_ack_delay = field & 0x1f;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
	dev_cap->num_ports = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
	dev_cap->max_msg_sz = 1 << (field & 0x1f);
	MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
	dev_cap->stat_rate_support = stat_rate;
	MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
	dev_cap->reserved_uars = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
	dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
	dev_cap->min_page_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
	if (field & 0x80) {
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
		dev_cap->bf_reg_size = 1 << (field & 0x1f);
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
		dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
		mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
			 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
	} else {
		dev_cap->bf_reg_size = 0;
		mlx4_dbg(dev, "BlueFlame not available\n");
	}

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
	dev_cap->max_sq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
	dev_cap->max_sq_desc_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
	dev_cap->max_qp_per_mcg = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
	dev_cap->reserved_mgms = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
	dev_cap->max_mcgs = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
	dev_cap->reserved_pds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
	dev_cap->max_pds = 1 << (field & 0x3f);

	MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
	dev_cap->rdmarc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
	dev_cap->qpc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
	dev_cap->aux_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
	dev_cap->altc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
	dev_cap->eqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
	dev_cap->cqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
	dev_cap->srq_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
	dev_cap->cmpt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
	dev_cap->mtt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
	dev_cap->dmpt_entry_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
	dev_cap->max_srq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
	dev_cap->max_qp_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
	dev_cap->resize_srq = field & 1;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
	dev_cap->max_rq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
	dev_cap->max_rq_desc_sz = size;

	MLX4_GET(dev_cap->bmme_flags, outbox,
		 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	MLX4_GET(dev_cap->reserved_lkey, outbox,
		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
	MLX4_GET(dev_cap->max_icm_sz, outbox,
		 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		for (i = 1; i <= dev_cap->num_ports; ++i) {
			MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
			dev_cap->max_vl[i] = field >> 4;
			MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
			dev_cap->max_mtu[i] = field >> 4;
			dev_cap->max_port_width[i] = field & 0xf;
			MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
			dev_cap->max_gids[i] = 1 << (field & 0xf);
			MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
			dev_cap->max_pkeys[i] = 1 << (field & 0xf);
		}
	} else {
#define QUERY_PORT_MTU_OFFSET			0x01
#define QUERY_PORT_WIDTH_OFFSET			0x06
#define QUERY_PORT_MAX_GID_PKEY_OFFSET		0x07
#define QUERY_PORT_MAX_VL_OFFSET		0x0b

		for (i = 1; i <= dev_cap->num_ports; ++i) {
			err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
					   MLX4_CMD_TIME_CLASS_B);
			if (err)
				goto out;

			MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
			dev_cap->max_mtu[i] = field & 0xf;
			MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
			dev_cap->max_port_width[i] = field & 0xf;
			MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
			dev_cap->max_gids[i] = 1 << (field >> 4);
			dev_cap->max_pkeys[i] = 1 << (field & 0xf);
			MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
			dev_cap->max_vl[i] = field & 0xf;
		}
	}

	if (dev_cap->bmme_flags & 1)
		mlx4_dbg(dev, "Base MM extensions: yes "
			 "(flags %d, rsvd L_Key %08x)\n",
			 dev_cap->bmme_flags, dev_cap->reserved_lkey);
	else
		mlx4_dbg(dev, "Base MM extensions: no\n");

	/*
	 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
	 * we can't use any EQs whose doorbell falls on that page,
	 * even if the EQ itself isn't reserved.
	 */
	dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
				    dev_cap->reserved_eqs);

	mlx4_dbg(dev, "Max ICM size %lld MB\n",
		 (unsigned long long) dev_cap->max_icm_sz >> 20);
	mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
		 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
	mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
		 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
	mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
		 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
	mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
		 dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz);
	mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
		 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
	mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
		 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
	mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
		 dev_cap->max_pds, dev_cap->reserved_mgms);
	mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
		 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
	mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
		 dev_cap->local_ca_ack_delay, 128 << dev_cap->max_mtu[1],
		 dev_cap->max_port_width[1]);
	mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
		 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
	mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
		 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);

	dump_dev_cap_flags(dev, dev_cap->flags);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_icm_iter iter;
	__be64 *pages;
	int lg;
	int nent = 0;
	int i;
	int err = 0;
	int ts = 0, tc = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
	pages = mailbox->buf;

	for (mlx4_icm_first(icm, &iter);
	     !mlx4_icm_last(&iter);
	     mlx4_icm_next(&iter)) {
		/*
		 * We have to pass pages that are aligned to their
		 * size, so find the least significant 1 in the
		 * address or size and use that as our log2 size.
		 */
		lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
		if (lg < MLX4_ICM_PAGE_SHIFT) {
			mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
				  MLX4_ICM_PAGE_SIZE,
				  (unsigned long long) mlx4_icm_addr(&iter),
				  mlx4_icm_size(&iter));
			err = -EINVAL;
			goto out;
		}

		for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
			if (virt != -1) {
				pages[nent * 2] = cpu_to_be64(virt);
				virt += 1 << lg;
			}

			pages[nent * 2 + 1] =
				cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
					    (lg - MLX4_ICM_PAGE_SHIFT));
			ts += 1 << (lg - 10);
			++tc;

			if (++nent == MLX4_MAILBOX_SIZE / 16) {
				err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
					       MLX4_CMD_TIME_CLASS_B);
				if (err)
					goto out;
				nent = 0;
			}
		}
	}

	if (nent)
		err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, MLX4_CMD_TIME_CLASS_B);
	if (err)
		goto out;

	switch (op) {
	case MLX4_CMD_MAP_FA:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM_AUX:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
			 tc, ts, (unsigned long long) virt - (ts << 10));
		break;
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
}

int mlx4_UNMAP_FA(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, MLX4_CMD_TIME_CLASS_B);
}


int mlx4_RUN_FW(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, MLX4_CMD_TIME_CLASS_A);
}

int mlx4_QUERY_FW(struct mlx4_dev *dev)
{
	struct mlx4_fw *fw = &mlx4_priv(dev)->fw;
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int err = 0;
	u64 fw_ver;
	u16 cmd_if_rev;
	u8 lg;

#define QUERY_FW_OUT_SIZE		0x100
#define QUERY_FW_VER_OFFSET		0x00
#define QUERY_FW_CMD_IF_REV_OFFSET	0x0a
#define QUERY_FW_MAX_CMD_OFFSET		0x0f
#define QUERY_FW_ERR_START_OFFSET	0x30
#define QUERY_FW_ERR_SIZE_OFFSET	0x38
#define QUERY_FW_ERR_BAR_OFFSET		0x3c

#define QUERY_FW_SIZE_OFFSET		0x00
#define QUERY_FW_CLR_INT_BASE_OFFSET	0x20
#define QUERY_FW_CLR_INT_BAR_OFFSET	0x28

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
			   MLX4_CMD_TIME_CLASS_A);
	if (err)
		goto out;

	MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
	/*
	 * FW subminor version is at more significant bits than minor
	 * version, so swap here.
	 */
	dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
		((fw_ver & 0xffff0000ull) >> 16) |
		((fw_ver & 0x0000ffffull) << 16);

	MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
	    cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
		mlx4_err(dev, "Installed FW has unsupported "
			 "command interface revision %d.\n",
			 cmd_if_rev);
		mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
			 (int) (dev->caps.fw_ver >> 32),
			 (int) (dev->caps.fw_ver >> 16) & 0xffff,
			 (int) dev->caps.fw_ver & 0xffff);
		mlx4_err(dev, "This driver version supports only revisions %d to %d.\n",
			 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
		err = -ENODEV;
		goto out;
	}

	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
		dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;

	MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
	cmd->max_cmds = 1 << lg;

	mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
		 (int) (dev->caps.fw_ver >> 32),
		 (int) (dev->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->caps.fw_ver & 0xffff,
		 cmd_if_rev, cmd->max_cmds);

	MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
	MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
	MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET);
	fw->catas_bar = (fw->catas_bar >> 6) * 2;

	mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
		 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);

	MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET);
	MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
	MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
	fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;

	mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);

	/*
	 * Round up number of system pages needed in case
	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
	 */
	fw->fw_pages =
		ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);

	mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
		 (unsigned long long) fw->clr_int_base, fw->clr_int_bar);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

static void get_board_id(void *vsd, char *board_id)
{
	int i;

#define VSD_OFFSET_SIG1		0x00
#define VSD_OFFSET_SIG2		0xde
#define VSD_OFFSET_MLX_BOARD_ID	0xd0
#define VSD_OFFSET_TS_BOARD_ID	0x20

#define VSD_SIGNATURE_TOPSPIN	0x5ad

	memset(board_id, 0, MLX4_BOARD_ID_LEN);

	if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
	    be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
		strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
	} else {
		/*
		 * The board ID is a string but the firmware byte
		 * swaps each 4-byte word before passing it back to
		 * us. Therefore we need to swab it before printing.
		 */
		for (i = 0; i < 4; ++i)
			((u32 *) board_id)[i] =
				swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
	}
}

int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int err;

#define QUERY_ADAPTER_OUT_SIZE			0x100
#define QUERY_ADAPTER_VENDOR_ID_OFFSET		0x00
#define QUERY_ADAPTER_DEVICE_ID_OFFSET		0x04
#define QUERY_ADAPTER_REVISION_ID_OFFSET	0x08
#define QUERY_ADAPTER_INTA_PIN_OFFSET		0x10
#define QUERY_ADAPTER_VSD_OFFSET		0x20

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
			   MLX4_CMD_TIME_CLASS_A);
	if (err)
		goto out;

	MLX4_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET);
	MLX4_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET);
	MLX4_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET);
	MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);

	get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
		     adapter->board_id);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
{
	struct mlx4_cmd_mailbox *mailbox;
	__be32 *inbox;
	int err;

#define INIT_HCA_IN_SIZE		0x200
#define INIT_HCA_VERSION_OFFSET		0x000
#define INIT_HCA_VERSION		2
#define INIT_HCA_FLAGS_OFFSET		0x014
#define INIT_HCA_QPC_OFFSET		0x020
#define INIT_HCA_QPC_BASE_OFFSET	(INIT_HCA_QPC_OFFSET + 0x10)
#define INIT_HCA_LOG_QP_OFFSET		(INIT_HCA_QPC_OFFSET + 0x17)
#define INIT_HCA_SRQC_BASE_OFFSET	(INIT_HCA_QPC_OFFSET + 0x28)
#define INIT_HCA_LOG_SRQ_OFFSET		(INIT_HCA_QPC_OFFSET + 0x2f)
#define INIT_HCA_CQC_BASE_OFFSET	(INIT_HCA_QPC_OFFSET + 0x30)
#define INIT_HCA_LOG_CQ_OFFSET		(INIT_HCA_QPC_OFFSET + 0x37)
#define INIT_HCA_ALTC_BASE_OFFSET	(INIT_HCA_QPC_OFFSET + 0x40)
#define INIT_HCA_AUXC_BASE_OFFSET	(INIT_HCA_QPC_OFFSET + 0x50)
#define INIT_HCA_EQC_BASE_OFFSET	(INIT_HCA_QPC_OFFSET + 0x60)
#define INIT_HCA_LOG_EQ_OFFSET		(INIT_HCA_QPC_OFFSET + 0x67)
#define INIT_HCA_RDMARC_BASE_OFFSET	(INIT_HCA_QPC_OFFSET + 0x70)
#define INIT_HCA_LOG_RD_OFFSET		(INIT_HCA_QPC_OFFSET + 0x77)
#define INIT_HCA_MCAST_OFFSET		0x0c0
#define INIT_HCA_MC_BASE_OFFSET		(INIT_HCA_MCAST_OFFSET + 0x00)
#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET	(INIT_HCA_MCAST_OFFSET + 0x12)
#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET	(INIT_HCA_MCAST_OFFSET + 0x16)
#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET	(INIT_HCA_MCAST_OFFSET + 0x1b)
#define INIT_HCA_TPT_OFFSET		0x0f0
#define INIT_HCA_DMPT_BASE_OFFSET	(INIT_HCA_TPT_OFFSET + 0x00)
#define INIT_HCA_LOG_MPT_SZ_OFFSET	(INIT_HCA_TPT_OFFSET + 0x0b)
#define INIT_HCA_MTT_BASE_OFFSET	(INIT_HCA_TPT_OFFSET + 0x10)
#define INIT_HCA_CMPT_BASE_OFFSET	(INIT_HCA_TPT_OFFSET + 0x18)
#define INIT_HCA_UAR_OFFSET		0x120
#define INIT_HCA_LOG_UAR_SZ_OFFSET	(INIT_HCA_UAR_OFFSET + 0x0a)
#define INIT_HCA_UAR_PAGE_SZ_OFFSET	(INIT_HCA_UAR_OFFSET + 0x0b)

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	memset(inbox, 0, INIT_HCA_IN_SIZE);

	*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;

#if defined(__LITTLE_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
#elif defined(__BIG_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
#else
#error Host endianness not defined
#endif
	/* Check port for UD address vector: */
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);

	/* QPC/EEC/CQC/EQC/RDMARC attributes */

	MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET);
	MLX4_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET);
	MLX4_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET);
	MLX4_PUT(inbox, param->altc_base, INIT_HCA_ALTC_BASE_OFFSET);
	MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET);
	MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET);
	MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);

	/* multicast attributes */

	MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
	MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
	MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);

	/* TPT attributes */

	MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
	MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET);
	MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET);

	/* UAR attributes */

	MLX4_PUT(inbox, (u8) (PAGE_SHIFT - 12), INIT_HCA_UAR_PAGE_SZ_OFFSET);
	MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000);

	if (err)
		mlx4_err(dev, "INIT_HCA returns %d\n", err);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err;
	u32 flags;
	u16 field;

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
#define INIT_PORT_IN_SIZE		256
#define INIT_PORT_FLAGS_OFFSET		0x00
#define INIT_PORT_FLAG_SIG		(1 << 18)
#define INIT_PORT_FLAG_NG		(1 << 17)
#define INIT_PORT_FLAG_G0		(1 << 16)
#define INIT_PORT_VL_SHIFT		4
#define INIT_PORT_PORT_WIDTH_SHIFT	8
#define INIT_PORT_MTU_OFFSET		0x04
#define INIT_PORT_MAX_GID_OFFSET	0x06
#define INIT_PORT_MAX_PKEY_OFFSET	0x0a
#define INIT_PORT_GUID0_OFFSET		0x10
#define INIT_PORT_NODE_GUID_OFFSET	0x18
#define INIT_PORT_SI_GUID_OFFSET	0x20

		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		memset(inbox, 0, INIT_PORT_IN_SIZE);

		flags = 0;
		flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
		flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
		MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);

		field = 128 << dev->caps.mtu_cap[port];
		MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
		field = dev->caps.gid_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
		field = dev->caps.pkey_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);

		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A);

		mlx4_free_cmd_mailbox(dev, mailbox);
	} else
		err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);

int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
{
	return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000);
}
EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);

int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
{
	return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000);
}

int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
{
	int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
			       MLX4_CMD_SET_ICM_SIZE,
			       MLX4_CMD_TIME_CLASS_A);
	if (ret)
		return ret;

	/*
	 * Round up number of system pages needed in case
	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
	 */
	*aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);

	return 0;
}

int mlx4_NOP(struct mlx4_dev *dev)
{
	/* Input modifier of 0x1f means "finish as soon as possible." */
	return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100);
}